diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts
index 206e29be16e5..8f14f0318456 100644
--- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts
@@ -20,11 +20,11 @@ sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async (
   const eventData = envelopeRequestParser(req);
 
   // Verify it's a gen_ai transaction
-  expect(eventData.transaction).toBe('messages claude-3-haiku-20240307');
-  expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages');
+  expect(eventData.transaction).toBe('chat claude-3-haiku-20240307');
+  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
   expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic');
   expect(eventData.contexts?.trace?.data).toMatchObject({
-    'gen_ai.operation.name': 'messages',
+    'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'anthropic',
     'gen_ai.request.model': 'claude-3-haiku-20240307',
     'gen_ai.request.temperature': 0.7,
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
index c9e112b32241..d6a5424dbf2e 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
@@ -17,19 +17,19 @@ it('traces a basic message creation request', async ({ signal }) => {
     expect.arrayContaining([
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.id': 'msg_mock123',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
       }),
     ]),
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
index 3c36e832a17a..b7c58d2f733f 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
@@ -18,14 +18,14 @@ it('traces Google GenAI chat creation and message sending', async () => {
       // First span - chats.create
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.temperature': 0.8,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 150,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
         }),
         description: 'chat gemini-1.5-pro create',
         op: 'gen_ai.chat',
@@ -34,14 +34,14 @@ it('traces Google GenAI chat creation and message sending', async () => {
       // Second span - chat.sendMessage
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         }),
         description: 'chat gemini-1.5-pro',
         op: 'gen_ai.chat',
@@ -50,20 +50,20 @@ it('traces Google GenAI chat creation and message sending', async () => {
       // Third span - models.generateContent
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-flash',
-          'gen_ai.request.temperature': 0.7,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         }),
-        description: 'models gemini-1.5-flash',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-1.5-flash',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
       }),
     ]),
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
index 875b4191b84b..d4abc4ae7220 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
@@ -1,4 +1,16 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -18,16 +30,16 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
       // Chat model span
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.langchain',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-5-sonnet-20241022',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
         }),
         description: 'chat claude-3-5-sonnet-20241022',
         op: 'gen_ai.chat',
@@ -36,8 +48,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
       // Chain span
       expect.objectContaining({
         data: expect.objectContaining({
-          'sentry.origin': 'auto.ai.langchain',
-          'sentry.op': 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
           'langchain.chain.name': 'my_test_chain',
         }),
         description: 'chain my_test_chain',
@@ -47,9 +59,9 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
       // Tool span
       expect.objectContaining({
         data: expect.objectContaining({
-          'sentry.origin': 'auto.ai.langchain',
-          'sentry.op': 'gen_ai.execute_tool',
-          'gen_ai.tool.name': 'search_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'search_tool',
         }),
         description: 'execute_tool search_tool',
         op: 'gen_ai.execute_tool',
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
index 33023b30fa55..da9635952632 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
@@ -1,4 +1,16 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_AGENT_NAME_ATTRIBUTE,
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_PIPELINE_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -18,10 +30,10 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
     const createAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.create_agent');
     expect(createAgentSpan).toMatchObject({
       data: {
-        'gen_ai.operation.name': 'create_agent',
-        'sentry.op': 'gen_ai.create_agent',
-        'sentry.origin': 'auto.ai.langgraph',
-        'gen_ai.agent.name': 'weather_assistant',
+        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
+        [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
+        [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
+        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
       },
       description: 'create_agent weather_assistant',
      op: 'gen_ai.create_agent',
@@ -32,16 +44,16 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
     const invokeAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.invoke_agent');
     expect(invokeAgentSpan).toMatchObject({
       data: expect.objectContaining({
-        'gen_ai.operation.name': 'invoke_agent',
-        'sentry.op': 'gen_ai.invoke_agent',
-        'sentry.origin': 'auto.ai.langgraph',
-        'gen_ai.agent.name': 'weather_assistant',
-        'gen_ai.pipeline.name': 'weather_assistant',
-        'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in SF?"}]',
-        'gen_ai.response.model': 'mock-model',
-        'gen_ai.usage.input_tokens': 20,
-        'gen_ai.usage.output_tokens': 10,
-        'gen_ai.usage.total_tokens': 30,
+        [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+        [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+        [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
+        [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
+        [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
+        [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in SF?"}]',
+        [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model',
+        [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
+        [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
+        [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
       }),
       description: 'invoke_agent weather_assistant',
       op: 'gen_ai.invoke_agent',
@@ -49,8 +61,8 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
     });
 
     // Verify tools are captured
-    if (invokeAgentSpan.data['gen_ai.request.available_tools']) {
-      expect(invokeAgentSpan.data['gen_ai.request.available_tools']).toMatch(/get_weather/);
+    if (invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]) {
+      expect(invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toMatch(/get_weather/);
     }
   })
   .start(signal);
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
index eb15fd80fc97..1c057e1a986c 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
@@ -1,4 +1,17 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -17,18 +30,18 @@ it('traces a basic chat completion request', async ({ signal }) => {
     expect.arrayContaining([
      expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.response.model': 'gpt-3.5-turbo',
-          'gen_ai.response.id': 'chatcmpl-mock123',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
-          'gen_ai.response.finish_reasons': '["stop"]',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
         }),
-        description: 'chat gpt-3.5-turbo',
-        op: 'gen_ai.chat',
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
index a70e51858113..4f8ac1e470e2 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
@@ -12,49 +12,49 @@ describe('Anthropic integration', () => {
       // First span - basic message completion without PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.id': 'msg_mock123',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
       // Second span - error handling
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'error-model',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
         }),
-        description: 'messages error-model',
-        op: 'gen_ai.messages',
+        description: 'chat error-model',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'internal_error',
       }),
       // Third span - token counting (no response.text because recordOutputs=false by default)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
@@ -62,13 +62,13 @@ describe('Anthropic integration', () => {
       expect.objectContaining({
         data: expect.objectContaining({
           'anthropic.response.timestamp': '2024-05-08T05:20:00.000Z',
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.id': 'claude-3-haiku-20240307',
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
         }),
         description: 'models claude-3-haiku-20240307',
         op: 'gen_ai.models',
@@ -84,23 +84,23 @@ describe('Anthropic integration', () => {
       // First span - basic message completion with PII
      expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.response.id': 'msg_mock123',
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.text': 'Hello from Anthropic mock!',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from Anthropic mock!',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
@@ -111,8 +111,8 @@ describe('Anthropic integration', () => {
           'http.response.header.content-length': 247,
           'http.response.status_code': 200,
           'otel.kind': 'CLIENT',
-          'sentry.op': 'http.client',
-          'sentry.origin': 'auto.http.otel.node_fetch',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
           'url.path': '/anthropic/v1/messages',
           'url.query': '',
           'url.scheme': 'http',
@@ -125,15 +125,15 @@ describe('Anthropic integration', () => {
       // Second - error handling with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
-          'gen_ai.request.model': 'error-model',
-          'gen_ai.system': 'anthropic',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
         }),
-        description: 'messages error-model',
-        op: 'gen_ai.messages',
+        description: 'chat error-model',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'internal_error',
       }),
@@ -144,8 +144,8 @@ describe('Anthropic integration', () => {
           'http.response.header.content-length': 15,
           'http.response.status_code': 404,
           'otel.kind': 'CLIENT',
-          'sentry.op': 'http.client',
-          'sentry.origin': 'auto.http.otel.node_fetch',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
           'url.path': '/anthropic/v1/messages',
           'url.query': '',
           'url.scheme': 'http',
@@ -158,16 +158,16 @@ describe('Anthropic integration', () => {
       // Third - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.text': '15',
-          'gen_ai.system': 'anthropic',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
@@ -178,8 +178,8 @@ describe('Anthropic integration', () => {
           'http.response.header.content-length': 19,
           'http.response.status_code': 200,
           'otel.kind': 'CLIENT',
-          'sentry.op': 'http.client',
-          'sentry.origin': 'auto.http.otel.node_fetch',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
           'url.path': '/anthropic/v1/messages/count_tokens',
           'url.query': '',
           'url.scheme': 'http',
@@ -193,13 +193,13 @@ describe('Anthropic integration', () => {
       expect.objectContaining({
         data: expect.objectContaining({
           'anthropic.response.timestamp': '2024-05-08T05:20:00.000Z',
-          'gen_ai.operation.name': 'models',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.id': 'claude-3-haiku-20240307',
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
-          'gen_ai.system': 'anthropic',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.anthropic',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
         }),
         description: 'models claude-3-haiku-20240307',
         op: 'gen_ai.models',
@@ -213,8 +213,8 @@ describe('Anthropic integration', () => {
           'http.response.header.content-length': 123,
           'http.response.status_code': 200,
           'otel.kind': 'CLIENT',
-          'sentry.op': 'http.client',
-          'sentry.origin': 'auto.http.otel.node_fetch',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
           'url.path': '/anthropic/v1/models/claude-3-haiku-20240307',
           'url.query': '',
           'url.scheme': 'http',
@@ -228,23 +228,23 @@ describe('Anthropic integration', () => {
       // Fifth - messages.create with stream: true
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.request.stream': true,
-          'gen_ai.response.id': 'msg_stream123',
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.streaming': true,
-          'gen_ai.response.text': 'Hello from stream!',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 15,
-          'gen_ai.usage.total_tokens': 25,
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream123',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
         }),
-        description: 'messages claude-3-haiku-20240307 stream-response',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
@@ -254,8 +254,8 @@ describe('Anthropic integration', () => {
          'http.request.method_original': 'POST',
           'http.response.status_code': 200,
           'otel.kind': 'CLIENT',
-          'sentry.op': 'http.client',
-          'sentry.origin': 'auto.http.otel.node_fetch',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
           'url.path': '/anthropic/v1/messages',
           'url.query': '',
           'url.scheme': 'http',
@@ -269,12 +269,12 @@ describe('Anthropic integration', () => {
       // Sixth - messages.stream
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.request.stream': true,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
         }),
-        description: 'messages claude-3-haiku-20240307 stream-response',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307 stream-response',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
@@ -287,27 +287,27 @@ describe('Anthropic integration', () => {
       // Check that custom options are respected
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
         }),
       }),
       // Check token counting with options
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-          'gen_ai.response.text': '15', // Present because recordOutputs=true is set in options
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', // Present because recordOutputs=true is set in options
         }),
-        op: 'gen_ai.messages',
+        op: 'gen_ai.chat',
       }),
       // Check models.retrieve with options
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
-          'gen_ai.response.id': 'claude-3-haiku-20240307',
-          'gen_ai.response.model': 'claude-3-haiku-20240307',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
         }),
         op: 'gen_ai.models',
         description: 'models claude-3-haiku-20240307',
@@ -378,53 +378,53 @@ describe('Anthropic integration', () => {
       spans: expect.arrayContaining([
         // messages.create with stream: true
         expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307 stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307 stream-response',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
-            'gen_ai.system': 'anthropic',
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_stream_1',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
-            'gen_ai.response.finish_reasons': '["end_turn"]',
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
+            [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["end_turn"]',
           }),
        }),
         // messages.stream
         expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307 stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307 stream-response',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
-            'gen_ai.system': 'anthropic',
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_stream_1',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
          }),
         }),
         // messages.stream with redundant stream: true param
         expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307 stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307 stream-response',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
-            'gen_ai.system': 'anthropic',
-            'gen_ai.operation.name': 'messages',
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.id': 'msg_stream_1',
-            'gen_ai.usage.input_tokens': 10,
-            'gen_ai.usage.output_tokens': 15,
-            'gen_ai.usage.total_tokens': 25,
+            [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
+            [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+            [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+            [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
         }),
       ]),
@@ -434,28 +434,28 @@ describe('Anthropic integration', () => {
       transaction: 'main',
       spans: expect.arrayContaining([
        expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307 stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307 stream-response',
+          op: 'gen_ai.chat',
          data: expect.objectContaining({
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             // streamed text concatenated
-            'gen_ai.response.text': 'Hello from stream!',
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
           }),
         }),
         expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307 stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307 stream-response',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'Hello from stream!',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
           }),
         }),
         expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307 stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307 stream-response',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'Hello from stream!',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
           }),
         }),
       ]),
@@ -486,10 +486,10 @@ describe('Anthropic integration', () => {
     transaction: {
       spans: expect.arrayContaining([
         expect.objectContaining({
-          op: 'gen_ai.messages',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
             'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON,
-            'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON,
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
           }),
         }),
       ]),
@@ -514,10 +514,10 @@ describe('Anthropic integration', () => {
       spans: expect.arrayContaining([
        expect.objectContaining({
           description: expect.stringContaining('stream-response'),
-          op: 'gen_ai.messages',
+          op: 'gen_ai.chat',
           data: expect.objectContaining({
             'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON,
-            'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON,
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
           }),
         }),
       ]),
@@ -534,45 +534,45 @@ describe('Anthropic integration', () => {
      spans: expect.arrayContaining([
         // Error with messages.create on stream initialization
         expect.objectContaining({
-          description: 'messages error-stream-init stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat error-stream-init stream-response',
+          op: 'gen_ai.chat',
           status: 'internal_error', // Actual status coming from the instrumentation
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-init',
-            'gen_ai.request.stream': true,
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
           }),
         }),
         // Error with messages.stream on stream initialization
         expect.objectContaining({
-          description: 'messages error-stream-init stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat error-stream-init stream-response',
+          op: 'gen_ai.chat',
           status: 'internal_error', // Actual status coming from the instrumentation
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-init',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init',
           }),
         }),
         // Error midway with messages.create on streaming - note: The stream is started successfully
         // so we get a successful span with the content that was streamed before the error
        expect.objectContaining({
-          description: 'messages error-stream-midway stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat error-stream-midway stream-response',
+          op: 'gen_ai.chat',
           status: 'ok',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-midway',
-            'gen_ai.request.stream': true,
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'This stream will ', // We received some data before error
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway',
+            [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error
           }),
         }),
         // Error midway with messages.stream - same behavior, we get a span with the streamed data
        expect.objectContaining({
-          description: 'messages error-stream-midway stream-response',
-          op: 'gen_ai.messages',
+          description: 'chat error-stream-midway stream-response',
+          op: 'gen_ai.chat',
           status: 'ok',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'error-stream-midway',
-            'gen_ai.response.streaming': true,
-            'gen_ai.response.text': 'This stream will ', // We received some data before error
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway',
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error
           }),
         }),
      ]),
@@ -590,11 +590,11 @@ describe('Anthropic integration', () => {
       spans: expect.arrayContaining([
         // Invalid tool format error
         expect.objectContaining({
-          description: 'messages invalid-format',
-          op: 'gen_ai.messages',
+          description: 'chat invalid-format',
+          op: 'gen_ai.chat',
           status: 'internal_error',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'invalid-format',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'invalid-format',
           }),
         }),
         // Model retrieval error
@@ -603,17 +603,17 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.models',
           status: 'internal_error',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'nonexistent-model',
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'nonexistent-model',
           }),
         }),
         // Successful tool usage (for comparison)
        expect.objectContaining({
-          description: 'messages claude-3-haiku-20240307',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307',
+          op: 'gen_ai.chat',
           status: 'ok',
           data: expect.objectContaining({
-            'gen_ai.request.model': 'claude-3-haiku-20240307',
-            'gen_ai.response.tool_calls': expect.stringContaining('tool_ok_1'),
+            [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('tool_ok_1'),
           }),
         }),
       ]),
@@ -640,34 +640,34 @@ describe('Anthropic integration', () => {
       // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
           // Messages should be present (truncation happened) and should be a JSON array
-          'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
       // Second call: Last message is small and kept without truncation
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
           // Small message should be kept intact
-          'gen_ai.request.messages': JSON.stringify([
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([
             { role: 'user', content: 'This is a small message that fits within the limit' },
           ]),
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
@@ -690,13 +690,13 @@ describe('Anthropic integration', () => {
     spans: expect.arrayContaining([
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'messages',
-          'sentry.op': 'gen_ai.messages',
-          'sentry.origin': 'auto.ai.anthropic',
-          'gen_ai.system': 'anthropic',
-          'gen_ai.request.model': 'claude-3-haiku-20240307',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
           // Only the last message (with filtered media) should be kept
-          'gen_ai.request.messages': JSON.stringify([
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([
            {
              role: 'user',
              content: [
@@ -712,8 +712,8 @@ describe('Anthropic integration', () => {
            },
          ]),
         }),
-        description: 'messages claude-3-haiku-20240307',
-        op: 'gen_ai.messages',
+        description: 'chat claude-3-haiku-20240307',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.anthropic',
         status: 'ok',
       }),
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
index d6ff72cde6d8..8960e81695e9 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
@@ -12,14 +12,14 @@ describe('Google GenAI integration', () => {
       // First span - chats.create
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.temperature': 0.8,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 150,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
         },
         description: 'chat gemini-1.5-pro create',
         op: 'gen_ai.chat',
@@ -29,14 +29,14 @@ describe('Google GenAI integration', () => {
       // Second span - chat.sendMessage (should get model from context)
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', // Should get from chat context
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         },
         description: 'chat gemini-1.5-pro',
         op: 'gen_ai.chat',
@@ -46,34 +46,34 @@ describe('Google GenAI integration', () => {
       // Third span - models.generateContent
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-flash',
-          'gen_ai.request.temperature': 0.7,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         },
-        description: 'models gemini-1.5-flash',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-1.5-flash',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
       // Fourth span - error handling
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'error-model',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
         },
-        description: 'models error-model',
-        op: 'gen_ai.models',
+        description: 'generate_content error-model',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'internal_error',
       }),
@@ -86,15 +86,15 @@ describe('Google GenAI integration', () => {
       // First span - chats.create with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.temperature': 0.8,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 150,
-          'gen_ai.request.messages': '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]',
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]',
         }),
         description: 'chat gemini-1.5-pro create',
         op: 'gen_ai.chat',
@@ -104,16 +104,16 @@ describe('Google GenAI integration', () => {
       // Second span - chat.sendMessage with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
-          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         }),
         description: 'chat gemini-1.5-pro',
         op: 'gen_ai.chat',
@@ -123,37 +123,37 @@ describe('Google GenAI integration', () => {
       // Third span - models.generateContent with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-flash',
-          'gen_ai.request.temperature': 0.7,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
-          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         }),
-        description: 'models gemini-1.5-flash',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-1.5-flash',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
       // Fourth span - error handling with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'error-model',
-          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
         }),
-        description: 'models error-model',
-        op: 'gen_ai.models',
+        description: 'generate_content error-model',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'internal_error',
       }),
@@ -166,8 +166,8 @@ describe('Google GenAI integration', () => {
       // Check that custom options are respected
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
-          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
         }),
         description: expect.not.stringContaining('stream-response'), // Non-streaming span
       }),
@@ -213,64 +213,64 @@ describe('Google GenAI integration', () => {
       // Non-streaming with tools
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-2.0-flash-001',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
           'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
-          'gen_ai.request.messages': expect.any(String), // Should include contents
-          'gen_ai.response.text': expect.any(String), // Should include response text
-          'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 8,
-          'gen_ai.usage.total_tokens': 23,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
+          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 23,
         }),
-        description: 'models gemini-2.0-flash-001',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-2.0-flash-001',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
       // Streaming with tools
      expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-2.0-flash-001',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
           'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
-          'gen_ai.request.messages': expect.any(String), // Should include contents
-          'gen_ai.response.streaming': true,
-          'gen_ai.response.text': expect.any(String), // Should include response text
-          'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls
-          'gen_ai.response.id': 'mock-response-tools-id',
-          'gen_ai.response.model': 'gemini-2.0-flash-001',
-          'gen_ai.usage.input_tokens': 12,
-          'gen_ai.usage.output_tokens': 10,
-          'gen_ai.usage.total_tokens': 22,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
+          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-tools-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
         }),
-        description: 'models gemini-2.0-flash-001 stream-response',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-2.0-flash-001 stream-response',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
       // Without tools for comparison
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-2.0-flash-001',
-          'gen_ai.request.messages': expect.any(String), // Should include contents
-          'gen_ai.response.text': expect.any(String), // Should include response text
-          'gen_ai.usage.input_tokens': 8,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 20,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
         }),
-        description: 'models gemini-2.0-flash-001',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-2.0-flash-001',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
@@ -289,38 +289,38 @@ describe('Google GenAI integration', () => {
       // First span - models.generateContentStream (streaming)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-flash',
-          'gen_ai.request.temperature': 0.7,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.response.streaming': true,
-          'gen_ai.response.id': 'mock-response-streaming-id',
-          'gen_ai.response.model': 'gemini-1.5-pro',
-          'gen_ai.response.finish_reasons': '["STOP"]',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 22,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
         }),
-        description: 'models gemini-1.5-flash stream-response',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-1.5-flash stream-response',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
       // Second span - chat.create
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.temperature': 0.8,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 150,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
         }),
         description: 'chat gemini-1.5-pro create',
         op: 'gen_ai.chat',
@@ -330,14 +330,14 @@ describe('Google GenAI integration', () => {
       // Third span - chat.sendMessageStream (streaming)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.response.streaming': true,
-          'gen_ai.response.id': 'mock-response-streaming-id',
-          'gen_ai.response.model': 'gemini-1.5-pro',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
         }),
         description: 'chat gemini-1.5-pro stream-response',
         op: 'gen_ai.chat',
@@ -347,24 +347,24 @@ describe('Google GenAI integration', () => {
       // Fourth span - blocked content streaming
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
         }),
-        description: 'models blocked-model stream-response',
-        op: 'gen_ai.models',
+        description: 'generate_content blocked-model stream-response',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'internal_error',
       }),
       // Fifth span - error handling for streaming
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
         }),
-        description: 'models error-model stream-response',
-        op: 'gen_ai.models',
+        description: 'generate_content error-model stream-response',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'internal_error',
       }),
@@ -377,39 +377,39 @@ describe('Google GenAI integration', () => {
       // First span - models.generateContentStream (streaming) with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'models',
-          'sentry.op': 'gen_ai.models',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-flash',
-          'gen_ai.request.temperature': 0.7,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 100,
-          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
-          'gen_ai.response.streaming': true,
-          'gen_ai.response.id': 'mock-response-streaming-id',
-          'gen_ai.response.model': 'gemini-1.5-pro',
-          'gen_ai.response.finish_reasons': '["STOP"]',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 22,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+          [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
+          [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
         }),
-        description: 'models gemini-1.5-flash stream-response',
-        op: 'gen_ai.models',
+        description: 'generate_content gemini-1.5-flash stream-response',
+        op: 'gen_ai.generate_content',
         origin: 'auto.ai.google_genai',
         status: 'ok',
       }),
       // Second span - chat.create
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.temperature': 0.8,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
+          [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
           'gen_ai.request.top_p': 0.9,
-          'gen_ai.request.max_tokens': 150,
+          [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
         }),
         description: 'chat gemini-1.5-pro create',
         op: 'gen_ai.chat',
@@ -419,19 +419,19 @@ describe('Google GenAI integration', () => {
       // Third span - chat.sendMessageStream (streaming) with PII
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.google_genai',
-          'gen_ai.system': 'google_genai',
-          'gen_ai.request.model': 'gemini-1.5-pro',
-          'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
-          'gen_ai.response.streaming': true,
-          'gen_ai.response.id': 'mock-response-streaming-id',
-          'gen_ai.response.model': 'gemini-1.5-pro',
-          'gen_ai.response.finish_reasons': '["STOP"]',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 12,
-          'gen_ai.usage.total_tokens': 22,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), description: 'chat gemini-1.5-pro stream-response', op: 'gen_ai.chat', @@ -441,33 +441,33 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content stream with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'blocked-model', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true - 'gen_ai.response.streaming': true, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, }), - description: 'models blocked-model stream-response', - op: 'gen_ai.models', + description: 'generate_content blocked-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), // Fifth span - error handling for streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), - description: 'models error-model stream-response', - op: 'gen_ai.models', + description: 'generate_content error-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -505,39 +505,39 @@ describe('Google GenAI integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Messages should be present (truncation happened) and should be a JSON array with parts - 'gen_ai.request.messages': expect.stringMatching( + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching( /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/, ), }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', parts: [{ text: 'This is a small message that fits within the limit' }], }, ]), }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 8d8f1d542f70..ff8c3353a8f7 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -12,18 +12,18 @@ describe('LangChain integration', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', @@ -34,19 +34,19 @@ describe('LangChain integration', () => { // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 
'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), description: 'chat claude-3-opus-20240229', @@ -57,11 +57,11 @@ describe('LangChain integration', () => { // Third span - error handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }), description: 'chat error-model', op: 'gen_ai.chat', @@ -77,21 +77,21 @@ describe('LangChain integration', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -101,22 
+101,22 @@ describe('LangChain integration', () => { // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -126,12 +126,12 @@ describe('LangChain integration', () => { // Third span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true }), description: 'chat error-model', op: 'gen_ai.chat', @@ -166,18 +166,18 @@ describe('LangChain integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 150, - 'gen_ai.usage.input_tokens': 20, - 'gen_ai.usage.output_tokens': 30, - 'gen_ai.usage.total_tokens': 50, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + 
[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), @@ -201,13 +201,13 @@ describe('LangChain integration', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -217,13 +217,13 @@ describe('LangChain integration', () => { // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -233,13 +233,13 @@ describe('LangChain integration', () => { // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -285,8 +285,7 @@ describe('LangChain integration', () => { // First call: Direct Anthropic call made BEFORE LangChain import // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = 
spans.find( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -299,8 +298,7 @@ describe('LangChain integration', () => { // This should NOT have Anthropic instrumentation (skip works correctly) // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index b05a70acdeb4..95e4529b3fdc 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -66,7 +66,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'anthropic', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', @@ -136,7 +136,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.request.model': 'error-model', // 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', @@ -335,8 +335,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: Direct Anthropic call made BEFORE LangChain import // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -349,8 +348,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should NOT have Anthropic instrumentation (skip works correctly) // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: @@ -437,7 +435,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'openai', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts index bafcdf49a32c..539bce1a740e 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts +++ 
b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts @@ -1,4 +1,20 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_AGENT_NAME_ATTRIBUTE, + GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PIPELINE_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('LangGraph integration', () => { @@ -12,10 +28,10 @@ describe('LangGraph integration', () => { // create_agent span expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 'gen_ai.create_agent', @@ -25,11 +41,11 @@ describe('LangGraph integration', () => { // First invoke_agent span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -39,11 +55,11 @@ describe('LangGraph integration', () => { // Second invoke_agent span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -59,10 +75,10 @@ describe('LangGraph integration', () => { // create_agent span (PII enabled doesn't affect this span) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 
'gen_ai.create_agent', @@ -72,12 +88,12 @@ describe('LangGraph integration', () => { // First invoke_agent span with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': expect.stringContaining('What is the weather today?'), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather today?'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -87,12 +103,12 @@ describe('LangGraph integration', () => { // Second invoke_agent span with PII and multiple messages expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': expect.stringContaining('Tell me about the weather'), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('Tell me about the weather'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -108,10 +124,10 @@ describe('LangGraph integration', () => { // create_agent span for first graph (no tool calls) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', }, description: 'create_agent tool_agent', op: 'gen_ai.create_agent', @@ -121,19 +137,19 @@ describe('LangGraph integration', () => { // invoke_agent span with tools available but not called expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_agent', - 'gen_ai.pipeline.name': 'tool_agent', - 'gen_ai.request.available_tools': expect.stringContaining('get_weather'), - 'gen_ai.request.messages': expect.stringContaining('What is the weather?'), - 'gen_ai.response.model': 'gpt-4-0613', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.text': expect.stringContaining('Response without calling tools'), - 'gen_ai.usage.input_tokens': 25, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_agent', + 
[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather?'), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringContaining('Response without calling tools'), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, }), description: 'invoke_agent tool_agent', op: 'gen_ai.invoke_agent', @@ -143,10 +159,10 @@ describe('LangGraph integration', () => { // create_agent span for second graph (with tool calls) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_calling_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', }, description: 'create_agent tool_calling_agent', op: 'gen_ai.create_agent', @@ -156,21 +172,21 @@ describe('LangGraph integration', () => { // invoke_agent span with tool calls and execution expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_calling_agent', - 'gen_ai.pipeline.name': 'tool_calling_agent', - 'gen_ai.request.available_tools': expect.stringContaining('get_weather'), - 'gen_ai.request.messages': expect.stringContaining('San Francisco'), - 'gen_ai.response.model': 'gpt-4-0613', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.text': expect.stringMatching(/"role":"tool"/), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_calling_agent', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('San Francisco'), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringMatching(/"role":"tool"/), // Verify tool_calls are captured - 'gen_ai.response.tool_calls': expect.stringContaining('get_weather'), - 'gen_ai.usage.input_tokens': 80, - 'gen_ai.usage.output_tokens': 40, - 'gen_ai.usage.total_tokens': 120, + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 80, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 120, }), description: 'invoke_agent tool_calling_agent', op: 'gen_ai.invoke_agent', @@ -213,10 +229,10 @@ describe('LangGraph integration', () => { // create_agent span expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', }, 
description: 'create_agent thread_test_agent', op: 'gen_ai.create_agent', @@ -226,13 +242,13 @@ describe('LangGraph integration', () => { // First invoke_agent span with thread_id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', - 'gen_ai.pipeline.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent', // The thread_id should be captured as conversation.id - 'gen_ai.conversation.id': 'thread_abc123_session_1', + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_abc123_session_1', }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', @@ -242,13 +258,13 @@ describe('LangGraph integration', () => { // Second invoke_agent span with different thread_id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', - 'gen_ai.pipeline.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent', // Different thread_id for different conversation - 'gen_ai.conversation.id': 'thread_xyz789_session_2', + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_xyz789_session_2', }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', @@ -258,7 +274,7 @@ describe('LangGraph integration', () => { // Third invoke_agent span without thread_id (should NOT have gen_ai.conversation.id) expect.objectContaining({ data: expect.not.objectContaining({ - 'gen_ai.conversation.id': expect.anything(), + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(), }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index ac40fbe94249..9010e203924f 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts @@ -117,8 +117,8 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -135,16 +135,16 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4', - op: 'gen_ai.responses', + description: 'chat gpt-4', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses API with tools and streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', 
+ 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -163,8 +163,8 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -238,8 +238,8 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -259,16 +259,16 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4', - op: 'gen_ai.responses', + description: 'chat gpt-4', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses API with tools and streaming with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -290,8 +290,8 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index bf64d2b92b72..7a1290aaa4b5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -12,18 +12,18 @@ describe('OpenAI integration', () => { // First span - basic chat completion without PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, 'openai.response.id': 'chatcmpl-mock123', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', @@ -38,36 +38,36 @@ describe('OpenAI 
integration', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, 'openai.response.id': 'resp_mock456', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Third span - error handling expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -77,19 +77,19 @@ describe('OpenAI integration', () => { // Fourth span - chat completions streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, 'openai.response.id': 'chatcmpl-stream-123', 'openai.response.model': 'gpt-4', 'gen_ai.response.streaming': true, @@ -105,18 +105,18 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 
'gen_ai.request.model': 'gpt-4', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_stream_456', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.usage.input_tokens': 6, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 16, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, 'openai.response.id': 'resp_stream_456', 'openai.response.model': 'gpt-4', 'gen_ai.response.streaming': true, @@ -124,20 +124,20 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Sixth span - error handling in streaming context expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', 'gen_ai.request.stream': true, - 'gen_ai.system': 'openai', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', }, description: 'chat error-model stream-response', op: 'gen_ai.chat', @@ -153,21 +153,21 @@ describe('OpenAI integration', () => { // First span - basic chat completion with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, 'gen_ai.request.messages.original_length': 2, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.text': '["Hello from OpenAI mock!"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '["Hello from OpenAI mock!"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, 'openai.response.id': 'chatcmpl-mock123', 'openai.response.model': 'gpt-3.5-turbo', 
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
@@ -182,40 +182,40 @@ describe('OpenAI integration', () => {
// Second span - responses API with PII
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-3.5-turbo',
- 'gen_ai.request.messages': 'Translate this to French: Hello',
- 'gen_ai.response.text': 'Response to: Translate this to French: Hello',
- 'gen_ai.response.finish_reasons': '["completed"]',
- 'gen_ai.response.model': 'gpt-3.5-turbo',
- 'gen_ai.response.id': 'resp_mock456',
- 'gen_ai.usage.input_tokens': 5,
- 'gen_ai.usage.output_tokens': 8,
- 'gen_ai.usage.total_tokens': 13,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13,
'openai.response.id': 'resp_mock456',
'openai.response.model': 'gpt-3.5-turbo',
'openai.response.timestamp': '2023-03-01T06:31:30.000Z',
'openai.usage.completion_tokens': 8,
'openai.usage.prompt_tokens': 5,
},
- description: 'responses gpt-3.5-turbo',
- op: 'gen_ai.responses',
+ description: 'chat gpt-3.5-turbo',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
// Third span - error handling with PII
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'error-model',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
},
description: 'chat error-model',
op: 'gen_ai.chat',
@@ -225,22 +225,22 @@ describe('OpenAI integration', () => {
// Fourth span - chat completions streaming with PII
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'gen_ai.request.temperature': 0.8,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
+ [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
'gen_ai.request.stream': true,
'gen_ai.request.messages.original_length': 2,
- 'gen_ai.request.messages': '[{"role":"user","content":"Tell me about streaming"}]',
- 'gen_ai.response.text': 'Hello from OpenAI streaming!',
- 'gen_ai.response.finish_reasons': '["stop"]',
- 'gen_ai.response.id': 'chatcmpl-stream-123',
- 'gen_ai.response.model': 'gpt-4',
- 'gen_ai.usage.input_tokens': 12,
- 'gen_ai.usage.output_tokens': 18,
- 'gen_ai.usage.total_tokens': 30,
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
'openai.response.id': 'chatcmpl-stream-123',
'openai.response.model': 'gpt-4',
'gen_ai.response.streaming': true,
@@ -256,20 +256,20 @@ describe('OpenAI integration', () => {
// Fifth span - responses API streaming with PII
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
'gen_ai.request.stream': true,
- 'gen_ai.request.messages': 'Test streaming responses API',
- 'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API',
- 'gen_ai.response.finish_reasons': '["in_progress","completed"]',
- 'gen_ai.response.id': 'resp_stream_456',
- 'gen_ai.response.model': 'gpt-4',
- 'gen_ai.usage.input_tokens': 6,
- 'gen_ai.usage.output_tokens': 10,
- 'gen_ai.usage.total_tokens': 16,
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Test streaming responses API',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Streaming response to: Test streaming responses APITest streaming responses API',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16,
'openai.response.id': 'resp_stream_456',
'openai.response.model': 'gpt-4',
'gen_ai.response.streaming': true,
@@ -277,22 +277,22 @@
'openai.usage.completion_tokens': 10,
'openai.usage.prompt_tokens': 6,
}),
- description: 'responses gpt-4 stream-response',
- op: 'gen_ai.responses',
+ description: 'chat gpt-4 stream-response',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
// Sixth span - error handling in streaming context with PII
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'chat',
- 'gen_ai.request.model': 'error-model',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
'gen_ai.request.stream': true,
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
- 'gen_ai.system': 'openai',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
},
description: 'chat error-model stream-response',
op: 'gen_ai.chat',
@@ -308,15 +308,15 @@ describe('OpenAI integration', () => {
// Check that custom options are respected
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
- 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
}),
}),
// Check that custom options are respected for streaming
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
- 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
'gen_ai.request.stream': true, // Should be marked as stream
}),
}),
@@ -359,16 +359,16 @@ describe('OpenAI integration', () => {
// First span - embeddings API
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'embeddings',
- 'sentry.op': 'gen_ai.embeddings',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'text-embedding-3-small',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
'gen_ai.request.encoding_format': 'float',
'gen_ai.request.dimensions': 1536,
- 'gen_ai.response.model': 'text-embedding-3-small',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.total_tokens': 10,
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
'openai.response.model': 'text-embedding-3-small',
'openai.usage.prompt_tokens': 10,
},
@@ -380,11 +380,11 @@ describe('OpenAI integration', () => {
// Second span - embeddings API error model
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'embeddings',
- 'sentry.op': 'gen_ai.embeddings',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'error-model',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
},
description: 'embeddings error-model',
op: 'gen_ai.embeddings',
@@ -400,17 +400,17 @@ describe('OpenAI integration', () => {
// First span - embeddings API with PII
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'embeddings',
- 'sentry.op': 'gen_ai.embeddings',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'text-embedding-3-small',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
'gen_ai.request.encoding_format': 'float',
'gen_ai.request.dimensions': 1536,
'gen_ai.embeddings.input': 'Embedding test!',
- 'gen_ai.response.model': 'text-embedding-3-small',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.total_tokens': 10,
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
'openai.response.model': 'text-embedding-3-small',
'openai.usage.prompt_tokens': 10,
},
@@ -422,11 +422,11 @@ describe('OpenAI integration', () => {
// Second span - embeddings API error model with PII
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'embeddings',
- 'sentry.op': 'gen_ai.embeddings',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'error-model',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
'gen_ai.embeddings.input': 'Error embedding test!',
},
description: 'embeddings error-model',
@@ -437,15 +437,15 @@ describe('OpenAI integration', () => {
// Third span - embeddings API with multiple inputs (this does not get truncated)
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'embeddings',
- 'sentry.op': 'gen_ai.embeddings',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'text-embedding-3-small',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
'gen_ai.embeddings.input': '["First input text","Second input text","Third input text"]',
- 'gen_ai.response.model': 'text-embedding-3-small',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.total_tokens': 10,
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
'openai.response.model': 'text-embedding-3-small',
'openai.usage.prompt_tokens': 10,
},
@@ -493,18 +493,18 @@ describe('OpenAI integration', () => {
span_id: expect.any(String),
trace_id: expect.any(String),
data: {
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-3.5-turbo',
- 'gen_ai.request.temperature': 0.7,
- 'gen_ai.response.model': 'gpt-3.5-turbo',
- 'gen_ai.response.id': 'chatcmpl-mock123',
- 'gen_ai.response.finish_reasons': '["stop"]',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 15,
- 'gen_ai.usage.total_tokens': 25,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+ [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
'openai.response.id': 'chatcmpl-mock123',
'openai.response.model': 'gpt-3.5-turbo',
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
@@ -540,18 +540,18 @@ describe('OpenAI integration', () => {
span_id: expect.any(String),
trace_id: expect.any(String),
data: {
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-3.5-turbo',
- 'gen_ai.request.temperature': 0.7,
- 'gen_ai.response.model': 'gpt-3.5-turbo',
- 'gen_ai.response.id': 'chatcmpl-mock123',
- 'gen_ai.response.finish_reasons': '["stop"]',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 15,
- 'gen_ai.usage.total_tokens': 25,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+ [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
'openai.response.id': 'chatcmpl-mock123',
'openai.response.model': 'gpt-3.5-turbo',
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
@@ -585,13 +585,13 @@ describe('OpenAI integration', () => {
// First call: Last message is large and gets truncated (only C's remain, D's are cropped)
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-3.5-turbo',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
// Messages should be present (truncation happened) and should be a JSON array of a single index
- 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
}),
description: 'chat gpt-3.5-turbo',
op: 'gen_ai.chat',
@@ -601,13 +601,13 @@ describe('OpenAI integration', () => {
// Second call: Last message is small and kept without truncation
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-3.5-turbo',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
// Small message should be kept intact
- 'gen_ai.request.messages': JSON.stringify([
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([
{ role: 'user', content: 'This is a small message that fits within the limit' },
]),
}),
@@ -639,16 +639,16 @@ describe('OpenAI integration', () => {
spans: expect.arrayContaining([
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-3.5-turbo',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
// Messages should be present and should include truncated string input (contains only As)
- 'gen_ai.request.messages': expect.stringMatching(/^A+$/),
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/),
}),
- description: 'responses gpt-3.5-turbo',
- op: 'gen_ai.responses',
+ description: 'chat gpt-3.5-turbo',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
@@ -668,30 +668,30 @@ describe('OpenAI integration', () => {
// First span - conversations.create returns conversation object with id
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'conversations',
- 'sentry.op': 'gen_ai.conversations',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
// The conversation ID should be captured from the response
'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
}),
- description: 'conversations unknown',
- op: 'gen_ai.conversations',
+ description: 'chat unknown',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
// Second span - responses.create with conversation parameter
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
// The conversation ID should be captured from the request
'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
}),
- op: 'gen_ai.responses',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
@@ -700,22 +700,22 @@
data: expect.not.objectContaining({
'gen_ai.conversation.id': expect.anything(),
}),
- op: 'gen_ai.responses',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
// Fourth span - responses.create with previous_response_id (chaining)
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
- 'sentry.origin': 'auto.ai.openai',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
// The previous_response_id should be captured as conversation.id
'gen_ai.conversation.id': 'resp_mock_conv_123',
}),
- op: 'gen_ai.responses',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
index 9b4120b143e4..4115738a19c5 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
@@ -38,8 +38,8 @@ describe('OpenAI integration (V6)', () => {
// Second span - responses API
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
+ 'gen_ai.operation.name': 'chat',
+ 'sentry.op': 'gen_ai.chat',
'sentry.origin': 'auto.ai.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-3.5-turbo',
@@ -55,8 +55,8 @@ describe('OpenAI integration (V6)', () => {
'openai.usage.completion_tokens': 8,
'openai.usage.prompt_tokens': 5,
},
- description: 'responses gpt-3.5-turbo',
- op: 'gen_ai.responses',
+ description: 'chat gpt-3.5-turbo',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
@@ -105,8 +105,8 @@ describe('OpenAI integration (V6)', () => {
// Fifth span - responses API streaming
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
+ 'gen_ai.operation.name': 'chat',
+ 'sentry.op': 'gen_ai.chat',
'sentry.origin': 'auto.ai.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
@@ -124,8 +124,8 @@ describe('OpenAI integration (V6)', () => {
'openai.usage.completion_tokens': 10,
'openai.usage.prompt_tokens': 6,
},
- description: 'responses gpt-4 stream-response',
- op: 'gen_ai.responses',
+ description: 'chat gpt-4 stream-response',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
@@ -182,8 +182,8 @@ describe('OpenAI integration (V6)', () => {
// Second span - responses API with PII
expect.objectContaining({
data: {
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
+ 'gen_ai.operation.name': 'chat',
+ 'sentry.op': 'gen_ai.chat',
'sentry.origin': 'auto.ai.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-3.5-turbo',
@@ -201,8 +201,8 @@ describe('OpenAI integration (V6)', () => {
'openai.usage.completion_tokens': 8,
'openai.usage.prompt_tokens': 5,
},
- description: 'responses gpt-3.5-turbo',
- op: 'gen_ai.responses',
+ description: 'chat gpt-3.5-turbo',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
@@ -256,8 +256,8 @@ describe('OpenAI integration (V6)', () => {
// Fifth span - responses API streaming with PII
expect.objectContaining({
data: expect.objectContaining({
- 'gen_ai.operation.name': 'responses',
- 'sentry.op': 'gen_ai.responses',
+ 'gen_ai.operation.name': 'chat',
+ 'sentry.op': 'gen_ai.chat',
'sentry.origin': 'auto.ai.openai',
'gen_ai.system': 'openai',
'gen_ai.request.model': 'gpt-4',
@@ -277,8 +277,8 @@ describe('OpenAI integration (V6)', () => {
'openai.usage.completion_tokens': 10,
'openai.usage.prompt_tokens': 6,
}),
- description: 'responses gpt-4 stream-response',
- op: 'gen_ai.responses',
+ description: 'chat gpt-4 stream-response',
+ op: 'gen_ai.chat',
origin: 'auto.ai.openai',
status: 'ok',
}),
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts
index 2e8e8711e9e9..ac6614af7502 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts
@@ -24,7 +24,7 @@ describe('Vercel AI integration - generateObject', () => {
'gen_ai.usage.input_tokens': 15,
'gen_ai.usage.output_tokens': 25,
'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateObject',
+ 'gen_ai.operation.name': 'invoke_agent',
'sentry.op': 'gen_ai.invoke_agent',
'sentry.origin': 'auto.vercelai.otel',
}),
@@ -38,7 +38,7 @@ describe('Vercel AI integration - generateObject', () => {
data: expect.objectContaining({
'sentry.origin': 'auto.vercelai.otel',
'sentry.op': 'gen_ai.generate_object',
- 'gen_ai.operation.name': 'ai.generateObject.doGenerate',
+ 'gen_ai.operation.name': 'generate_content',
'vercel.ai.operationId': 'ai.generateObject.doGenerate',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.model.id': 'mock-model-id',
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 8112bcadd5f5..1ab0b49032e1 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -13,14 +13,14 @@ describe('Vercel AI integration', () => {
// First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -37,17 +37,17 @@ describe('Vercel AI integration', () => {
// Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -68,16 +68,16 @@ describe('Vercel AI integration', () => {
data: {
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': expect.any(String),
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -95,20 +95,20 @@ describe('Vercel AI integration', () => {
// Fourth span - doGenerate for explicit telemetry enabled call
expect.objectContaining({
data: {
- 'gen_ai.request.messages': expect.any(String),
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
'gen_ai.request.messages.original_length': expect.any(Number),
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': expect.any(String),
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -128,14 +128,14 @@ describe('Vercel AI integration', () => {
// Fifth span - tool call generateText span
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -152,17 +152,17 @@ describe('Vercel AI integration', () => {
// Sixth span - tool call doGenerate span
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['tool-calls'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -182,11 +182,11 @@ describe('Vercel AI integration', () => {
expect.objectContaining({
data: {
'gen_ai.tool.call.id': 'call-1',
- 'gen_ai.tool.name': 'getWeather',
+ [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
'gen_ai.tool.type': 'function',
- 'gen_ai.operation.name': 'ai.toolCall',
- 'sentry.op': 'gen_ai.execute_tool',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.operationId': 'ai.toolCall',
},
description: 'execute_tool getWeather',
@@ -208,16 +208,16 @@ describe('Vercel AI integration', () => {
data: {
'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': 'First span here!',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -241,19 +241,19 @@ describe('Vercel AI integration', () => {
expect.objectContaining({
data: {
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': 'First span here!',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -280,16 +280,16 @@ describe('Vercel AI integration', () => {
data: {
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': expect.any(String),
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -312,20 +312,20 @@ describe('Vercel AI integration', () => {
// Fourth span - doGenerate for explicitly enabled telemetry call
expect.objectContaining({
data: {
- 'gen_ai.request.messages': expect.any(String),
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
'gen_ai.request.messages.original_length': expect.any(Number),
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': expect.any(String),
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -352,17 +352,17 @@ describe('Vercel AI integration', () => {
data: {
'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': 'Tool call completed!',
- 'gen_ai.response.tool_calls': expect.any(String),
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!',
+ [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -386,21 +386,21 @@ describe('Vercel AI integration', () => {
expect.objectContaining({
data: {
'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
- 'gen_ai.request.messages': expect.any(String),
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
'gen_ai.request.messages.original_length': expect.any(Number),
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['tool-calls'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': 'Tool call completed!',
- 'gen_ai.response.tool_calls': expect.any(String),
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!',
+ [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -428,12 +428,12 @@ describe('Vercel AI integration', () => {
data: {
'gen_ai.tool.call.id': 'call-1',
'gen_ai.tool.input': expect.any(String),
- 'gen_ai.tool.name': 'getWeather',
+ [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
'gen_ai.tool.output': expect.any(String),
'gen_ai.tool.type': 'function',
- 'gen_ai.operation.name': 'ai.toolCall',
- 'sentry.op': 'gen_ai.execute_tool',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.operationId': 'ai.toolCall',
},
description: 'execute_tool getWeather',
@@ -468,14 +468,14 @@ describe('Vercel AI integration', () => {
spans: expect.arrayContaining([
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -490,17 +490,17 @@ describe('Vercel AI integration', () => {
}),
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['tool-calls'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -519,11 +519,11 @@ describe('Vercel AI integration', () => {
expect.objectContaining({
data: {
'gen_ai.tool.call.id': 'call-1',
- 'gen_ai.tool.name': 'getWeather',
+ [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
'gen_ai.tool.type': 'function',
- 'gen_ai.operation.name': 'ai.toolCall',
- 'sentry.op': 'gen_ai.execute_tool',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.operationId': 'ai.toolCall',
},
description: 'execute_tool getWeather',
@@ -588,14 +588,14 @@ describe('Vercel AI integration', () => {
spans: expect.arrayContaining([
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
@@ -610,17 +610,17 @@ describe('Vercel AI integration', () => {
}),
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.finish_reasons': ['tool-calls'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -639,11 +639,11 @@ describe('Vercel AI integration', () => {
expect.objectContaining({
data: {
'gen_ai.tool.call.id': 'call-1',
- 'gen_ai.tool.name': 'getWeather',
+ [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
'gen_ai.tool.type': 'function',
- 'gen_ai.operation.name': 'ai.toolCall',
- 'sentry.op': 'gen_ai.execute_tool',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
'vercel.ai.operationId': 'ai.toolCall',
},
description: 'execute_tool getWeather',
@@ -720,9 +720,9 @@ describe('Vercel AI integration', () => {
origin: 'auto.vercelai.otel',
status: 'ok',
data: expect.objectContaining({
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
- 'gen_ai.operation.name': 'ai.generateText',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
}),
}),
// The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing
@@ -732,9 +732,9 @@
origin: 'auto.vercelai.otel',
status: 'ok',
data: expect.objectContaining({
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
}),
}),
]),
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index 179644bbcd73..49a1237079d1 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -13,20 +13,20 @@ describe('Vercel AI integration (V5)', () => {
// First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generateText',
op: 'gen_ai.invoke_agent',
@@ -36,26 +36,26 @@ describe('Vercel AI integration (V5)', () => {
// Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
expect.objectContaining({
data: {
- 'sentry.origin': 'auto.vercelai.otel',
- 'sentry.op': 'gen_ai.generate_text',
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.model.provider': 'mock-provider',
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.settings.maxRetries': 2,
- 'gen_ai.system': 'mock-provider',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
'vercel.ai.streaming': false,
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.response.model': 'mock-model-id',
'vercel.ai.response.id': expect.any(String),
'vercel.ai.response.timestamp': expect.any(String),
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.total_tokens': 30,
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
},
description: 'generate_text mock-model-id',
op: 'gen_ai.generate_text',
@@ -65,25 +65,25 @@ describe('Vercel AI integration (V5)', () => {
// Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'vercel.ai.response.finishReason': 'stop',
- 'gen_ai.response.text': expect.any(String),
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generateText',
op: 'gen_ai.invoke_agent',
@@ -93,29 +93,29 @@ describe('Vercel AI integration (V5)', () => {
// Fourth span - doGenerate for explicit telemetry enabled call
expect.objectContaining({
data: {
- 'sentry.origin': 'auto.vercelai.otel',
- 'sentry.op': 'gen_ai.generate_text',
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.model.provider': 'mock-provider',
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.settings.maxRetries': 2,
- 'gen_ai.system': 'mock-provider',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
'vercel.ai.streaming': false,
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.response.model': 'mock-model-id',
'vercel.ai.response.id': expect.any(String),
- 'gen_ai.response.text': expect.any(String),
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.response.timestamp': expect.any(String),
'gen_ai.request.messages.original_length': expect.any(Number),
- 'gen_ai.request.messages': expect.any(String),
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.total_tokens': 30,
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
},
description: 'generate_text mock-model-id',
op: 'gen_ai.generate_text',
@@ -125,20 +125,20 @@ describe('Vercel AI integration (V5)', () => {
// Fifth span - tool call generateText span
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.response.finishReason': 'tool-calls',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generateText',
op: 'gen_ai.invoke_agent',
@@ -148,7 +148,7 @@ describe('Vercel AI integration (V5)', () => {
// Sixth span - tool call doGenerate span
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -158,16 +158,16 @@ describe('Vercel AI integration (V5)', () => {
'vercel.ai.response.timestamp': expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
- 'gen_ai.response.finish_reasons': ['tool-calls'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generate_text mock-model-id',
op: 'gen_ai.generate_text',
@@ -179,11 +179,11 @@ describe('Vercel AI integration (V5)', () => {
data: {
'vercel.ai.operationId': 'ai.toolCall',
'gen_ai.tool.call.id': 'call-1',
- 'gen_ai.tool.name': 'getWeather',
+ [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
'gen_ai.tool.type': 'function',
- 'gen_ai.operation.name': 'ai.toolCall',
- 'sentry.op': 'gen_ai.execute_tool',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'execute_tool getWeather',
op: 'gen_ai.execute_tool',
@@ -202,25 +202,25 @@ describe('Vercel AI integration (V5)', () => {
// First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
'vercel.ai.response.finishReason': 'stop',
- 'gen_ai.response.text': 'First span here!',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generateText',
op: 'gen_ai.invoke_agent',
@@ -230,29 +230,29 @@ describe('Vercel AI integration (V5)', () => {
// Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.response.id': expect.any(String),
'vercel.ai.response.model': 'mock-model-id',
- 'gen_ai.response.text': 'First span here!',
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
'vercel.ai.response.timestamp': expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
- 'gen_ai.response.finish_reasons': ['stop'],
- 'gen_ai.response.id': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
- 'sentry.op': 'gen_ai.generate_text',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+ [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generate_text mock-model-id',
op: 'gen_ai.generate_text',
@@ -262,25 +262,25 @@ describe('Vercel AI integration (V5)', () => {
// Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
expect.objectContaining({
data: {
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.model.provider': 'mock-provider',
'vercel.ai.operationId': 'ai.generateText',
'vercel.ai.pipeline.name': 'generateText',
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
'gen_ai.request.messages.original_length': 1,
- 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
+ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
'vercel.ai.response.finishReason': 'stop',
- 'gen_ai.response.text': expect.any(String),
+ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
'vercel.ai.settings.maxRetries': 2,
'vercel.ai.streaming': false,
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 10,
- 'gen_ai.usage.output_tokens': 20,
- 'gen_ai.usage.total_tokens': 30,
- 'gen_ai.operation.name': 'ai.generateText',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
+ [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+ [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+ [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
},
description: 'generateText',
op: 'gen_ai.invoke_agent',
@@ -290,29 +290,29 @@ describe('Vercel AI integration (V5)', () => {
// Fourth span - doGenerate for explicitly enabled telemetry call
expect.objectContaining({
data: {
- 'sentry.origin': 'auto.vercelai.otel',
- 'sentry.op': 'gen_ai.generate_text',
- 'gen_ai.operation.name': 'ai.generateText.doGenerate',
+ [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
'vercel.ai.operationId': 'ai.generateText.doGenerate',
'vercel.ai.model.provider': 'mock-provider',
- 'gen_ai.request.model': 'mock-model-id',
+ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
'vercel.ai.settings.maxRetries': 2,
- 'gen_ai.system': 'mock-provider',
+ [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
'vercel.ai.pipeline.name': 'generateText.doGenerate',
'vercel.ai.streaming': false,
'vercel.ai.response.finishReason': 'stop',
'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -322,25 +322,25 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -349,34 +349,34 @@ describe('Vercel AI integration (V5)', () => { }), // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ - data: { - 'gen_ai.request.model': 'mock-model-id', + data: expect.objectContaining({ + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, 
'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set + // [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', - }, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', origin: 'auto.vercelai.otel', @@ -384,17 +384,17 @@ describe('Vercel AI integration (V5)', () => { }), // Seventh span - tool call execution span expect.objectContaining({ - data: { + data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', - }, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', origin: 'auto.vercelai.otel', @@ -446,19 +446,19 @@ describe('Vercel AI integration (V5)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 
'vercel.ai.response.finishReason': 'tool-calls', }, description: 'generateText', @@ -467,7 +467,7 @@ describe('Vercel AI integration (V5)', () => { }), expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -477,16 +477,16 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -497,11 +497,11 @@ describe('Vercel AI integration (V5)', () => { data: { 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 98a16618d77d..def23e825b62 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -13,7 +13,7 @@ describe('Vercel AI integration (V6)', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -21,13 +21,13 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + 
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -37,27 +37,27 @@ describe('Vercel AI integration (V6)', () => { // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -67,25 +67,25 @@ describe('Vercel AI integration (V6)', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + 
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -95,29 +95,29 @@ describe('Vercel AI integration (V6)', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -127,7 +127,7 @@ describe('Vercel AI integration (V6)', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -135,13 +135,13 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -151,7 +151,7 @@ describe('Vercel AI integration (V6)', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -162,16 +162,16 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -183,11 +183,11 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -206,25 +206,25 @@ describe('Vercel AI integration (V6)', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 
'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -234,29 +234,29 @@ describe('Vercel AI integration (V6)', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -266,25 +266,25 @@ describe('Vercel AI integration (V6)', () => { // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - 
'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -294,29 +294,29 @@ describe('Vercel AI integration (V6)', () => { // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -326,25 +326,25 @@ describe('Vercel AI integration (V6)', () => { // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather 
in San Francisco?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -354,32 +354,32 @@ describe('Vercel AI integration (V6)', () => { // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set + // [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -391,13 +391,13 @@ describe('Vercel AI integration (V6)', () => { 
data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -450,20 +450,20 @@ describe('Vercel AI integration (V6)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', }), description: 'generateText', @@ -472,7 +472,7 @@ describe('Vercel AI integration (V6)', () => { }), expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -483,16 +483,16 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -503,11 +503,11 @@ describe('Vercel AI integration (V6)', () => { 
data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index 4fa7274d7281..8fbc0f30c014 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -232,6 +232,31 @@ export const GEN_AI_EMBED_MANY_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embed_many */ export const GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE = 'gen_ai.execute_tool'; +/** + * The tool name for tool call spans + */ +export const GEN_AI_TOOL_NAME_ATTRIBUTE = 'gen_ai.tool.name'; + +/** + * The tool call ID + */ +export const GEN_AI_TOOL_CALL_ID_ATTRIBUTE = 'gen_ai.tool.call.id'; + +/** + * The tool type (e.g., 'function') + */ +export const GEN_AI_TOOL_TYPE_ATTRIBUTE = 'gen_ai.tool.type'; + +/** + * The tool input/arguments + */ +export const GEN_AI_TOOL_INPUT_ATTRIBUTE = 'gen_ai.tool.input'; + +/** + * The tool output/result + */ +export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output'; + // ============================================================================= // OPENAI-SPECIFIC ATTRIBUTES // ============================================================================= @@ -266,13 +291,12 @@ export const OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'openai.usage.prompt_tokens' // ============================================================================= /** - * OpenAI API operations + * OpenAI API operations following OpenTelemetry semantic conventions + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export const OPENAI_OPERATIONS = { CHAT: 'chat', - RESPONSES: 'responses', EMBEDDINGS: 'embeddings', - CONVERSATIONS: 'conversations', } as const; // ============================================================================= diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts index 4a7a14eea554..53cd696ba909 100644 --- a/packages/core/src/tracing/ai/utils.ts +++ b/packages/core/src/tracing/ai/utils.ts @@ -9,15 +9,21 @@ import { } from './gen-ai-attributes'; import { truncateGenAiMessages, truncateGenAiStringInput } from './messageTruncation'; /** - * Maps AI method paths to Sentry operation name + * Maps AI method paths to OpenTelemetry semantic convention operation names + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export function getFinalOperationName(methodPath: string): string { if (methodPath.includes('messages')) { - return 'messages'; + return 'chat'; } if (methodPath.includes('completions')) { - return 'completions'; + return 'text_completion'; + } + // Google GenAI: models.generateContent* -> generate_content (actually generates AI responses) + if (methodPath.includes('generateContent')) { + return 'generate_content'; } + // Anthropic: models.get/retrieve -> models (metadata retrieval only) if (methodPath.includes('models')) { return 'models'; } diff --git 
a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index 1930be794be5..8cf12dfcb861 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -3,7 +3,13 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from ' import { SPAN_STATUS_ERROR } from '../../tracing'; import { startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; -import { GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE } from '../ai/gen-ai-attributes'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_TOOL_INPUT_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + GEN_AI_TOOL_OUTPUT_ATTRIBUTE, +} from '../ai/gen-ai-attributes'; import { LANGCHAIN_ORIGIN } from './constants'; import type { LangChainCallbackHandler, @@ -92,10 +98,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} ${modelName}`, - op: 'gen_ai.pipeline', + op: 'gen_ai.chat', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.pipeline', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', }, }, span => { @@ -241,12 +247,12 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): const toolName = tool.name || 'unknown_tool'; const attributes: Record<string, SpanAttributeValue> = { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, - 'gen_ai.tool.name': toolName, + [GEN_AI_TOOL_NAME_ATTRIBUTE]: toolName, }; // Add input if recordInputs is enabled if (recordInputs) { - attributes['gen_ai.tool.input'] = input; + attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE] = input; } startSpanManual( @@ -272,7 +278,7 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): // Add output if recordOutputs is enabled if (recordOutputs) { span.setAttributes({ - 'gen_ai.tool.output': JSON.stringify(output), + [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: JSON.stringify(output), }); } exitSpan(runId); diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 0a07ae8df370..d119de798950 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -216,18 +216,18 @@ function extractCommonRequestAttributes( /** * Small helper to assemble boilerplate attributes shared by both request extractors. + * Always uses 'chat' as the operation type for all LLM and chat model operations. */ function baseRequestAttributes( system: unknown, modelName: unknown, - operation: 'pipeline' | 'chat', serialized: LangChainSerialized, invocationParams?: Record<string, unknown>, langSmithMetadata?: Record<string, unknown>, ): Record<string, SpanAttributeValue> { return { [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'), [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata), @@ -237,7 +237,8 @@ function baseRequestAttributes( /** * Extracts attributes for plain LLM invocations (string prompts). * - * - Operation is tagged as `pipeline` to distinguish from chat-style invocations. + * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. + * LangChain LLM operations are treated as chat operations.
* - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}` * messages to align with the chat schema used elsewhere. */ @@ -251,7 +252,7 @@ export function extractLLMRequestAttributes( const system = langSmithMetadata?.ls_provider; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'pipeline', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); @@ -265,7 +266,8 @@ /** * Extracts attributes for ChatModel invocations (array-of-arrays of messages). * - * - Operation is tagged as `chat`. + * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. + * LangChain chat model operations are chat operations. * - We flatten LangChain's `LangChainMessage[][]` and normalize shapes into a * consistent `{ role, content }` array when `recordInputs` is true. * - Provider system value falls back to `serialized.id?.[2]`. @@ -280,7 +282,7 @@ export function extractChatModelRequestAttributes( const system = langSmithMetadata?.ls_provider ?? llm.id?.[2]; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'chat', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts index 007dd93a91b1..82494f7ae018 100644 --- a/packages/core/src/tracing/openai/utils.ts +++ b/packages/core/src/tracing/openai/utils.ts @@ -35,20 +35,21 @@ import type { } from './types'; /** - * Maps OpenAI method paths to Sentry operation names + * Maps OpenAI method paths to OpenTelemetry semantic convention operation names + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export function getOperationName(methodPath: string): string { if (methodPath.includes('chat.completions')) { return OPENAI_OPERATIONS.CHAT; } if (methodPath.includes('responses')) { - return OPENAI_OPERATIONS.RESPONSES; + return OPENAI_OPERATIONS.CHAT; } if (methodPath.includes('embeddings')) { return OPENAI_OPERATIONS.EMBEDDINGS; } if (methodPath.includes('conversations')) { - return OPENAI_OPERATIONS.CONVERSATIONS; + return OPENAI_OPERATIONS.CHAT; } return methodPath.split('.').pop() || 'unknown'; } diff --git a/packages/core/src/tracing/vercel-ai/constants.ts b/packages/core/src/tracing/vercel-ai/constants.ts index fe307b03e7fb..57e8bf2a57c8 100644 --- a/packages/core/src/tracing/vercel-ai/constants.ts +++ b/packages/core/src/tracing/vercel-ai/constants.ts @@ -3,3 +3,22 @@ import type { Span } from '../../types-hoist/span'; // Global Map to track tool call IDs to their corresponding spans // This allows us to capture tool errors and link them to the correct span export const toolCallSpanMap = new Map<string, Span>(); + +// Operation sets for efficient mapping to OpenTelemetry semantic convention values +export const INVOKE_AGENT_OPS = new Set([ + 'ai.generateText', +
'ai.streamText', + 'ai.generateObject', + 'ai.streamObject', + 'ai.embed', + 'ai.embedMany', +]); + +export const GENERATE_CONTENT_OPS = new Set([ + 'ai.generateText.doGenerate', + 'ai.streamText.doStream', + 'ai.generateObject.doGenerate', + 'ai.streamObject.doStream', +]); + +export const EMBEDDINGS_OPS = new Set(['ai.embed.doEmbed', 'ai.embedMany.doEmbed']); diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 9b95e8aa91ad..1a06937b8aae 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -8,12 +8,18 @@ import { GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_INPUT_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + GEN_AI_TOOL_OUTPUT_ATTRIBUTE, + GEN_AI_TOOL_TYPE_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import { toolCallSpanMap } from './constants'; +import { EMBEDDINGS_OPS, GENERATE_CONTENT_OPS, INVOKE_AGENT_OPS, toolCallSpanMap } from './constants'; import type { TokenSummary } from './types'; import { accumulateTokensForParent, @@ -48,6 +54,29 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin); } +/** + * Maps Vercel AI SDK operation names to OpenTelemetry semantic convention values + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans + */ +function mapVercelAiOperationName(operationName: string): string { + // Top-level pipeline operations map to invoke_agent + if (INVOKE_AGENT_OPS.has(operationName)) { + return 'invoke_agent'; + } + // .do* operations are the actual LLM calls + if (GENERATE_CONTENT_OPS.has(operationName)) { + return 'generate_content'; + } + if (EMBEDDINGS_OPS.has(operationName)) { + return 'embeddings'; + } + if (operationName === 'ai.toolCall') { + return 'execute_tool'; + } + // Return the original value for unknown operations + return operationName; +} + /** * Post-process spans emitted by the Vercel AI SDK. * This is supposed to be used in `client.on('spanStart', ...) 
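
Reviewer note: taken together, the three operation sets plus the `ai.toolCall` special case make `mapVercelAiOperationName` a total mapping from Vercel AI SDK operation IDs to OpenTelemetry operation names, with unknown IDs passed through unchanged. A minimal vitest-style sketch of the expected behavior follows; the test file and import path are hypothetical, and the helper is module-private in this patch, so it would need to be exported for a test like this:

import { describe, expect, it } from 'vitest';
// Hypothetical import path, assuming the helper were exported for testing.
import { mapVercelAiOperationName } from '../src/tracing/vercel-ai';

describe('mapVercelAiOperationName', () => {
  it('maps SDK operation IDs to OTel semantic convention names', () => {
    // Top-level pipeline operations -> invoke_agent
    expect(mapVercelAiOperationName('ai.generateText')).toBe('invoke_agent');
    // .do* operations (the actual LLM calls) -> generate_content
    expect(mapVercelAiOperationName('ai.streamText.doStream')).toBe('generate_content');
    // Embedding .doEmbed operations -> embeddings
    expect(mapVercelAiOperationName('ai.embedMany.doEmbed')).toBe('embeddings');
    // Tool calls -> execute_tool
    expect(mapVercelAiOperationName('ai.toolCall')).toBe('execute_tool');
    // Unknown operation IDs are returned unchanged
    expect(mapVercelAiOperationName('ai.someFutureOp')).toBe('ai.someFutureOp');
  });
});
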
@@ -133,7 +162,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' ) { - attributes['gen_ai.usage.total_tokens'] = + attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; } @@ -145,15 +174,21 @@ function processEndedVercelAiSpan(span: SpanJSON): void { } // Rename AI SDK attributes to standardized gen_ai attributes - renameAttributeKey(attributes, OPERATION_NAME_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE); + // Map operation.name to OpenTelemetry semantic convention values + if (attributes[OPERATION_NAME_ATTRIBUTE]) { + const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string); + attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[OPERATION_NAME_ATTRIBUTE]; + } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); - renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, 'gen_ai.tool.input'); - renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output'); + renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE); + renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE); renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema'); renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE); @@ -183,22 +218,23 @@ function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, function processToolCallSpan(span: Span, attributes: SpanAttributes): void { addOriginToSpan(span, 'auto.vercelai.otel'); span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); - renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, 'gen_ai.tool.name'); - renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, 'gen_ai.tool.call.id'); + span.setAttribute(GEN_AI_OPERATION_NAME_ATTRIBUTE, 'execute_tool'); + renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE); + renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE); // Store the span in our global map using the tool call ID // This allows us to capture tool errors and link them to the correct span - const toolCallId = attributes['gen_ai.tool.call.id']; + const toolCallId = attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE]; if (typeof toolCallId === 'string') { toolCallSpanMap.set(toolCallId, span); } // https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-type - if (!attributes['gen_ai.tool.type']) { - span.setAttribute('gen_ai.tool.type', 'function'); + if (!attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE]) { + span.setAttribute(GEN_AI_TOOL_TYPE_ATTRIBUTE, 'function'); } - const toolName = attributes['gen_ai.tool.name']; + const toolName = attributes[GEN_AI_TOOL_NAME_ATTRIBUTE]; if (toolName) { span.updateName(`execute_tool ${toolName}`); } diff --git 
a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts index ff951e8be40b..25cd873ace08 100644 --- a/packages/core/test/lib/utils/openai-utils.test.ts +++ b/packages/core/test/lib/utils/openai-utils.test.ts @@ -18,14 +18,14 @@ describe('openai-utils', () => { expect(getOperationName('some.path.chat.completions.method')).toBe('chat'); }); - it('should return responses for responses methods', () => { - expect(getOperationName('responses.create')).toBe('responses'); - expect(getOperationName('some.path.responses.method')).toBe('responses'); + it('should return chat for responses methods', () => { + expect(getOperationName('responses.create')).toBe('chat'); + expect(getOperationName('some.path.responses.method')).toBe('chat'); }); - it('should return conversations for conversations methods', () => { - expect(getOperationName('conversations.create')).toBe('conversations'); - expect(getOperationName('some.path.conversations.method')).toBe('conversations'); + it('should return chat for conversations methods', () => { + expect(getOperationName('conversations.create')).toBe('chat'); + expect(getOperationName('some.path.conversations.method')).toBe('chat'); }); it('should return the last part of path for unknown methods', () => { @@ -41,7 +41,7 @@ describe('openai-utils', () => { describe('getSpanOperation', () => { it('should prefix operation with gen_ai', () => { expect(getSpanOperation('chat.completions.create')).toBe('gen_ai.chat'); - expect(getSpanOperation('responses.create')).toBe('gen_ai.responses'); + expect(getSpanOperation('responses.create')).toBe('gen_ai.chat'); expect(getSpanOperation('some.custom.operation')).toBe('gen_ai.operation'); }); });
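
Reviewer note: an equivalent quick check for the shared `getFinalOperationName` helper in `packages/core/src/tracing/ai/utils.ts`, where branch order matters: the `generateContent` check must run before the generic `models` check so that Google GenAI content generation is not misclassified as metadata retrieval. A sketch only; the vitest import path is illustrative, not part of this patch:

import { describe, expect, it } from 'vitest';
// Illustrative import path for the exported helper shown in the ai/utils.ts hunk above.
import { getFinalOperationName } from '../src/tracing/ai/utils';

describe('getFinalOperationName', () => {
  it('maps provider method paths to OTel operation names', () => {
    // Anthropic messages API -> chat
    expect(getFinalOperationName('messages.create')).toBe('chat');
    // Legacy completions -> text_completion
    expect(getFinalOperationName('completions.create')).toBe('text_completion');
    // Google GenAI content generation -> generate_content (checked before 'models')
    expect(getFinalOperationName('models.generateContent')).toBe('generate_content');
    // Model metadata retrieval stays 'models'
    expect(getFinalOperationName('models.retrieve')).toBe('models');
  });
});
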