From 8821f063e034783dc72f18f2d68b156789561ff8 Mon Sep 17 00:00:00 2001 From: Nicolas Hrubec Date: Fri, 23 Jan 2026 14:31:23 +0100 Subject: [PATCH 1/8] ref(tests): use constants in ai integration tests and add missing ones (#18945) While working on the `gen_ai.request.messages` rename I noticed that we hardcode all the attributes in our ai integration tests (both node and cloudflare), meaning that we currently need to change each individual test just to rename one attribute name. This PR refactors this to use the attributes from our `gen-ai-attributes.ts`. I also added some missing entries for attributes that we currently hardcode in the integrations. Closes #18951 (added automatically) --- .../suites/tracing/anthropic-ai/test.ts | 31 +- .../suites/tracing/google-genai/test.ts | 66 ++- .../suites/tracing/langchain/test.ts | 42 +- .../suites/tracing/langgraph/test.ts | 44 +- .../suites/tracing/openai/test.ts | 37 +- .../suites/tracing/anthropic/test.ts | 428 ++++++++-------- .../suites/tracing/google-genai/test.ts | 440 +++++++++-------- .../suites/tracing/langchain/test.ts | 224 +++++---- .../suites/tracing/langchain/v1/test.ts | 4 +- .../suites/tracing/langgraph/test.ts | 180 +++---- .../tracing/openai/openai-tool-calls/test.ts | 32 +- .../suites/tracing/openai/test.ts | 462 +++++++++--------- .../suites/tracing/openai/v6/test.ts | 32 +- .../tracing/vercelai/test-generate-object.ts | 4 +- .../suites/tracing/vercelai/test.ts | 376 +++++++------- .../suites/tracing/vercelai/v5/test.ts | 318 ++++++------ .../suites/tracing/vercelai/v6/test.ts | 318 ++++++------ .../core/src/tracing/ai/gen-ai-attributes.ts | 30 +- packages/core/src/tracing/ai/utils.ts | 7 +- packages/core/src/tracing/langchain/index.ts | 14 +- packages/core/src/tracing/langchain/utils.ts | 12 +- packages/core/src/tracing/openai/utils.ts | 7 +- packages/core/src/tracing/vercel-ai/index.ts | 64 ++- 23 files changed, 1682 insertions(+), 1490 deletions(-) diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts index c9e112b32241..d8087ebf79fe 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts @@ -1,4 +1,15 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { expect, it } from 'vitest'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { createRunner } from '../../../runner'; // These tests are not exhaustive because the instrumentation is @@ -17,16 +28,16 @@ it('traces a basic message creation request', async ({ signal }) => { expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'msg_mock123', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, + 
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, }), description: 'messages claude-3-haiku-20240307', op: 'gen_ai.messages', diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts index 3c36e832a17a..4579baf7d5de 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts @@ -1,4 +1,16 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { expect, it } from 'vitest'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { createRunner } from '../../../runner'; // These tests are not exhaustive because the instrumentation is @@ -18,14 +30,14 @@ it('traces Google GenAI chat creation and message sending', async () => { // First span - chats.create expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 150, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, }), description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -34,14 +46,14 @@ it('traces Google GenAI chat creation and message sending', async () => { // Second span - chat.sendMessage expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), description: 'chat gemini-1.5-pro', op: 'gen_ai.chat', @@ -50,17 +62,17 @@ it('traces Google GenAI chat creation 
and message sending', async () => { // Third span - models.generateContent expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), description: 'models gemini-1.5-flash', op: 'gen_ai.models', diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts index 875b4191b84b..d4abc4ae7220 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts @@ -1,4 +1,16 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { expect, it } from 'vitest'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { createRunner } from '../../../runner'; // These tests are not exhaustive because the instrumentation is @@ -18,16 +30,16 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } // Chat model span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -36,8 +48,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } // Chain span expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.ai.langchain', - 'sentry.op': 'gen_ai.invoke_agent', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', 'langchain.chain.name': 'my_test_chain', }), description: 'chain my_test_chain', @@ -47,9 +59,9 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } // Tool span expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.ai.langchain', - 'sentry.op': 'gen_ai.execute_tool', - 'gen_ai.tool.name': 'search_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'search_tool', }), description: 'execute_tool search_tool', op: 'gen_ai.execute_tool', diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts index 33023b30fa55..da9635952632 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts @@ -1,4 +1,16 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { expect, it } from 'vitest'; +import { + GEN_AI_AGENT_NAME_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PIPELINE_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { createRunner } from '../../../runner'; // These tests are not exhaustive because the instrumentation is @@ -18,10 +30,10 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => { const createAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.create_agent'); expect(createAgentSpan).toMatchObject({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 'gen_ai.create_agent', @@ -32,16 +44,16 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => { const invokeAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.invoke_agent'); expect(invokeAgentSpan).toMatchObject({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in SF?"}]', - 'gen_ai.response.model': 'mock-model', - 'gen_ai.usage.input_tokens': 20, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the 
weather in SF?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -49,8 +61,8 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => { }); // Verify tools are captured - if (invokeAgentSpan.data['gen_ai.request.available_tools']) { - expect(invokeAgentSpan.data['gen_ai.request.available_tools']).toMatch(/get_weather/); + if (invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]) { + expect(invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toMatch(/get_weather/); } }) .start(signal); diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts index eb15fd80fc97..1c057e1a986c 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts @@ -1,4 +1,17 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { expect, it } from 'vitest'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { createRunner } from '../../../runner'; // These tests are not exhaustive because the instrumentation is @@ -17,18 +30,18 @@ it('traces a basic chat completion request', async ({ signal }) => { expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.finish_reasons': '["stop"]', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', }), description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index a70e51858113..dac3af573b0a 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -12,49 +12,49 @@ describe('Anthropic integration', () => { // First span - basic message completion without PII 
expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'msg_mock123', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), // Second span - error handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }), - description: 'messages error-model', - op: 'gen_ai.messages', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'internal_error', }), // Third span - token counting (no response.text because recordOutputs=false by default) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -62,13 +62,13 @@ describe('Anthropic integration', () => { expect.objectContaining({ data: expect.objectContaining({ 'anthropic.response.timestamp': '2024-05-08T05:20:00.000Z', - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'claude-3-haiku-20240307', - 'gen_ai.response.model': 'claude-3-haiku-20240307', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 
'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', }), description: 'models claude-3-haiku-20240307', op: 'gen_ai.models', @@ -84,23 +84,23 @@ describe('Anthropic integration', () => { // First span - basic message completion with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.id': 'msg_mock123', - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.text': 'Hello from Anthropic mock!', - 'gen_ai.system': 'anthropic', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from Anthropic mock!', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -111,8 +111,8 @@ describe('Anthropic integration', () => { 'http.response.header.content-length': 247, 'http.response.status_code': 200, 'otel.kind': 'CLIENT', - 'sentry.op': 'http.client', - 'sentry.origin': 'auto.http.otel.node_fetch', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch', 'url.path': '/anthropic/v1/messages', 'url.query': '', 'url.scheme': 'http', @@ -125,15 +125,15 @@ describe('Anthropic integration', () => { // Second - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', - 'gen_ai.request.model': 'error-model', - 'gen_ai.system': 'anthropic', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages error-model', - op: 'gen_ai.messages', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'internal_error', }), @@ -144,8 +144,8 @@ describe('Anthropic integration', () => { 'http.response.header.content-length': 15, 'http.response.status_code': 404, 'otel.kind': 'CLIENT', - 'sentry.op': 'http.client', - 
'sentry.origin': 'auto.http.otel.node_fetch', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch', 'url.path': '/anthropic/v1/messages', 'url.query': '', 'url.scheme': 'http', @@ -158,16 +158,16 @@ describe('Anthropic integration', () => { // Third - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.response.text': '15', - 'gen_ai.system': 'anthropic', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -178,8 +178,8 @@ describe('Anthropic integration', () => { 'http.response.header.content-length': 19, 'http.response.status_code': 200, 'otel.kind': 'CLIENT', - 'sentry.op': 'http.client', - 'sentry.origin': 'auto.http.otel.node_fetch', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch', 'url.path': '/anthropic/v1/messages/count_tokens', 'url.query': '', 'url.scheme': 'http', @@ -193,13 +193,13 @@ describe('Anthropic integration', () => { expect.objectContaining({ data: expect.objectContaining({ 'anthropic.response.timestamp': '2024-05-08T05:20:00.000Z', - 'gen_ai.operation.name': 'models', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'claude-3-haiku-20240307', - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.system': 'anthropic', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), description: 'models claude-3-haiku-20240307', op: 'gen_ai.models', @@ -213,8 +213,8 @@ describe('Anthropic integration', () => { 'http.response.header.content-length': 123, 'http.response.status_code': 200, 'otel.kind': 'CLIENT', - 'sentry.op': 'http.client', - 'sentry.origin': 'auto.http.otel.node_fetch', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch', 'url.path': '/anthropic/v1/models/claude-3-haiku-20240307', 'url.query': '', 'url.scheme': 'http', @@ -228,23 +228,23 @@ describe('Anthropic integration', () => { // Fifth - messages.create with stream: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 
'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.stream': true, - 'gen_ai.response.id': 'msg_stream123', - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.streaming': true, - 'gen_ai.response.text': 'Hello from stream!', - 'gen_ai.system': 'anthropic', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream123', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -254,8 +254,8 @@ describe('Anthropic integration', () => { 'http.request.method_original': 'POST', 'http.response.status_code': 200, 'otel.kind': 'CLIENT', - 'sentry.op': 'http.client', - 'sentry.origin': 'auto.http.otel.node_fetch', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch', 'url.path': '/anthropic/v1/messages', 'url.query': '', 'url.scheme': 'http', @@ -269,12 +269,12 @@ describe('Anthropic integration', () => { // Sixth - messages.stream expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.stream': true, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, }), - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -287,27 +287,27 @@ describe('Anthropic integration', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), }), // Check token counting with options expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': '15', // Present because recordOutputs=true is set in options + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 
expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', // Present because recordOutputs=true is set in options }), - op: 'gen_ai.messages', + op: 'gen_ai.chat', }), // Check models.retrieve with options expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'claude-3-haiku-20240307', - 'gen_ai.response.model': 'claude-3-haiku-20240307', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', }), op: 'gen_ai.models', description: 'models claude-3-haiku-20240307', @@ -378,53 +378,53 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // messages.create with stream: true expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ - 'gen_ai.system': 'anthropic', - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.stream': true, - 'gen_ai.response.streaming': true, - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'msg_stream_1', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.finish_reasons': '["end_turn"]', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["end_turn"]', }), }), // messages.stream expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ - 'gen_ai.system': 'anthropic', - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.response.streaming': true, - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'msg_stream_1', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), }), // messages.stream with redundant stream: true param expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 
stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ - 'gen_ai.system': 'anthropic', - 'gen_ai.operation.name': 'messages', - 'gen_ai.request.model': 'claude-3-haiku-20240307', - 'gen_ai.request.stream': true, - 'gen_ai.response.streaming': true, - 'gen_ai.response.model': 'claude-3-haiku-20240307', - 'gen_ai.response.id': 'msg_stream_1', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), }), ]), @@ -434,28 +434,28 @@ describe('Anthropic integration', () => { transaction: 'main', spans: expect.arrayContaining([ expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ - 'gen_ai.response.streaming': true, + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, // streamed text concatenated - 'gen_ai.response.text': 'Hello from stream!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!', }), }), expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ - 'gen_ai.response.streaming': true, - 'gen_ai.response.text': 'Hello from stream!', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!', }), }), expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ - 'gen_ai.response.streaming': true, - 'gen_ai.response.text': 'Hello from stream!', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!', }), }), ]), @@ -486,7 +486,7 @@ describe('Anthropic integration', () => { transaction: { spans: expect.arrayContaining([ expect.objectContaining({ - op: 'gen_ai.messages', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON, 'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON, @@ -514,7 +514,7 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ description: expect.stringContaining('stream-response'), - op: 'gen_ai.messages', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON, 'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON, @@ -534,45 +534,45 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // Error with messages.create on stream initialization expect.objectContaining({ - description: 'messages error-stream-init stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-init stream-response', + op: 'gen_ai.chat', status: 'internal_error', // Actual status coming from the instrumentation data: expect.objectContaining({ - 
'gen_ai.request.model': 'error-stream-init', - 'gen_ai.request.stream': true, + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, }), }), // Error with messages.stream on stream initialization expect.objectContaining({ - description: 'messages error-stream-init stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-init stream-response', + op: 'gen_ai.chat', status: 'internal_error', // Actual status coming from the instrumentation data: expect.objectContaining({ - 'gen_ai.request.model': 'error-stream-init', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init', }), }), // Error midway with messages.create on streaming - note: The stream is started successfully // so we get a successful span with the content that was streamed before the error expect.objectContaining({ - description: 'messages error-stream-midway stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-midway stream-response', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ - 'gen_ai.request.model': 'error-stream-midway', - 'gen_ai.request.stream': true, - 'gen_ai.response.streaming': true, - 'gen_ai.response.text': 'This stream will ', // We received some data before error + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error }), }), // Error midway with messages.stream - same behavior, we get a span with the streamed data expect.objectContaining({ - description: 'messages error-stream-midway stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-midway stream-response', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ - 'gen_ai.request.model': 'error-stream-midway', - 'gen_ai.response.streaming': true, - 'gen_ai.response.text': 'This stream will ', // We received some data before error + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error }), }), ]), @@ -590,11 +590,11 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // Invalid tool format error expect.objectContaining({ - description: 'messages invalid-format', - op: 'gen_ai.messages', + description: 'chat invalid-format', + op: 'gen_ai.chat', status: 'internal_error', data: expect.objectContaining({ - 'gen_ai.request.model': 'invalid-format', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'invalid-format', }), }), // Model retrieval error @@ -603,16 +603,16 @@ describe('Anthropic integration', () => { op: 'gen_ai.models', status: 'internal_error', data: expect.objectContaining({ - 'gen_ai.request.model': 'nonexistent-model', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'nonexistent-model', }), }), // Successful tool usage (for comparison) expect.objectContaining({ - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ - 'gen_ai.request.model': 'claude-3-haiku-20240307', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', 'gen_ai.response.tool_calls': expect.stringContaining('tool_ok_1'), }), }), @@ -640,29 +640,29 @@ describe('Anthropic integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are 
cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Messages should be present (truncation happened) and should be a JSON array - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -690,13 +690,13 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', - 'sentry.origin': 'auto.ai.anthropic', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-haiku-20240307', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Only the last message (with filtered media) should be kept - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: [ @@ -712,8 +712,8 @@ describe('Anthropic integration', () => { }, ]), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts index d6ff72cde6d8..b61aea2ac7b8 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -1,4 +1,24 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + 
GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('Google GenAI integration', () => { @@ -12,14 +32,14 @@ describe('Google GenAI integration', () => { // First span - chats.create expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 150, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, }, description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -29,14 +49,14 @@ describe('Google GenAI integration', () => { // Second span - chat.sendMessage (should get model from context) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', // Should get from chat context + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }, description: 'chat gemini-1.5-pro', op: 'gen_ai.chat', @@ -46,17 +66,17 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent expect.objectContaining({ data: { - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + 
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }, description: 'models gemini-1.5-flash', op: 'gen_ai.models', @@ -66,11 +86,11 @@ describe('Google GenAI integration', () => { // Fourth span - error handling expect.objectContaining({ data: { - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'models error-model', op: 'gen_ai.models', @@ -86,15 +106,15 @@ describe('Google GenAI integration', () => { // First span - chats.create with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 150, - 'gen_ai.request.messages': '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]', }), description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -104,16 +124,16 @@ describe('Google GenAI integration', () => { // Second span - chat.sendMessage with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), description: 'chat gemini-1.5-pro', op: 'gen_ai.chat', @@ -123,19 +143,19 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 
'gen_ai.request.model': 'gemini-1.5-flash', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), description: 'models gemini-1.5-flash', op: 'gen_ai.models', @@ -145,12 +165,12 @@ describe('Google GenAI integration', () => { // Fourth span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), description: 'models error-model', op: 'gen_ai.models', @@ -166,8 +186,8 @@ describe('Google GenAI integration', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), description: expect.not.stringContaining('stream-response'), // Non-streaming span }), @@ -213,18 +233,18 @@ describe('Google GenAI integration', () => { // Non-streaming with tools expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-2.0-flash-001', - 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, - 'gen_ai.request.messages': expect.any(String), // Should include contents - 'gen_ai.response.text': expect.any(String), // Should include response text - 'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 8, - 
'gen_ai.usage.total_tokens': 23, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 23, }), description: 'models gemini-2.0-flash-001', op: 'gen_ai.models', @@ -234,21 +254,21 @@ describe('Google GenAI integration', () => { // Streaming with tools expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-2.0-flash-001', - 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, - 'gen_ai.request.messages': expect.any(String), // Should include contents - 'gen_ai.response.streaming': true, - 'gen_ai.response.text': expect.any(String), // Should include response text - 'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls - 'gen_ai.response.id': 'mock-response-tools-id', - 'gen_ai.response.model': 'gemini-2.0-flash-001', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 22, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-tools-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), description: 'models gemini-2.0-flash-001 stream-response', op: 'gen_ai.models', @@ -258,16 +278,16 @@ describe('Google GenAI integration', () => { // Without tools for comparison expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-2.0-flash-001', - 'gen_ai.request.messages': expect.any(String), // Should include contents - 'gen_ai.response.text': expect.any(String), // Should include response text - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + 
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), description: 'models gemini-2.0-flash-001', op: 'gen_ai.models', @@ -289,21 +309,21 @@ describe('Google GenAI integration', () => { // First span - models.generateContentStream (streaming) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.response.streaming': true, - 'gen_ai.response.id': 'mock-response-streaming-id', - 'gen_ai.response.model': 'gemini-1.5-pro', - 'gen_ai.response.finish_reasons': '["STOP"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 22, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), description: 'models gemini-1.5-flash stream-response', op: 'gen_ai.models', @@ -313,14 +333,14 @@ describe('Google GenAI integration', () => { // Second span - chat.create expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 150, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, }), description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -330,14 +350,14 @@ describe('Google GenAI integration', () => { // Third span - chat.sendMessageStream (streaming) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.response.streaming': true, - 'gen_ai.response.id': 'mock-response-streaming-id', - 'gen_ai.response.model': 'gemini-1.5-pro', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', }), description: 'chat gemini-1.5-pro stream-response', op: 'gen_ai.chat', @@ -347,9 +367,9 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', }), description: 'models blocked-model stream-response', op: 'gen_ai.models', @@ -359,9 +379,9 @@ describe('Google GenAI integration', () => { // Fifth span - error handling for streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', }), description: 'models error-model stream-response', op: 'gen_ai.models', @@ -377,22 +397,22 @@ describe('Google GenAI integration', () => { // First span - models.generateContentStream (streaming) with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true - 'gen_ai.response.streaming': true, - 'gen_ai.response.id': 'mock-response-streaming-id', - 'gen_ai.response.model': 'gemini-1.5-pro', - 'gen_ai.response.finish_reasons': '["STOP"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 22, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), description: 'models gemini-1.5-flash stream-response', op: 'gen_ai.models', @@ -402,14 +422,14 @@ describe('Google GenAI integration', () => { // Second span - chat.create expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 
'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.top_p': 0.9, - 'gen_ai.request.max_tokens': 150, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, }), description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -419,19 +439,19 @@ describe('Google GenAI integration', () => { // Third span - chat.sendMessageStream (streaming) with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-pro', - 'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true - 'gen_ai.response.streaming': true, - 'gen_ai.response.id': 'mock-response-streaming-id', - 'gen_ai.response.model': 'gemini-1.5-pro', - 'gen_ai.response.finish_reasons': '["STOP"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 22, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), description: 'chat gemini-1.5-pro stream-response', op: 'gen_ai.chat', @@ -441,14 +461,14 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content stream with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'blocked-model', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true - 'gen_ai.response.streaming': true, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, }), description: 'models blocked-model stream-response', op: 'gen_ai.models', @@ -458,13 +478,13 @@ describe('Google GenAI integration', () => { // Fifth span - error handling for streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 
'google_genai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), description: 'models error-model stream-response', op: 'gen_ai.models', @@ -505,13 +525,13 @@ describe('Google GenAI integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Messages should be present (truncation happened) and should be a JSON array with parts - 'gen_ai.request.messages': expect.stringMatching( + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching( /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/, ), }), @@ -523,13 +543,13 @@ describe('Google GenAI integration', () => { // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', - 'sentry.origin': 'auto.ai.google_genai', - 'gen_ai.system': 'google_genai', - 'gen_ai.request.model': 'gemini-1.5-flash', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', parts: [{ text: 'This is a small message that fits within the limit' }], diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 8d8f1d542f70..ce024288a3a0 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -1,4 +1,22 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + 
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('LangChain integration', () => { @@ -12,19 +30,19 @@ describe('LangChain integration', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -34,20 +52,20 @@ describe('LangChain integration', () => { // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, - 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -57,11 +75,11 @@ describe('LangChain integration', () => { // Third span - error handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 
'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }), description: 'chat error-model', op: 'gen_ai.chat', @@ -77,21 +95,21 @@ describe('LangChain integration', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -101,22 +119,22 @@ describe('LangChain integration', () => { // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, - 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: 
true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -126,12 +144,12 @@ describe('LangChain integration', () => { // Third span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true }), description: 'chat error-model', op: 'gen_ai.chat', @@ -166,20 +184,20 @@ describe('LangChain integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 150, - 'gen_ai.usage.input_tokens': 20, - 'gen_ai.usage.output_tokens': 30, - 'gen_ai.usage.total_tokens': 50, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': 'tool_use', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'tool_use', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -201,13 +219,13 @@ describe('LangChain integration', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) - 'gen_ai.request.messages': 
expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -217,13 +235,13 @@ describe('LangChain integration', () => { // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -233,13 +251,13 @@ describe('LangChain integration', () => { // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -286,7 +304,7 @@ describe('LangChain integration', () => { // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -300,7 +318,7 @@ describe('LangChain integration', () => { // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index b05a70acdeb4..2389715d9307 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -336,7 +336,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( span => - 
span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -350,7 +350,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts index bafcdf49a32c..539bce1a740e 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts @@ -1,4 +1,20 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_AGENT_NAME_ATTRIBUTE, + GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PIPELINE_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('LangGraph integration', () => { @@ -12,10 +28,10 @@ describe('LangGraph integration', () => { // create_agent span expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 'gen_ai.create_agent', @@ -25,11 +41,11 @@ describe('LangGraph integration', () => { // First invoke_agent span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -39,11 +55,11 @@ describe('LangGraph integration', () => { // Second invoke_agent span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', + 
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -59,10 +75,10 @@ describe('LangGraph integration', () => { // create_agent span (PII enabled doesn't affect this span) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', }, description: 'create_agent weather_assistant', op: 'gen_ai.create_agent', @@ -72,12 +88,12 @@ describe('LangGraph integration', () => { // First invoke_agent span with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': expect.stringContaining('What is the weather today?'), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather today?'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -87,12 +103,12 @@ describe('LangGraph integration', () => { // Second invoke_agent span with PII and multiple messages expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'weather_assistant', - 'gen_ai.pipeline.name': 'weather_assistant', - 'gen_ai.request.messages': expect.stringContaining('Tell me about the weather'), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('Tell me about the weather'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -108,10 +124,10 @@ describe('LangGraph integration', () => { // create_agent span for first graph (no tool calls) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', }, description: 'create_agent tool_agent', op: 'gen_ai.create_agent', @@ -121,19 +137,19 @@ describe('LangGraph integration', () => { // invoke_agent span with tools available but not called expect.objectContaining({ data: 
expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_agent', - 'gen_ai.pipeline.name': 'tool_agent', - 'gen_ai.request.available_tools': expect.stringContaining('get_weather'), - 'gen_ai.request.messages': expect.stringContaining('What is the weather?'), - 'gen_ai.response.model': 'gpt-4-0613', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.text': expect.stringContaining('Response without calling tools'), - 'gen_ai.usage.input_tokens': 25, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_agent', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather?'), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringContaining('Response without calling tools'), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, }), description: 'invoke_agent tool_agent', op: 'gen_ai.invoke_agent', @@ -143,10 +159,10 @@ describe('LangGraph integration', () => { // create_agent span for second graph (with tool calls) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_calling_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', }, description: 'create_agent tool_calling_agent', op: 'gen_ai.create_agent', @@ -156,21 +172,21 @@ describe('LangGraph integration', () => { // invoke_agent span with tool calls and execution expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'tool_calling_agent', - 'gen_ai.pipeline.name': 'tool_calling_agent', - 'gen_ai.request.available_tools': expect.stringContaining('get_weather'), - 'gen_ai.request.messages': expect.stringContaining('San Francisco'), - 'gen_ai.response.model': 'gpt-4-0613', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.text': expect.stringMatching(/"role":"tool"/), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_calling_agent', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('San Francisco'), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringMatching(/"role":"tool"/), // Verify tool_calls are captured - 'gen_ai.response.tool_calls': expect.stringContaining('get_weather'), - 
'gen_ai.usage.input_tokens': 80, - 'gen_ai.usage.output_tokens': 40, - 'gen_ai.usage.total_tokens': 120, + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('get_weather'), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 80, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 120, }), description: 'invoke_agent tool_calling_agent', op: 'gen_ai.invoke_agent', @@ -213,10 +229,10 @@ describe('LangGraph integration', () => { // create_agent span expect.objectContaining({ data: { - 'gen_ai.operation.name': 'create_agent', - 'sentry.op': 'gen_ai.create_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', }, description: 'create_agent thread_test_agent', op: 'gen_ai.create_agent', @@ -226,13 +242,13 @@ describe('LangGraph integration', () => { // First invoke_agent span with thread_id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', - 'gen_ai.pipeline.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent', // The thread_id should be captured as conversation.id - 'gen_ai.conversation.id': 'thread_abc123_session_1', + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_abc123_session_1', }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', @@ -242,13 +258,13 @@ describe('LangGraph integration', () => { // Second invoke_agent span with different thread_id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.ai.langgraph', - 'gen_ai.agent.name': 'thread_test_agent', - 'gen_ai.pipeline.name': 'thread_test_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', + [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent', + [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent', // Different thread_id for different conversation - 'gen_ai.conversation.id': 'thread_xyz789_session_2', + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_xyz789_session_2', }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', @@ -258,7 +274,7 @@ describe('LangGraph integration', () => { // Third invoke_agent span without thread_id (should NOT have gen_ai.conversation.id) expect.objectContaining({ data: expect.not.objectContaining({ - 'gen_ai.conversation.id': expect.anything(), + [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(), }), description: 'invoke_agent thread_test_agent', op: 'gen_ai.invoke_agent', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index ac40fbe94249..9010e203924f 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ 
b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts
@@ -117,8 +117,8 @@ describe('OpenAI Tool Calls integration', () => {
       // Third span - responses API with tools (non-streaming)
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
           'sentry.origin': 'auto.ai.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
@@ -135,16 +135,16 @@
           'openai.usage.completion_tokens': 12,
           'openai.usage.prompt_tokens': 8,
         },
-        description: 'responses gpt-4',
-        op: 'gen_ai.responses',
+        description: 'chat gpt-4',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.openai',
         status: 'ok',
       }),
       // Fourth span - responses API with tools and streaming
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
           'sentry.origin': 'auto.ai.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
@@ -163,8 +163,8 @@
           'openai.usage.completion_tokens': 12,
           'openai.usage.prompt_tokens': 8,
         },
-        description: 'responses gpt-4 stream-response',
-        op: 'gen_ai.responses',
+        description: 'chat gpt-4 stream-response',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.openai',
         status: 'ok',
       }),
@@ -238,8 +238,8 @@ describe('OpenAI Tool Calls integration', () => {
       // Third span - responses API with tools (non-streaming) with PII
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
           'sentry.origin': 'auto.ai.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
@@ -259,16 +259,16 @@
           'openai.usage.completion_tokens': 12,
           'openai.usage.prompt_tokens': 8,
         },
-        description: 'responses gpt-4',
-        op: 'gen_ai.responses',
+        description: 'chat gpt-4',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.openai',
         status: 'ok',
       }),
       // Fourth span - responses API with tools and streaming with PII
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'responses',
-          'sentry.op': 'gen_ai.responses',
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
           'sentry.origin': 'auto.ai.openai',
           'gen_ai.system': 'openai',
           'gen_ai.request.model': 'gpt-4',
@@ -290,8 +290,8 @@
           'openai.usage.completion_tokens': 12,
           'openai.usage.prompt_tokens': 8,
         },
-        description: 'responses gpt-4 stream-response',
-        op: 'gen_ai.responses',
+        description: 'chat gpt-4 stream-response',
+        op: 'gen_ai.chat',
         origin: 'auto.ai.openai',
         status: 'ok',
       }),
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
index bf64d2b92b72..7a1290aaa4b5 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
@@ -1,4 +1,19 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { afterAll, describe, expect } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_ID_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
 
 describe('OpenAI integration', () => {
@@ -12,18 +27,18 @@ describe('OpenAI integration', () => {
       // First span - basic chat completion without PII
       expect.objectContaining({
         data: {
-          'gen_ai.operation.name': 'chat',
-          'sentry.op': 'gen_ai.chat',
-          'sentry.origin': 'auto.ai.openai',
-          'gen_ai.system': 'openai',
-          'gen_ai.request.model': 'gpt-3.5-turbo',
-          'gen_ai.request.temperature': 0.7,
-          'gen_ai.response.model': 'gpt-3.5-turbo',
-
'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, 'openai.response.id': 'chatcmpl-mock123', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', @@ -38,36 +38,36 @@ describe('OpenAI integration', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, 'openai.response.id': 'resp_mock456', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Third span - error handling expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -77,19 +77,19 @@ describe('OpenAI integration', () => { // Fourth span - chat completions streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, 
'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, 'openai.response.id': 'chatcmpl-stream-123', 'openai.response.model': 'gpt-4', 'gen_ai.response.streaming': true, @@ -105,18 +105,18 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_stream_456', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.usage.input_tokens': 6, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 16, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, 'openai.response.id': 'resp_stream_456', 'openai.response.model': 'gpt-4', 'gen_ai.response.streaming': true, @@ -124,20 +124,20 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Sixth span - error handling in streaming context expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', 'gen_ai.request.stream': true, - 'gen_ai.system': 'openai', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', }, description: 'chat error-model stream-response', op: 'gen_ai.chat', @@ -153,21 +153,21 @@ describe('OpenAI integration', () => { // First span - basic chat completion with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, 'gen_ai.request.messages.original_length': 2, - 
'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.text': '["Hello from OpenAI mock!"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '["Hello from OpenAI mock!"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, 'openai.response.id': 'chatcmpl-mock123', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', @@ -182,40 +182,40 @@ describe('OpenAI integration', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.messages': 'Translate this to French: Hello', - 'gen_ai.response.text': 'Response to: Translate this to French: Hello', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, 'openai.response.id': 'resp_mock456', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Third span - error handling with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -225,22 +225,22 @@ 
describe('OpenAI integration', () => { // Fourth span - chat completions streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, 'gen_ai.request.stream': true, 'gen_ai.request.messages.original_length': 2, - 'gen_ai.request.messages': '[{"role":"user","content":"Tell me about streaming"}]', - 'gen_ai.response.text': 'Hello from OpenAI streaming!', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, 'openai.response.id': 'chatcmpl-stream-123', 'openai.response.model': 'gpt-4', 'gen_ai.response.streaming': true, @@ -256,20 +256,20 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', 'gen_ai.request.stream': true, - 'gen_ai.request.messages': 'Test streaming responses API', - 'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.response.id': 'resp_stream_456', - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.usage.input_tokens': 6, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 16, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Test streaming responses API', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Streaming response to: Test streaming responses APITest streaming responses API', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, 'openai.response.id': 'resp_stream_456', 'openai.response.model': 'gpt-4', 'gen_ai.response.streaming': true, @@ -277,22 +277,22 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }), - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 
stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Sixth span - error handling in streaming context with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', 'gen_ai.request.stream': true, 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', - 'gen_ai.system': 'openai', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', }, description: 'chat error-model stream-response', op: 'gen_ai.chat', @@ -308,15 +308,15 @@ describe('OpenAI integration', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), }), // Check that custom options are respected for streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true 'gen_ai.request.stream': true, // Should be marked as stream }), }), @@ -359,16 +359,16 @@ describe('OpenAI integration', () => { // First span - embeddings API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'text-embedding-3-small', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small', 'gen_ai.request.encoding_format': 'float', 'gen_ai.request.dimensions': 1536, - 'gen_ai.response.model': 'text-embedding-3-small', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.total_tokens': 10, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, 'openai.response.model': 'text-embedding-3-small', 'openai.usage.prompt_tokens': 10, }, @@ -380,11 +380,11 @@ describe('OpenAI integration', () => { // Second span - embeddings API error model expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 
'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'embeddings error-model', op: 'gen_ai.embeddings', @@ -400,17 +400,17 @@ describe('OpenAI integration', () => { // First span - embeddings API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'text-embedding-3-small', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small', 'gen_ai.request.encoding_format': 'float', 'gen_ai.request.dimensions': 1536, 'gen_ai.embeddings.input': 'Embedding test!', - 'gen_ai.response.model': 'text-embedding-3-small', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.total_tokens': 10, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, 'openai.response.model': 'text-embedding-3-small', 'openai.usage.prompt_tokens': 10, }, @@ -422,11 +422,11 @@ describe('OpenAI integration', () => { // Second span - embeddings API error model with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', 'gen_ai.embeddings.input': 'Error embedding test!', }, description: 'embeddings error-model', @@ -437,15 +437,15 @@ describe('OpenAI integration', () => { // Third span - embeddings API with multiple inputs (this does not get truncated) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'text-embedding-3-small', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small', 'gen_ai.embeddings.input': '["First input text","Second input text","Third input text"]', - 'gen_ai.response.model': 'text-embedding-3-small', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.total_tokens': 10, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, 'openai.response.model': 'text-embedding-3-small', 'openai.usage.prompt_tokens': 10, }, @@ -493,18 +493,18 @@ describe('OpenAI integration', () => { span_id: expect.any(String), trace_id: expect.any(String), data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': 
'["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, 'openai.response.id': 'chatcmpl-mock123', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', @@ -540,18 +540,18 @@ describe('OpenAI integration', () => { span_id: expect.any(String), trace_id: expect.any(String), data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, 'openai.response.id': 'chatcmpl-mock123', 'openai.response.model': 'gpt-3.5-turbo', 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', @@ -585,13 +585,13 @@ describe('OpenAI integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Messages should be present (truncation happened) and should be a JSON array of a single index - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -601,13 +601,13 @@ describe('OpenAI integration', () => { // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + 
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -639,16 +639,16 @@ describe('OpenAI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Messages should be present and should include truncated string input (contains only As) - 'gen_ai.request.messages': expect.stringMatching(/^A+$/), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/), }), - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -668,30 +668,30 @@ describe('OpenAI integration', () => { // First span - conversations.create returns conversation object with id expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'conversations', - 'sentry.op': 'gen_ai.conversations', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', // The conversation ID should be captured from the response 'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', }), - description: 'conversations unknown', - op: 'gen_ai.conversations', + description: 'chat unknown', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Second span - responses.create with conversation parameter expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', // The conversation ID should be captured from the request 'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -700,22 +700,22 @@ describe('OpenAI integration', () => { data: expect.not.objectContaining({ 'gen_ai.conversation.id': expect.anything(), }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses.create with previous_response_id (chaining) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', + 
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', // The previous_response_id should be captured as conversation.id 'gen_ai.conversation.id': 'resp_mock_conv_123', }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts index 9b4120b143e4..4115738a19c5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts @@ -38,8 +38,8 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -55,8 +55,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -105,8 +105,8 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -124,8 +124,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -182,8 +182,8 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -201,8 +201,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -256,8 +256,8 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -277,8 +277,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }), - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 
'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts index 2e8e8711e9e9..4261248da349 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts @@ -24,7 +24,7 @@ describe('Vercel AI integration - generateObject', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateObject', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -38,7 +38,7 @@ describe('Vercel AI integration - generateObject', () => { data: expect.objectContaining({ 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_object', - 'gen_ai.operation.name': 'ai.generateObject.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateObject.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.model.id': 'mock-model-id', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 8112bcadd5f5..2c4e72731660 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -13,14 +13,14 @@ describe('Vercel AI integration', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -37,17 +37,17 @@ describe('Vercel AI integration', () => { // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + 
[GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -68,16 +68,16 @@ describe('Vercel AI integration', () => { data: { 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -95,20 +95,20 @@ describe('Vercel AI integration', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: { - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': expect.any(String), - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -128,14 +128,14 @@ describe('Vercel AI integration', () => { // Fifth span - tool call 
generateText span expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -152,17 +152,17 @@ describe('Vercel AI integration', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -184,9 +184,9 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', @@ -208,16 +208,16 @@ describe('Vercel AI integration', () => { data: { 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 
'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -241,19 +241,19 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -280,16 +280,16 @@ describe('Vercel AI integration', () => { data: { 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 
'generateText', @@ -312,20 +312,20 @@ describe('Vercel AI integration', () => { // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: { - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': expect.any(String), - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -352,17 +352,17 @@ describe('Vercel AI integration', () => { data: { 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'Tool call completed!', - 'gen_ai.response.tool_calls': expect.any(String), - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -386,21 +386,21 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': 
['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'Tool call completed!', - 'gen_ai.response.tool_calls': expect.any(String), - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -431,9 +431,9 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', @@ -468,14 +468,14 @@ describe('Vercel AI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -490,17 +490,17 @@ describe('Vercel AI integration', () => { }), expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: 
['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -521,9 +521,9 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', @@ -588,14 +588,14 @@ describe('Vercel AI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -610,17 +610,17 @@ describe('Vercel AI integration', () => { }), expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -641,9 +641,9 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.call.id': 
'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', }, description: 'execute_tool getWeather', @@ -720,9 +720,9 @@ describe('Vercel AI integration', () => { origin: 'auto.vercelai.otel', status: 'ok', data: expect.objectContaining({ - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', - 'gen_ai.operation.name': 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', }), }), // The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing @@ -732,9 +732,9 @@ describe('Vercel AI integration', () => { origin: 'auto.vercelai.otel', status: 'ok', data: expect.objectContaining({ - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', }), }), ]), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 179644bbcd73..79e321d30942 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -13,20 +13,20 @@ describe('Vercel AI integration (V5)', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -36,26 +36,26 @@ describe('Vercel AI integration (V5)', () => { // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + 
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -65,25 +65,25 @@ describe('Vercel AI integration (V5)', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -93,29 +93,29 @@ describe('Vercel AI integration (V5)', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'gen_ai.request.model': 
'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -125,20 +125,20 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -148,7 +148,7 @@ describe('Vercel AI integration (V5)', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -158,16 +158,16 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + 
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -181,9 +181,9 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -202,25 +202,25 @@ describe('Vercel AI integration (V5)', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -230,29 +230,29 @@ describe('Vercel AI integration (V5)', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the 
first span?"}]}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -262,25 +262,25 @@ describe('Vercel AI integration (V5)', () => { // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -290,29 +290,29 @@ describe('Vercel AI integration (V5)', () => { // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 
'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -322,25 +322,25 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -350,32 +350,32 @@ describe('Vercel AI integration (V5)', () => { // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 
'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set + // [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -391,9 +391,9 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -446,19 +446,19 @@ describe('Vercel AI integration (V5)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + 
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', }, description: 'generateText', @@ -467,7 +467,7 @@ describe('Vercel AI integration (V5)', () => { }), expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -477,16 +477,16 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -499,9 +499,9 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 98a16618d77d..8f5d1ab65341 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -13,7 +13,7 @@ describe('Vercel AI integration (V6)', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -21,13 +21,13 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 
'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -37,27 +37,27 @@ describe('Vercel AI integration (V6)', () => { // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -67,25 +67,25 @@ describe('Vercel AI integration (V6)', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 
'[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -95,29 +95,29 @@ describe('Vercel AI integration (V6)', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -127,7 +127,7 @@ describe('Vercel AI integration (V6)', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -135,13 +135,13 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 
'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -151,7 +151,7 @@ describe('Vercel AI integration (V6)', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -162,16 +162,16 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -185,9 +185,9 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -206,25 +206,25 @@ describe('Vercel AI integration (V6)', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 
'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -234,29 +234,29 @@ describe('Vercel AI integration (V6)', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -266,25 +266,25 @@ describe('Vercel AI integration (V6)', () => { // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + 
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -294,29 +294,29 @@ describe('Vercel AI integration (V6)', () => { // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -326,25 +326,25 @@ describe('Vercel AI integration (V6)', () => { // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + 
[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -354,32 +354,32 @@ describe('Vercel AI integration (V6)', () => { // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': expect.any(String), + [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set + // [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -395,9 +395,9 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.tool.input': 
expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -450,20 +450,20 @@ describe('Vercel AI integration (V6)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', }), description: 'generateText', @@ -472,7 +472,7 @@ describe('Vercel AI integration (V6)', () => { }), expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -483,16 +483,16 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -505,9 +505,9 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 
'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index 4fa7274d7281..8fbc0f30c014 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -232,6 +232,31 @@ export const GEN_AI_EMBED_MANY_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embed_many */ export const GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE = 'gen_ai.execute_tool'; +/** + * The tool name for tool call spans + */ +export const GEN_AI_TOOL_NAME_ATTRIBUTE = 'gen_ai.tool.name'; + +/** + * The tool call ID + */ +export const GEN_AI_TOOL_CALL_ID_ATTRIBUTE = 'gen_ai.tool.call.id'; + +/** + * The tool type (e.g., 'function') + */ +export const GEN_AI_TOOL_TYPE_ATTRIBUTE = 'gen_ai.tool.type'; + +/** + * The tool input/arguments + */ +export const GEN_AI_TOOL_INPUT_ATTRIBUTE = 'gen_ai.tool.input'; + +/** + * The tool output/result + */ +export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output'; + // ============================================================================= // OPENAI-SPECIFIC ATTRIBUTES // ============================================================================= @@ -266,13 +291,12 @@ export const OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'openai.usage.prompt_tokens' // ============================================================================= /** - * OpenAI API operations + * OpenAI API operations following OpenTelemetry semantic conventions + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export const OPENAI_OPERATIONS = { CHAT: 'chat', - RESPONSES: 'responses', EMBEDDINGS: 'embeddings', - CONVERSATIONS: 'conversations', } as const; // ============================================================================= diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts index 4a7a14eea554..17e062c976c8 100644 --- a/packages/core/src/tracing/ai/utils.ts +++ b/packages/core/src/tracing/ai/utils.ts @@ -9,14 +9,15 @@ import { } from './gen-ai-attributes'; import { truncateGenAiMessages, truncateGenAiStringInput } from './messageTruncation'; /** - * Maps AI method paths to Sentry operation name + * Maps AI method paths to OpenTelemetry semantic convention operation names + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export function getFinalOperationName(methodPath: string): string { if (methodPath.includes('messages')) { - return 'messages'; + return 'chat'; } if (methodPath.includes('completions')) { - return 'completions'; + return 'text_completion'; } if (methodPath.includes('models')) { return 'models'; diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index 1930be794be5..e5ad6cc14189 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -3,7 +3,13 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from ' import { SPAN_STATUS_ERROR } from '../../tracing'; import { startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; -import { GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE } from 
'../ai/gen-ai-attributes'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_TOOL_INPUT_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + GEN_AI_TOOL_OUTPUT_ATTRIBUTE, +} from '../ai/gen-ai-attributes'; import { LANGCHAIN_ORIGIN } from './constants'; import type { LangChainCallbackHandler, @@ -241,12 +247,12 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): const toolName = tool.name || 'unknown_tool'; const attributes: Record<string, SpanAttributeValue> = { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, - 'gen_ai.tool.name': toolName, + [GEN_AI_TOOL_NAME_ATTRIBUTE]: toolName, }; // Add input if recordInputs is enabled if (recordInputs) { - attributes['gen_ai.tool.input'] = input; + attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE] = input; } startSpanManual( @@ -272,7 +278,7 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): // Add output if recordOutputs is enabled if (recordOutputs) { span.setAttributes({ - 'gen_ai.tool.output': JSON.stringify(output), + [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: JSON.stringify(output), }); } exitSpan(runId); diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 0a07ae8df370..0e36b5542ba5 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -216,18 +216,19 @@ function extractCommonRequestAttributes( /** * Small helper to assemble boilerplate attributes shared by both request extractors. + * Both LLM and ChatModel invocations use 'chat' as the operation name since modern + * LLM interactions are chat-based and this aligns with OpenTelemetry semantic conventions. */ function baseRequestAttributes( system: unknown, modelName: unknown, - operation: 'pipeline' | 'chat', serialized: LangChainSerialized, invocationParams?: Record<string, unknown>, langSmithMetadata?: Record<string, unknown>, ): Record<string, SpanAttributeValue> { return { [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'), [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata), @@ -237,7 +238,8 @@ function baseRequestAttributes( /** * Extracts attributes for plain LLM invocations (string prompts). * - * - Operation is tagged as `pipeline` to distinguish from chat-style invocations. + * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. + * Modern LLM invocations typically use chat-based models even when called with string prompts. * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}` * messages to align with the chat schema used elsewhere. */ export function extractLLMRequestAttributes( @@ -251,7 +253,7 @@ export function extractLLMRequestAttributes( const system = langSmithMetadata?.ls_provider; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'pipeline', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); @@ -280,7 +282,7 @@ export function extractChatModelRequestAttributes( const system = langSmithMetadata?.ls_provider ?? llm.id?.[2]; const modelName = invocationParams?.model ??
langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'chat', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts index 007dd93a91b1..82494f7ae018 100644 --- a/packages/core/src/tracing/openai/utils.ts +++ b/packages/core/src/tracing/openai/utils.ts @@ -35,20 +35,21 @@ import type { } from './types'; /** - * Maps OpenAI method paths to Sentry operation names + * Maps OpenAI method paths to OpenTelemetry semantic convention operation names + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export function getOperationName(methodPath: string): string { if (methodPath.includes('chat.completions')) { return OPENAI_OPERATIONS.CHAT; } if (methodPath.includes('responses')) { - return OPENAI_OPERATIONS.RESPONSES; + return OPENAI_OPERATIONS.CHAT; } if (methodPath.includes('embeddings')) { return OPENAI_OPERATIONS.EMBEDDINGS; } if (methodPath.includes('conversations')) { - return OPENAI_OPERATIONS.CONVERSATIONS; + return OPENAI_OPERATIONS.CHAT; } return methodPath.split('.').pop() || 'unknown'; } diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 9b95e8aa91ad..e1c7cdedf61c 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -8,10 +8,16 @@ import { GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_INPUT_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + GEN_AI_TOOL_OUTPUT_ATTRIBUTE, + GEN_AI_TOOL_TYPE_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { toolCallSpanMap } from './constants'; import type { TokenSummary } from './types'; @@ -48,6 +54,39 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin); } +/** + * Maps Vercel AI SDK operation names to OpenTelemetry semantic convention values + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans + */ +function mapVercelAiOperationName(operationName: string): string { + // Map to OpenTelemetry well-known values + if ( + operationName === 'ai.generateText' || + operationName === 'ai.streamText' || + operationName === 'ai.generateObject' || + operationName === 'ai.streamObject' || + operationName === 'ai.generateText.doGenerate' || + operationName === 'ai.streamText.doStream' || + operationName === 'ai.generateObject.doGenerate' || + operationName === 'ai.streamObject.doStream' + ) { + return 'generate_content'; + } + if ( + operationName === 'ai.embed' || + operationName === 'ai.embedMany' || + operationName === 'ai.embed.doEmbed' || + operationName === 'ai.embedMany.doEmbed' + ) { + return 'embeddings'; + } + if (operationName === 'ai.toolCall') { + return 'execute_tool'; + } + // Return the original value for unknown operations + return operationName; +} + /** * Post-process spans emitted by the 
Vercel AI SDK. * This is supposed to be used in `client.on('spanStart', ...)` @@ -133,7 +172,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' ) { - attributes['gen_ai.usage.total_tokens'] = + attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; } @@ -145,15 +184,19 @@ function processEndedVercelAiSpan(span: SpanJSON): void { } // Rename AI SDK attributes to standardized gen_ai attributes - renameAttributeKey(attributes, OPERATION_NAME_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE); + // Map operation.name to OpenTelemetry semantic convention values + if (attributes[OPERATION_NAME_ATTRIBUTE]) { + const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string); + attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName; + } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); - renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, 'gen_ai.tool.input'); - renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output'); + renameAttributeKey(attributes, AI_TOOL_CALL_ARGS_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE); + renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE); renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema'); renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE); @@ -183,22 +226,23 @@ function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, function processToolCallSpan(span: Span, attributes: SpanAttributes): void { addOriginToSpan(span, 'auto.vercelai.otel'); span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); - renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, 'gen_ai.tool.name'); - renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, 'gen_ai.tool.call.id'); + span.setAttribute(GEN_AI_OPERATION_NAME_ATTRIBUTE, 'execute_tool'); + renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE); + renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE); // Store the span in our global map using the tool call ID // This allows us to capture tool errors and link them to the correct span - const toolCallId = attributes['gen_ai.tool.call.id']; + const toolCallId = attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE]; if (typeof toolCallId === 'string') { toolCallSpanMap.set(toolCallId, span); } // https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-type - if (!attributes['gen_ai.tool.type']) { - span.setAttribute('gen_ai.tool.type', 'function'); + if (!attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE]) { + span.setAttribute(GEN_AI_TOOL_TYPE_ATTRIBUTE, 'function'); + } - const toolName = attributes['gen_ai.tool.name']; + const toolName = attributes[GEN_AI_TOOL_NAME_ATTRIBUTE]; if (toolName) { span.updateName(`execute_tool ${toolName}`); } From 1156b0a9a3b392040e0dba2061b80a6edff1ffa2 Mon Sep 17
00:00:00 2001 From: RulaKhaled Date: Thu, 22 Jan 2026 10:22:20 +0100 Subject: [PATCH 2/8] fix vercel ai op name, and fix tests --- packages/core/test/lib/utils/openai-utils.test.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts index ff951e8be40b..25cd873ace08 100644 --- a/packages/core/test/lib/utils/openai-utils.test.ts +++ b/packages/core/test/lib/utils/openai-utils.test.ts @@ -18,14 +18,14 @@ describe('openai-utils', () => { expect(getOperationName('some.path.chat.completions.method')).toBe('chat'); }); - it('should return responses for responses methods', () => { - expect(getOperationName('responses.create')).toBe('responses'); - expect(getOperationName('some.path.responses.method')).toBe('responses'); + it('should return chat for responses methods', () => { + expect(getOperationName('responses.create')).toBe('chat'); + expect(getOperationName('some.path.responses.method')).toBe('chat'); }); - it('should return conversations for conversations methods', () => { - expect(getOperationName('conversations.create')).toBe('conversations'); - expect(getOperationName('some.path.conversations.method')).toBe('conversations'); + it('should return chat for conversations methods', () => { + expect(getOperationName('conversations.create')).toBe('chat'); + expect(getOperationName('some.path.conversations.method')).toBe('chat'); }); it('should return the last part of path for unknown methods', () => { @@ -41,7 +41,7 @@ describe('openai-utils', () => { describe('getSpanOperation', () => { it('should prefix operation with gen_ai', () => { expect(getSpanOperation('chat.completions.create')).toBe('gen_ai.chat'); - expect(getSpanOperation('responses.create')).toBe('gen_ai.responses'); + expect(getSpanOperation('responses.create')).toBe('gen_ai.chat'); expect(getSpanOperation('some.custom.operation')).toBe('gen_ai.operation'); }); }); From 47cb2ce68b0d2c9c1000018bfd7ae11960d1bd5f Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Thu, 22 Jan 2026 10:50:08 +0100 Subject: [PATCH 3/8] fix vercel ai op name, and fix tests --- .../suites/tracing/google-genai/test.ts | 180 ++++++++---------- .../suites/tracing/langchain/test.ts | 6 +- .../suites/tracing/langchain/v1/test.ts | 6 +- .../suites/tracing/vercelai/test.ts | 20 ++ .../suites/tracing/vercelai/v5/test.ts | 17 ++ packages/core/src/tracing/ai/utils.ts | 5 + packages/core/src/tracing/vercel-ai/index.ts | 1 + 7 files changed, 127 insertions(+), 108 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts index b61aea2ac7b8..c46f3508743e 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -1,24 +1,4 @@ -import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; -import { - GEN_AI_OPERATION_NAME_ATTRIBUTE, - GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, - GEN_AI_REQUEST_TOP_P_ATTRIBUTE, - GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, - GEN_AI_RESPONSE_ID_ATTRIBUTE, - GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - 
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, - GEN_AI_SYSTEM_ATTRIBUTE, - GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, - GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, - GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, -} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; describe('Google GenAI integration', () => { @@ -38,8 +18,8 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, - [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, - [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 150, }, description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -66,34 +46,34 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, - [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, - [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 100, [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }, - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Fourth span - error handling expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, - description: 'models error-model', - op: 'gen_ai.models', + description: 'generate_content error-model', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -112,8 +92,8 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, - [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, - [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 150, [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]', }), description: 'chat gemini-1.5-pro create', @@ -143,37 +123,37 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', 
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            'gen_ai.request.top_p': 0.9,
+            'gen_ai.request.max_tokens': 100,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
-          description: 'models gemini-1.5-flash',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-1.5-flash',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
         // Fourth span - error handling with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
           }),
-          description: 'models error-model',
-          op: 'gen_ai.models',
+          description: 'generate_content error-model',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'internal_error',
         }),
@@ -233,12 +213,12 @@ describe('Google GenAI integration', () => {
         // Non-streaming with tools
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
-            [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
+            'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
             [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
@@ -246,22 +226,22 @@ describe('Google GenAI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 23,
           }),
-          description: 'models gemini-2.0-flash-001',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-2.0-flash-001',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
         // Streaming with tools
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
-            [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
+            'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
-            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            'gen_ai.response.streaming': true,
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
             [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-tools-id',
@@ -270,16 +250,16 @@ describe('Google GenAI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
-          description: 'models gemini-2.0-flash-001 stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-2.0-flash-001 stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
         // Without tools for comparison
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
@@ -289,8 +269,8 @@ describe('Google GenAI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
-          description: 'models gemini-2.0-flash-001',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-2.0-flash-001',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
@@ -309,15 +289,15 @@ describe('Google GenAI integration', () => {
         // First span - models.generateContentStream (streaming)
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
-            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            'gen_ai.request.top_p': 0.9,
+            'gen_ai.request.max_tokens': 100,
+            'gen_ai.response.streaming': true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
@@ -325,8 +305,8 @@ describe('Google GenAI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
-          description: 'models gemini-1.5-flash stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-1.5-flash stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
@@ -339,8 +319,8 @@ describe('Google GenAI integration', () => {
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
+            'gen_ai.request.top_p': 0.9,
+            'gen_ai.request.max_tokens': 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -355,7 +335,7 @@ describe('Google GenAI integration', () => {
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
-            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            'gen_ai.response.streaming': true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
           }),
@@ -367,24 +347,24 @@ describe('Google GenAI integration', () => {
         // Fourth span - blocked content streaming
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
           }),
-          description: 'models blocked-model stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content blocked-model stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'internal_error',
         }),
         // Fifth span - error handling for streaming
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
           }),
-          description: 'models error-model stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content error-model stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'internal_error',
         }),
@@ -397,16 +377,16 @@ describe('Google GenAI integration', () => {
         // First span - models.generateContentStream (streaming) with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            'gen_ai.request.top_p': 0.9,
+            'gen_ai.request.max_tokens': 100,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
-            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            'gen_ai.response.streaming': true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
@@ -414,8 +394,8 @@ describe('Google GenAI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
           }),
-          description: 'models gemini-1.5-flash stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-1.5-flash stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
@@ -428,8 +408,8 @@ describe('Google GenAI integration', () => {
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
+            'gen_ai.request.top_p': 0.9,
+            'gen_ai.request.max_tokens': 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -445,7 +425,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
-            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            'gen_ai.response.streaming': true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
@@ -461,33 +441,33 @@ describe('Google GenAI integration', () => {
         // Fourth span - blocked content stream with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
-            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
+            'gen_ai.response.streaming': true,
           }),
-          description: 'models blocked-model stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content blocked-model stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'internal_error',
         }),
         // Fifth span - error handling for streaming with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
           }),
-          description: 'models error-model stream-response',
-          op: 'gen_ai.models',
+          description: 'generate_content error-model stream-response',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'internal_error',
         }),
@@ -525,8 +505,8 @@ describe('Google GenAI integration', () => {
         // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
@@ -535,8 +515,8 @@ describe('Google GenAI integration', () => {
               /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/,
             ),
           }),
-          description: 'models gemini-1.5-flash',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-1.5-flash',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
index ce024288a3a0..14f396d1a9c5 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
@@ -303,8 +303,7 @@ describe('LangChain integration', () => {
       // First call: Direct Anthropic call made BEFORE LangChain import
       // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic')
       const firstAnthropicSpan = spans.find(
-        span =>
-          span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
+        span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
       );
 
       // Second call: LangChain call
@@ -317,8 +316,7 @@ describe('LangChain integration', () => {
       // This should NOT have Anthropic instrumentation (skip works correctly)
       // Count how many Anthropic spans we have - should be exactly 1
       const anthropicSpans = spans.filter(
-        span =>
-          span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
+        span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
       );
 
       // Verify the edge case limitation:
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
index 2389715d9307..6f2654a86260 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
@@ -335,8 +335,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
       // First call: Direct Anthropic call made BEFORE LangChain import
       // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic')
       const firstAnthropicSpan = spans.find(
-        span =>
-          span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
+        span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
      );
 
       // Second call: LangChain call
@@ -349,8 +348,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
       // This should NOT have Anthropic instrumentation (skip works correctly)
      // Count how many Anthropic spans we have - should be exactly 1
       const anthropicSpans = spans.filter(
-        span =>
-          span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
+        span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
       );
 
       // Verify the edge case limitation:
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 2c4e72731660..eaab79771a71 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -19,6 +19,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -46,6 +47,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -76,6 +78,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -107,6 +110,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -134,6 +138,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -161,6 +166,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -185,6 +191,7 @@ describe('Vercel AI integration', () => {
             'gen_ai.tool.name': 'getWeather',
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.operationId': 'ai.toolCall',
@@ -216,6 +223,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -252,6 +260,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -288,6 +297,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -324,6 +334,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -361,6 +372,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -399,6 +411,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -432,6 +445,7 @@ describe('Vercel AI integration', () => {
             'gen_ai.tool.output': expect.any(String),
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.operationId': 'ai.toolCall',
@@ -474,6 +488,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -499,6 +514,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -522,6 +538,7 @@ describe('Vercel AI integration', () => {
             'gen_ai.tool.name': 'getWeather',
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.operationId': 'ai.toolCall',
@@ -594,6 +611,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -619,6 +637,7 @@ describe('Vercel AI integration', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.model.provider': 'mock-provider',
@@ -642,6 +661,7 @@ describe('Vercel AI integration', () => {
             'gen_ai.tool.name': 'getWeather',
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.operationId': 'ai.toolCall',
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index 79e321d30942..251416619376 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -25,6 +25,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -39,6 +40,7 @@ describe('Vercel AI integration (V5)', () => {
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -82,6 +84,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -96,6 +99,7 @@ describe('Vercel AI integration (V5)', () => {
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -137,6 +141,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -166,6 +171,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -182,6 +188,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.tool.name': 'getWeather',
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -219,6 +226,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -251,6 +259,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -279,6 +288,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -293,6 +303,7 @@ describe('Vercel AI integration (V5)', () => {
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -339,6 +350,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -374,6 +386,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -392,6 +405,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.tool.output': expect.any(String),
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -457,6 +471,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.response.finishReason': 'tool-calls',
@@ -485,6 +500,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            'operation.name': 'generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
@@ -500,6 +516,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.tool.name': 'getWeather',
             'gen_ai.tool.type': 'function',
             [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
+            'operation.name': 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           },
diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts
index 17e062c976c8..53cd696ba909 100644
--- a/packages/core/src/tracing/ai/utils.ts
+++ b/packages/core/src/tracing/ai/utils.ts
@@ -19,6 +19,11 @@ export function getFinalOperationName(methodPath: string): string {
   if (methodPath.includes('completions')) {
     return 'text_completion';
   }
+  // Google GenAI: models.generateContent* -> generate_content (actually generates AI responses)
+  if (methodPath.includes('generateContent')) {
+    return 'generate_content';
+  }
+  // Anthropic: models.get/retrieve -> models (metadata retrieval only)
   if (methodPath.includes('models')) {
     return 'models';
   }
diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts
index e1c7cdedf61c..db625ee81a54 100644
--- a/packages/core/src/tracing/vercel-ai/index.ts
+++ b/packages/core/src/tracing/vercel-ai/index.ts
@@ -188,6 +188,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
   if (attributes[OPERATION_NAME_ATTRIBUTE]) {
     const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string);
     attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName;
+    attributes[OPERATION_NAME_ATTRIBUTE] = operationName;
   }
 
   renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE);
   renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');

From fafe00b8c859c0f988a07d7767c5a1f3c291fe91 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Thu, 22 Jan 2026 11:28:35 +0100
Subject: [PATCH 4/8] fix cloudflare tests

---
 .../suites/tracing/anthropic-ai/test.ts | 19 ++++-----------
 .../suites/tracing/google-genai/test.ts | 24 +++++--------------
 2 files changed, 10 insertions(+), 33 deletions(-)

diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
index d8087ebf79fe..d6a5424dbf2e 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
@@ -1,15 +1,4 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
-import {
-  GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
-  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
-  GEN_AI_RESPONSE_ID_ATTRIBUTE,
-  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
-  GEN_AI_SYSTEM_ATTRIBUTE,
-  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -28,8 +17,8 @@ it('traces a basic message creation request', async ({ signal }) => {
       expect.arrayContaining([
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
@@ -39,8 +28,8 @@ it('traces a basic message creation request', async ({ signal }) => {
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
           }),
-          description: 'messages claude-3-haiku-20240307',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307',
+          op: 'gen_ai.chat',
           origin: 'auto.ai.anthropic',
         }),
       ]),
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
index 4579baf7d5de..b7c58d2f733f 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
@@ -1,16 +1,4 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
-import {
-  GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
-  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
-  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
-  GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
-  GEN_AI_SYSTEM_ATTRIBUTE,
-  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
-} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -36,7 +24,7 @@ it('traces Google GenAI chat creation and message sending', async () => {
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            'gen_ai.request.top_p': 0.9,
             [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           }),
           description: 'chat gemini-1.5-pro create',
@@ -62,20 +50,20 @@ it('traces Google GenAI chat creation and message sending', async () => {
         // Third span - models.generateContent
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
+            'gen_ai.request.top_p': 0.9,
             [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
           }),
-          description: 'models gemini-1.5-flash',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-1.5-flash',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
         }),
      ]),

From c3ec4d5a9bada4f11544a533d72cf90b3f02d228 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Thu, 22 Jan 2026 17:23:37 +0100
Subject: [PATCH 5/8] fix tests

---
 .../tracing/ai-providers/anthropic/test.ts | 6 +-
 .../tracing/ai-providers/langchain/test.ts | 6 +-
 .../suites/tracing/langchain/test.ts | 20 +--
 .../suites/tracing/anthropic/test.ts | 14 +--
 .../suites/tracing/google-genai/test.ts | 36 +++---
 .../suites/tracing/langchain/test.ts | 115 ++++++++----------
 .../suites/tracing/langchain/v1/test.ts | 89 +++++++-------
 .../tracing/vercelai/test-generate-object.ts | 2 +-
 .../suites/tracing/vercelai/test.ts | 46 ++-----
 .../suites/tracing/vercelai/v5/test.ts | 45 +++----
 .../suites/tracing/vercelai/v6/test.ts | 20 +--
 packages/core/src/tracing/langchain/index.ts | 8 +-
 packages/core/src/tracing/langchain/utils.ts | 15 ++-
 packages/core/src/tracing/vercel-ai/index.ts | 19 +--
 14 files changed, 189 insertions(+), 252 deletions(-)

diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts
index 206e29be16e5..8f14f0318456 100644
--- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts
@@ -20,11 +20,11 @@ sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async (
   const eventData = envelopeRequestParser(req);
 
   // Verify it's a gen_ai transaction
-  expect(eventData.transaction).toBe('messages claude-3-haiku-20240307');
-  expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages');
+  expect(eventData.transaction).toBe('chat claude-3-haiku-20240307');
+  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
   expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic');
   expect(eventData.contexts?.trace?.data).toMatchObject({
-    'gen_ai.operation.name': 'messages',
+    'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'anthropic',
     'gen_ai.request.model': 'claude-3-haiku-20240307',
     'gen_ai.request.temperature': 0.7,
diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts
index 9cc1cc9ff98b..6ea947a61baf 100644
--- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts
@@ -20,11 +20,11 @@ sentryTest('manual LangChain instrumentation sends gen_ai transactions', async (
   const eventData = envelopeRequestParser(req);
 
   // Verify it's a gen_ai transaction
-  expect(eventData.transaction).toBe('chat claude-3-haiku-20240307');
-  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
+  expect(eventData.transaction).toBe('invoke_agent claude-3-haiku-20240307');
+  expect(eventData.contexts?.trace?.op).toBe('gen_ai.invoke_agent');
   expect(eventData.contexts?.trace?.origin).toBe('auto.ai.langchain');
   expect(eventData.contexts?.trace?.data).toMatchObject({
-    'gen_ai.operation.name': 'chat',
+    'gen_ai.operation.name': 'invoke_agent',
     'gen_ai.system': 'anthropic',
     'gen_ai.request.model': 'claude-3-haiku-20240307',
     'gen_ai.request.temperature': 0.7,
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
index d4abc4ae7220..695ef6f7611f 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
@@ -1,16 +1,4 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
-import {
-  GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
-  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
-  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
-  GEN_AI_SYSTEM_ATTRIBUTE,
-  GEN_AI_TOOL_NAME_ATTRIBUTE,
-  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
-} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';
 
 // These tests are not exhaustive because the instrumentation is
@@ -30,8 +18,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
         // Chat model span
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
@@ -41,8 +29,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
         }),
         // Chain span
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
index dac3af573b0a..4f8ac1e470e2 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
@@ -489,7 +489,7 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.chat',
           data: expect.objectContaining({
             'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON,
-            'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON,
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
           }),
         }),
       ]),
@@ -517,7 +517,7 @@ describe('Anthropic integration', () => {
           op: 'gen_ai.chat',
           data: expect.objectContaining({
             'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON,
-            'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON,
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
           }),
         }),
       ]),
@@ -613,7 +613,7 @@ describe('Anthropic integration', () => {
           status: 'ok',
           data: expect.objectContaining({
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
-            'gen_ai.response.tool_calls': expect.stringContaining('tool_ok_1'),
+            [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('tool_ok_1'),
           }),
         }),
       ]),
@@ -656,8 +656,8 @@ describe('Anthropic integration', () => {
         // Second call: Last message is small and kept without truncation
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
@@ -666,8 +666,8 @@ describe('Anthropic integration', () => {
               { role: 'user', content: 'This is a small message that fits within the limit' },
             ]),
           }),
-          description: 'messages claude-3-haiku-20240307',
-          op: 'gen_ai.messages',
+          description: 'chat claude-3-haiku-20240307',
+          op: 'gen_ai.chat',
           origin: 'auto.ai.anthropic',
           status: 'ok',
         }),
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
index c46f3508743e..8960e81695e9 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
@@ -19,7 +19,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           },
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -53,7 +53,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
@@ -93,7 +93,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]',
           }),
           description: 'chat gemini-1.5-pro create',
@@ -130,7 +130,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
@@ -241,7 +241,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
             'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
             [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-tools-id',
@@ -296,8 +296,8 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
-            'gen_ai.response.streaming': true,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
@@ -320,7 +320,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -335,7 +335,7 @@ describe('Google GenAI integration', () => {
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
           }),
@@ -384,9 +384,9 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 100,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
@@ -409,7 +409,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
             'gen_ai.request.top_p': 0.9,
-            'gen_ai.request.max_tokens': 150,
+            [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
           }),
           description: 'chat gemini-1.5-pro create',
           op: 'gen_ai.chat',
@@ -425,7 +425,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
             [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
@@ -448,7 +448,7 @@ describe('Google GenAI integration', () => {
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
-            'gen_ai.response.streaming': true,
+            [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
           }),
           description: 'generate_content blocked-model stream-response',
           op: 'gen_ai.generate_content',
@@ -523,8 +523,8 @@ describe('Google GenAI integration', () => {
         // Second call: Last message is small and kept without truncation
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
@@ -536,8 +536,8 @@ describe('Google GenAI integration', () => {
               },
             ]),
           }),
-          description: 'models gemini-1.5-flash',
-          op: 'gen_ai.models',
+          description: 'generate_content gemini-1.5-flash',
+          op: 'gen_ai.generate_content',
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
index 14f396d1a9c5..1a4d4df883f6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
@@ -1,22 +1,4 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { afterAll, describe, expect } from 'vitest';
-import {
-  GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
-  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
-  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
-  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
-  GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
-  GEN_AI_RESPONSE_ID_ATTRIBUTE,
-  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
-  GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE,
-  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
-  GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
-  GEN_AI_SYSTEM_ATTRIBUTE,
-  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
-} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
 
 describe('LangChain integration', () => {
@@ -30,8 +12,8 @@ describe('LangChain integration', () => {
         // First span - chat model with claude-3-5-sonnet
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
@@ -42,47 +24,47 @@ describe('LangChain integration', () => {
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
-            [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
+            'gen_ai.response.stop_reason': expect.any(String),
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Second span - chat model with claude-3-opus
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95,
+            'gen_ai.request.top_p': 0.95,
             [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200,
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
-            [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
+            'gen_ai.response.stop_reason': expect.any(String),
           }),
-          description: 'chat claude-3-opus-20240229',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-opus-20240229',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Third span - error handling
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
           }),
-          description: 'chat error-model',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent error-model',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'internal_error',
         }),
@@ -95,8 +77,8 @@ describe('LangChain integration', () => {
         // First span - chat model with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
@@ -106,53 +88,53 @@ describe('LangChain integration', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
-            [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
+            'gen_ai.response.stop_reason': expect.any(String),
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Second span - chat model with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229',
             [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9,
-            [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95,
+            'gen_ai.request.top_p': 0.95,
             [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200,
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
-            [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
+            'gen_ai.response.stop_reason': expect.any(String),
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
-          description: 'chat claude-3-opus-20240229',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-opus-20240229',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Third span - error handling with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
           }),
-          description: 'chat error-model',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent error-model',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'internal_error',
         }),
@@ -184,8 +166,8 @@ describe('LangChain integration', () => {
       spans: expect.arrayContaining([
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
@@ -196,11 +178,11 @@ describe('LangChain integration', () => {
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50,
             [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
-            [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'tool_use',
+            'gen_ai.response.stop_reason': 'tool_use',
             [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
@@ -219,40 +201,40 @@ describe('LangChain integration', () => {
         // First call: String input truncated (only C's remain, D's are cropped)
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
             // Messages should be present and should include truncated string input (contains only Cs)
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Second call: Array input, last message truncated (only C's remain, D's are cropped)
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
             // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs)
             [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Third call: Last message is small and kept without truncation
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
@@ -261,8 +243,8 @@ describe('LangChain integration', () => {
               { role: 'user', content: 'This is a small message that fits within the limit' },
             ]),
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
@@ -309,7 +291,8 @@ describe('LangChain integration', () => {
       // Second call: LangChain call
       // This should have LangChain instrumentation (origin: 'auto.ai.langchain')
       const langchainSpan = spans.find(
-        span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain',
+        span =>
+          span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain',
       );
 
       // Third call: Direct Anthropic call made AFTER LangChain import
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
index 6f2654a86260..eab8421f17f6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
@@ -15,8 +15,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
         // First span - chat model with claude-3-5-sonnet
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
+            'gen_ai.operation.name': 'invoke_agent',
+            'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.ai.langchain',
             'gen_ai.system': 'anthropic',
             'gen_ai.request.model': 'claude-3-5-sonnet-20241022',
@@ -29,16 +29,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
             'gen_ai.response.model': expect.any(String),
             'gen_ai.response.stop_reason': expect.any(String),
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Second span - chat model with claude-3-opus
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
+            'gen_ai.operation.name': 'invoke_agent',
+            'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.ai.langchain',
             'gen_ai.system': 'anthropic',
             'gen_ai.request.model': 'claude-3-opus-20240229',
@@ -52,8 +52,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
             'gen_ai.response.model': expect.any(String),
             'gen_ai.response.stop_reason': expect.any(String),
           }),
-          description: 'chat claude-3-opus-20240229',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-opus-20240229',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
@@ -66,7 +66,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
         //       'gen_ai.system': 'anthropic',
         //       'gen_ai.request.model': 'error-model',
         //     }),
-        //     description: 'chat error-model',
+        //     description: 'invoke_agent error-model',
         //     op: 'gen_ai.chat',
         //     origin: 'auto.ai.langchain',
         //     status: 'internal_error',
@@ -80,8 +80,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
         // First span - chat model with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
+            'gen_ai.operation.name': 'invoke_agent',
+            'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.ai.langchain',
             'gen_ai.system': 'anthropic',
             'gen_ai.request.model': 'claude-3-5-sonnet-20241022',
@@ -96,16 +96,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
             'gen_ai.usage.output_tokens': 15,
             'gen_ai.usage.total_tokens': 25,
           }),
-          description: 'chat claude-3-5-sonnet-20241022',
-          op: 'gen_ai.chat',
+          description: 'invoke_agent claude-3-5-sonnet-20241022',
+          op: 'gen_ai.invoke_agent',
           origin: 'auto.ai.langchain',
           status: 'ok',
         }),
         // Second span - chat model with PII
         expect.objectContaining({
           data: expect.objectContaining({
-            'gen_ai.operation.name': 'chat',
-            'sentry.op': 'gen_ai.chat',
+            'gen_ai.operation.name': 'invoke_agent',
+            'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.ai.langchain',
             'gen_ai.system': 'anthropic',
             'gen_ai.request.model': 'claude-3-opus-20240229',
@@ -121,8 +121,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'chat claude-3-opus-20240229', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-opus-20240229', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -136,7 +136,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.request.model': 'error-model', // 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', @@ -193,8 +193,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -208,8 +208,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -244,40 +244,40 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 
'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -286,8 +286,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -341,7 +341,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second call: LangChain call // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( - span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span => + span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import @@ -386,8 +387,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - initChatModel with gpt-4o expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4o', @@ -400,16 +401,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-4o', 'gen_ai.response.stop_reason': 'stop', }), - description: 'chat gpt-4o', - op: 'gen_ai.chat', + description: 'invoke_agent gpt-4o', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - initChatModel with gpt-3.5-turbo expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -421,8 +422,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-3.5-turbo', 'gen_ai.response.stop_reason': 'stop', }), - description: 'chat gpt-3.5-turbo', - op: 'gen_ai.chat', + description: 'invoke_agent gpt-3.5-turbo', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -435,7 +436,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'openai', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts index 4261248da349..ac6614af7502 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts @@ -24,7 +24,7 @@ describe('Vercel AI integration - generateObject', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), diff --git 
a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index eaab79771a71..1ab0b49032e1 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -18,8 +18,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -47,7 +46,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -77,8 +75,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -110,7 +107,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -137,8 +133,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -166,7 +161,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -188,10 +182,9 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -222,8 +215,7 @@ describe('Vercel AI 
integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -260,7 +252,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -296,8 +287,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -334,7 +324,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -371,8 +360,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -411,7 +399,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -441,11 +428,10 @@ describe('Vercel AI integration', () => { data: { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.input': expect.any(String), - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -487,8 +473,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', 
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -514,7 +499,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -535,10 +519,9 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -610,8 +593,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -637,7 +619,6 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -658,10 +639,9 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -742,7 +722,7 @@ describe('Vercel AI integration', () => { data: expect.objectContaining({ [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', }), }), // The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 251416619376..49a1237079d1 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -24,8 +24,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', 
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -40,7 +39,6 @@ describe('Vercel AI integration (V5)', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -83,8 +81,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -99,7 +96,6 @@ describe('Vercel AI integration (V5)', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -140,8 +136,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -171,7 +166,6 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -185,10 +179,9 @@ describe('Vercel AI integration (V5)', () => { data: { 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -225,8 +218,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -259,7 +251,6 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ 
-287,8 +278,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -303,7 +293,6 @@ describe('Vercel AI integration (V5)', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -349,8 +338,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -361,7 +349,7 @@ describe('Vercel AI integration (V5)', () => { }), // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ - data: { + data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', @@ -386,10 +374,9 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', - }, + }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', origin: 'auto.vercelai.otel', @@ -397,18 +384,17 @@ describe('Vercel AI integration (V5)', () => { }), // Seventh span - tool call execution span expect.objectContaining({ - data: { + data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', - }, + }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', origin: 'auto.vercelai.otel', @@ -470,8 +456,7 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', @@ -500,7 +485,6 @@ describe('Vercel AI integration (V5)', () => { 
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', - 'operation.name': 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, @@ -513,10 +497,9 @@ describe('Vercel AI integration (V5)', () => { data: { 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', - 'operation.name': 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 8f5d1ab65341..def23e825b62 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -25,7 +25,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), @@ -83,7 +83,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), @@ -139,7 +139,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), @@ -183,7 +183,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', @@ -222,7 +222,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), @@ -282,7 +282,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 
'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), @@ -342,7 +342,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), @@ -391,7 +391,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', @@ -461,7 +461,7 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', @@ -503,7 +503,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', - 'gen_ai.tool.name': 'getWeather', + [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', 'gen_ai.tool.type': 'function', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index e5ad6cc14189..bb48a2efe0b7 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -98,10 +98,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} ${modelName}`, - op: 'gen_ai.pipeline', + op: 'gen_ai.invoke_agent', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.pipeline', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', }, }, span => { @@ -136,10 +136,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} ${modelName}`, - op: 'gen_ai.chat', + op: 'gen_ai.invoke_agent', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', }, }, span => { diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 0e36b5542ba5..c79c1140e6a9 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -216,19 +216,18 @@ function extractCommonRequestAttributes( /** * Small helper to assemble boilerplate attributes shared by both request extractors. - * Both LLM and ChatModel invocations use 'chat' as the operation name since modern - * LLM interactions are chat-based and this aligns with OpenTelemetry semantic conventions. 
*/ function baseRequestAttributes( system: unknown, modelName: unknown, + operationType: 'invoke_agent' | 'chat', serialized: LangChainSerialized, invocationParams?: Record<string, unknown>, langSmithMetadata?: Record<string, unknown>, ): Record<string, unknown> { return { [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'), - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationType, [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata), @@ -238,8 +237,8 @@ function baseRequestAttributes( /** * Extracts attributes for plain LLM invocations (string prompts). * - * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. - * Modern LLM invocations typically use chat-based models even when called with string prompts. + * - Operation is tagged as `invoke_agent` following OpenTelemetry semantic conventions. + * LangChain orchestrates LLM invocations as agent operations. * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}` * messages to align with the chat schema used elsewhere. */ @@ -253,7 +252,7 @@ export function extractLLMRequestAttributes( const system = langSmithMetadata?.ls_provider; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); @@ -267,7 +266,7 @@ /** * Extracts attributes for ChatModel invocations (array-of-arrays of messages). * - * - Operation is tagged as `chat`. + * - Operation is tagged as `invoke_agent` since LangChain orchestrates agent invocations. * - We flatten LangChain's `LangChainMessage[][]` and normalize shapes into a * consistent `{ role, content }` array when `recordInputs` is true. * - Provider system value falls back to `serialized.id?.[2]`. @@ -282,7 +281,7 @@ export function extractChatModelRequestAttributes( const system = langSmithMetadata?.ls_provider ?? llm.id?.[2]; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ??
'unknown'; - const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index db625ee81a54..0c6ca2822820 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -59,12 +59,19 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ function mapVercelAiOperationName(operationName: string): string { - // Map to OpenTelemetry well-known values + // Top-level pipeline operations map to invoke_agent if ( operationName === 'ai.generateText' || operationName === 'ai.streamText' || operationName === 'ai.generateObject' || operationName === 'ai.streamObject' || + operationName === 'ai.embed' || + operationName === 'ai.embedMany' + ) { + return 'invoke_agent'; + } + // .do* operations are the actual LLM calls + if ( operationName === 'ai.generateText.doGenerate' || operationName === 'ai.streamText.doStream' || operationName === 'ai.generateObject.doGenerate' || @@ -72,12 +79,7 @@ function mapVercelAiOperationName(operationName: string): string { ) { return 'generate_content'; } - if ( - operationName === 'ai.embed' || - operationName === 'ai.embedMany' || - operationName === 'ai.embed.doEmbed' || - operationName === 'ai.embedMany.doEmbed' - ) { + if (operationName === 'ai.embed.doEmbed' || operationName === 'ai.embedMany.doEmbed') { return 'embeddings'; } if (operationName === 'ai.toolCall') { @@ -188,7 +190,8 @@ function processEndedVercelAiSpan(span: SpanJSON): void { if (attributes[OPERATION_NAME_ATTRIBUTE]) { const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string); attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName; - attributes[OPERATION_NAME_ATTRIBUTE] = operationName; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[OPERATION_NAME_ATTRIBUTE]; } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); From cebf9a6b1a88b3062b459529d54f1b2cf45c7a2b Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Fri, 23 Jan 2026 12:58:44 +0100 Subject: [PATCH 6/8] refactor to use chat for llms and chat models --- .../suites/tracing/langchain/test.ts | 85 +++++++++---------- .../suites/tracing/langchain/v1/test.ts | 82 +++++++++--------- packages/core/src/tracing/langchain/index.ts | 8 +- packages/core/src/tracing/langchain/utils.ts | 15 ++-- .../core/src/tracing/vercel-ai/constants.ts | 19 +++++ packages/core/src/tracing/vercel-ai/index.ts | 20 +---- 6 files changed, 118 insertions(+), 111 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 1a4d4df883f6..ff8c3353a8f7 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -12,8 +12,8 @@ describe('LangChain integration', () => { // First span - chat model with 
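As a quick illustration of the processEndedVercelAiSpan change in the vercel-ai hunk of the previous patch, here is a hedged, self-contained TypeScript sketch of the attribute rewrite; the object literal and its values are examples, not SDK code:

const attributes: Record<string, unknown> = {
  'operation.name': 'ai.generateText', // raw Vercel AI operation id (example value)
  'vercel.ai.model.provider': 'mock-provider',
};
// mapVercelAiOperationName('ai.generateText') yields 'invoke_agent' per the hunk above
attributes['gen_ai.operation.name'] = 'invoke_agent';
// The un-namespaced key is now deleted instead of being overwritten,
// so only the gen_ai.* attribute remains on the finished span.
delete attributes['operation.name'];
// attributes => { 'gen_ai.operation.name': 'invoke_agent', 'vercel.ai.model.provider': 'mock-provider' }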
claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', @@ -26,16 +26,16 @@ describe('LangChain integration', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', @@ -49,22 +49,22 @@ describe('LangChain integration', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third span - error handling expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }), - description: 'invoke_agent error-model', - op: 'gen_ai.invoke_agent', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'internal_error', }), @@ -77,8 +77,8 @@ describe('LangChain integration', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', @@ -93,16 +93,16 @@ describe('LangChain integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 
'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', @@ -118,23 +118,23 @@ describe('LangChain integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true }), - description: 'invoke_agent error-model', - op: 'gen_ai.invoke_agent', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'internal_error', }), @@ -166,8 +166,8 @@ describe('LangChain integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', @@ -179,10 +179,10 @@ describe('LangChain integration', () => { [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), 'gen_ai.response.stop_reason': 'tool_use', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), + 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -201,40 +201,40 @@ describe('LangChain integration', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', @@ -243,8 +243,8 @@ describe('LangChain integration', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -291,8 +291,7 @@ describe('LangChain integration', () => { // Second call: LangChain call // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( - span => - span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index eab8421f17f6..c0aed4077b19 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -15,8 +15,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -29,16 +29,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -52,8 +52,8 @@ conditionalTest({ min: 20 
})('LangChain integration (v1)', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -80,8 +80,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -96,16 +96,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -121,8 +121,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -193,8 +193,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -208,8 +208,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -244,40 +244,40 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 
'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -286,8 +286,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -342,7 +342,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( span => - span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import @@ -387,8 +387,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - initChatModel with gpt-4o expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4o', @@ -401,16 +401,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-4o', 'gen_ai.response.stop_reason': 'stop', }), - description: 'invoke_agent gpt-4o', - op: 'gen_ai.invoke_agent', + description: 'chat gpt-4o', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - initChatModel with gpt-3.5-turbo expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -422,8 +422,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-3.5-turbo', 'gen_ai.response.stop_reason': 'stop', }), - description: 'invoke_agent gpt-3.5-turbo', - op: 'gen_ai.invoke_agent', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 
           status: 'ok',
         }),
diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts
index bb48a2efe0b7..8cf12dfcb861 100644
--- a/packages/core/src/tracing/langchain/index.ts
+++ b/packages/core/src/tracing/langchain/index.ts
@@ -98,10 +98,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}):
       startSpanManual(
         {
           name: `${operationName} ${modelName}`,
-          op: 'gen_ai.invoke_agent',
+          op: 'gen_ai.chat',
           attributes: {
             ...attributes,
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
           },
         },
         span => {
@@ -136,10 +136,10 @@
       startSpanManual(
         {
           name: `${operationName} ${modelName}`,
-          op: 'gen_ai.invoke_agent',
+          op: 'gen_ai.chat',
           attributes: {
             ...attributes,
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
           },
         },
         span => {
diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts
index c79c1140e6a9..d119de798950 100644
--- a/packages/core/src/tracing/langchain/utils.ts
+++ b/packages/core/src/tracing/langchain/utils.ts
@@ -216,18 +216,18 @@ function extractCommonRequestAttributes(

 /**
  * Small helper to assemble boilerplate attributes shared by both request extractors.
+ * Always uses 'chat' as the operation type for all LLM and chat model operations.
  */
 function baseRequestAttributes(
   system: unknown,
   modelName: unknown,
-  operationType: 'invoke_agent' | 'chat',
   serialized: LangChainSerialized,
   invocationParams?: Record<string, unknown>,
   langSmithMetadata?: Record<string, unknown>,
 ): Record<string, unknown> {
   return {
     [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'),
-    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationType,
+    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
     [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName),
     [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN,
     ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata),
@@ -237,8 +237,8 @@ function baseRequestAttributes(
 /**
  * Extracts attributes for plain LLM invocations (string prompts).
  *
- * - Operation is tagged as `invoke_agent` following OpenTelemetry semantic conventions.
- *   LangChain orchestrates LLM invocations as agent operations.
+ * - Operation is tagged as `chat` following OpenTelemetry semantic conventions.
+ *   LangChain LLM operations are treated as chat operations.
  * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}`
  *   messages to align with the chat schema used elsewhere.
  */
@@ -252,7 +252,7 @@ export function extractLLMRequestAttributes(
   const system = langSmithMetadata?.ls_provider;
   const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown';

-  const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata);
+  const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata);

   if (recordInputs && Array.isArray(prompts) && prompts.length > 0) {
     setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length);
@@ -266,7 +266,8 @@
 /**
  * Extracts attributes for ChatModel invocations (array-of-arrays of messages).
  *
- * - Operation is tagged as `invoke_agent` since LangChain orchestrates agent invocations.
+ * - Operation is tagged as `chat` following OpenTelemetry semantic conventions.
+ *   LangChain chat model operations are chat operations.
  * - We flatten LangChain's `LangChainMessage[][]` and normalize shapes into a
  *   consistent `{ role, content }` array when `recordInputs` is true.
  * - Provider system value falls back to `serialized.id?.[2]`.
@@ -281,7 +282,7 @@
   const system = langSmithMetadata?.ls_provider ?? llm.id?.[2];
   const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown';

-  const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata);
+  const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata);

   if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) {
     const normalized = normalizeLangChainMessages(langChainMessages.flat());
diff --git a/packages/core/src/tracing/vercel-ai/constants.ts b/packages/core/src/tracing/vercel-ai/constants.ts
index fe307b03e7fb..57e8bf2a57c8 100644
--- a/packages/core/src/tracing/vercel-ai/constants.ts
+++ b/packages/core/src/tracing/vercel-ai/constants.ts
@@ -3,3 +3,22 @@ import type { Span } from '../../types-hoist/span';
 // Global Map to track tool call IDs to their corresponding spans
 // This allows us to capture tool errors and link them to the correct span
 export const toolCallSpanMap = new Map<string, Span>();
+
+// Operation sets for efficient mapping to OpenTelemetry semantic convention values
+export const INVOKE_AGENT_OPS = new Set([
+  'ai.generateText',
+  'ai.streamText',
+  'ai.generateObject',
+  'ai.streamObject',
+  'ai.embed',
+  'ai.embedMany',
+]);
+
+export const GENERATE_CONTENT_OPS = new Set([
+  'ai.generateText.doGenerate',
+  'ai.streamText.doStream',
+  'ai.generateObject.doGenerate',
+  'ai.streamObject.doStream',
+]);
+
+export const EMBEDDINGS_OPS = new Set(['ai.embed.doEmbed', 'ai.embedMany.doEmbed']);
diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts
index 0c6ca2822820..1a06937b8aae 100644
--- a/packages/core/src/tracing/vercel-ai/index.ts
+++ b/packages/core/src/tracing/vercel-ai/index.ts
@@ -19,7 +19,7 @@ import {
   GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
   GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
 } from '../ai/gen-ai-attributes';
-import { toolCallSpanMap } from './constants';
+import { EMBEDDINGS_OPS, GENERATE_CONTENT_OPS, INVOKE_AGENT_OPS, toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
 import {
   accumulateTokensForParent,
@@ -60,26 +60,14 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void {
  */
 function mapVercelAiOperationName(operationName: string): string {
   // Top-level pipeline operations map to invoke_agent
-  if (
-    operationName === 'ai.generateText' ||
-    operationName === 'ai.streamText' ||
-    operationName === 'ai.generateObject' ||
-    operationName === 'ai.streamObject' ||
-    operationName === 'ai.embed' ||
-    operationName === 'ai.embedMany'
-  ) {
+  if (INVOKE_AGENT_OPS.has(operationName)) {
     return 'invoke_agent';
   }
   // .do* operations are the actual LLM calls
-  if (
-    operationName === 'ai.generateText.doGenerate' ||
-    operationName === 'ai.streamText.doStream' ||
-    operationName === 'ai.generateObject.doGenerate' ||
-    operationName === 'ai.streamObject.doStream'
-  ) {
+  if (GENERATE_CONTENT_OPS.has(operationName)) {
     return 'generate_content';
   }
-  if (operationName === 'ai.embed.doEmbed' || operationName === 'ai.embedMany.doEmbed') {
+  if (EMBEDDINGS_OPS.has(operationName)) {
     return 'embeddings';
   }
   if (operationName === 'ai.toolCall') {

From 7111057b0c1bf3a06df5289105f4389d1096e445 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Fri, 23 Jan 2026 12:59:08 +0100
Subject: [PATCH 7/8] update with linting

---
 .../node-integration-tests/suites/tracing/langchain/v1/test.ts | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
index c0aed4077b19..95e4529b3fdc 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
@@ -341,8 +341,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
     // Second call: LangChain call
     // This should have LangChain instrumentation (origin: 'auto.ai.langchain')
     const langchainSpan = spans.find(
-      span =>
-        span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain',
+      span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain',
     );

     // Third call: Direct Anthropic call made AFTER LangChain import

From 2c9ea072b67ce990fc59c0fe5ff1b036f613d455 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Fri, 23 Jan 2026 13:45:20 +0100
Subject: [PATCH 8/8] fix cloudflare and browser tests

---
 .../tracing/ai-providers/langchain/test.ts |  6 +++---
 .../suites/tracing/langchain/test.ts       | 20 +++++++++++++++----
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts
index 6ea947a61baf..9cc1cc9ff98b 100644
--- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts
@@ -20,11 +20,11 @@ sentryTest('manual LangChain instrumentation sends gen_ai transactions', async (
   const eventData = envelopeRequestParser(req);

   // Verify it's a gen_ai transaction
-  expect(eventData.transaction).toBe('invoke_agent claude-3-haiku-20240307');
-  expect(eventData.contexts?.trace?.op).toBe('gen_ai.invoke_agent');
+  expect(eventData.transaction).toBe('chat claude-3-haiku-20240307');
+  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
   expect(eventData.contexts?.trace?.origin).toBe('auto.ai.langchain');
   expect(eventData.contexts?.trace?.data).toMatchObject({
-    'gen_ai.operation.name': 'invoke_agent',
+    'gen_ai.operation.name': 'chat',
     'gen_ai.system': 'anthropic',
     'gen_ai.request.model': 'claude-3-haiku-20240307',
     'gen_ai.request.temperature': 0.7,
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
index 695ef6f7611f..d4abc4ae7220 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
@@ -1,4 +1,16 @@
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
 import { expect, it } from 'vitest';
+import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
+  GEN_AI_SYSTEM_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
 import { createRunner } from '../../../runner';

 // These tests are not exhaustive because the instrumentation is
@@ -18,8 +30,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
         // Chat model span
         expect.objectContaining({
           data: expect.objectContaining({
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
             [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
             [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
@@ -29,8 +41,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
           }),
-          description: 'invoke_agent claude-3-5-sonnet-20241022',
-          op: 'gen_ai.invoke_agent',
+          description: 'chat claude-3-5-sonnet-20241022',
+          op: 'gen_ai.chat',
           origin: 'auto.ai.langchain',
         }),
         // Chain span
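
Note on the constants.ts / index.ts hunks in PATCH 1/8 above: the refactor swaps chained string comparisons for Set lookups. Below is a minimal, self-contained TypeScript sketch of the resulting mapping for readers who want to try it outside the SDK. The three Sets mirror the patch; the 'ai.toolCall' branch and the final fallback are assumptions, since the excerpt cuts off inside that branch.

// sketch.ts - illustrative only, not part of the patch.
const INVOKE_AGENT_OPS = new Set([
  'ai.generateText',
  'ai.streamText',
  'ai.generateObject',
  'ai.streamObject',
  'ai.embed',
  'ai.embedMany',
]);
const GENERATE_CONTENT_OPS = new Set([
  'ai.generateText.doGenerate',
  'ai.streamText.doStream',
  'ai.generateObject.doGenerate',
  'ai.streamObject.doStream',
]);
const EMBEDDINGS_OPS = new Set(['ai.embed.doEmbed', 'ai.embedMany.doEmbed']);

function mapVercelAiOperationName(operationName: string): string {
  // Top-level pipeline operations map to invoke_agent
  if (INVOKE_AGENT_OPS.has(operationName)) {
    return 'invoke_agent';
  }
  // .do* operations are the actual LLM calls
  if (GENERATE_CONTENT_OPS.has(operationName)) {
    return 'generate_content';
  }
  if (EMBEDDINGS_OPS.has(operationName)) {
    return 'embeddings';
  }
  // Assumed: tool-call spans map to the OpenTelemetry-style 'execute_tool';
  // the original branch body is elided in this excerpt.
  if (operationName === 'ai.toolCall') {
    return 'execute_tool';
  }
  // Fallback (also assumed): pass unknown operation names through unchanged.
  return operationName;
}

// Example usage: 'ai.streamText' -> 'invoke_agent', 'ai.streamText.doStream' -> 'generate_content'
console.log(mapVercelAiOperationName('ai.streamText'), mapVercelAiOperationName('ai.streamText.doStream'));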