From 31671227ceb58ed6eebd682657220bf7a080c52a Mon Sep 17 00:00:00 2001 From: Nicolas Hrubec Date: Fri, 23 Jan 2026 08:30:47 +0100 Subject: [PATCH 1/3] move away from gen_ai.request.messages to gen_ai.input.messages --- .../suites/tracing/langgraph/test.ts | 4 +-- .../suites/tracing/anthropic/test.ts | 20 +++++------ .../suites/tracing/google-genai/test.ts | 30 ++++++++-------- .../suites/tracing/langchain/test.ts | 14 ++++---- .../suites/tracing/langchain/v1/test.ts | 12 +++---- .../suites/tracing/langgraph/test.ts | 10 +++--- .../tracing/openai/openai-tool-calls/test.ts | 16 ++++----- .../suites/tracing/openai/test.ts | 34 +++++++++--------- .../suites/tracing/openai/v6/test.ts | 28 +++++++-------- .../suites/tracing/vercelai/test.ts | 36 +++++++++---------- .../suites/tracing/vercelai/v5/test.ts | 32 ++++++++--------- .../suites/tracing/vercelai/v6/test.ts | 16 ++++----- .../core/src/tracing/ai/gen-ai-attributes.ts | 4 +-- .../core/src/tracing/anthropic-ai/utils.ts | 8 ++--- .../core/src/tracing/google-genai/index.ts | 8 ++--- packages/core/src/tracing/langchain/utils.ts | 12 +++---- packages/core/src/tracing/langgraph/index.ts | 8 ++--- packages/core/src/tracing/openai/index.ts | 8 ++--- packages/core/src/tracing/vercel-ai/index.ts | 4 +-- packages/core/src/tracing/vercel-ai/utils.ts | 14 ++++---- .../test/lib/utils/anthropic-utils.test.ts | 8 ++--- 21 files changed, 163 insertions(+), 163 deletions(-) diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts index da9635952632..6efa07164df5 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts @@ -2,10 +2,10 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from ' import { expect, it } from 'vitest'; import { GEN_AI_AGENT_NAME_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_PIPELINE_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, @@ -49,7 +49,7 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in SF?"}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in SF?"}]', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index 1a20a3c3dc7b..edacb12738ca 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -2,10 +2,10 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from ' import { afterAll, describe, expect } from 'vitest'; import { ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - 
GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_REQUEST_STREAM_ATTRIBUTE, GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, @@ -107,7 +107,7 @@ describe('Anthropic integration', () => { data: expect.objectContaining({ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123', @@ -147,7 +147,7 @@ describe('Anthropic integration', () => { expect.objectContaining({ data: expect.objectContaining({ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', @@ -180,7 +180,7 @@ describe('Anthropic integration', () => { expect.objectContaining({ data: expect.objectContaining({ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', @@ -250,7 +250,7 @@ describe('Anthropic integration', () => { expect.objectContaining({ data: expect.objectContaining({ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream123', @@ -308,7 +308,7 @@ describe('Anthropic integration', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), }), @@ -316,7 +316,7 @@ describe('Anthropic integration', () => { expect.objectContaining({ data: expect.objectContaining({ [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', // Present because recordOutputs=true is set in options }), op: 'gen_ai.messages', @@ -667,7 +667,7 @@ describe('Anthropic integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Messages should be present (truncation happened) and should be a JSON array - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching( + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 
expect.stringMatching( /^\[\{"role":"user","content":"C+"\}\]$/, ), }), @@ -685,7 +685,7 @@ describe('Anthropic integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Small message should be kept intact - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -719,7 +719,7 @@ describe('Anthropic integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Only the last message (with filtered media) should be kept - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: [ diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts index b61aea2ac7b8..303f4a4e264c 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -1,10 +1,10 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, GEN_AI_REQUEST_TOP_P_ATTRIBUTE, @@ -114,7 +114,7 @@ describe('Google GenAI integration', () => { [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","parts":[{"text":"Hello, how are you?"}]}]', }), description: 'chat gemini-1.5-pro create', op: 'gen_ai.chat', @@ -129,7 +129,7 @@ describe('Google GenAI integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, @@ -151,7 +151,7 @@ describe('Google GenAI integration', () => { [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, @@ -170,7 +170,7 @@ describe('Google GenAI integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 
'error-model', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), description: 'models error-model', op: 'gen_ai.models', @@ -186,7 +186,7 @@ describe('Google GenAI integration', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), description: expect.not.stringContaining('stream-response'), // Non-streaming span @@ -239,7 +239,7 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, @@ -260,7 +260,7 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls @@ -283,7 +283,7 @@ describe('Google GenAI integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, @@ -405,7 +405,7 @@ describe('Google GenAI integration', () => { [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9, [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', @@ -444,7 +444,7 @@ describe('Google GenAI integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), 
// Should include message when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', @@ -467,7 +467,7 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, }), description: 'models blocked-model stream-response', @@ -484,7 +484,7 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), description: 'models error-model stream-response', op: 'gen_ai.models', @@ -531,7 +531,7 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Messages should be present (truncation happened) and should be a JSON array with parts - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching( + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching( /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/, ), }), @@ -549,7 +549,7 @@ describe('Google GenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', // Small message should be kept intact - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', parts: [{ text: 'This is a small message that fits within the limit' }], diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index eb9344a12608..d73c790ca8e8 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -1,9 +1,9 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, GEN_AI_REQUEST_TOP_P_ATTRIBUTE, @@ -102,7 +102,7 @@ describe('LangChain integration', () => { [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), 
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), @@ -127,7 +127,7 @@ describe('LangChain integration', () => { [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), @@ -149,7 +149,7 @@ describe('LangChain integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true }), description: 'chat error-model', op: 'gen_ai.chat', @@ -225,7 +225,7 @@ describe('LangChain integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -241,7 +241,7 @@ describe('LangChain integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -257,7 +257,7 @@ describe('LangChain integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Small message should be kept intact - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index b05a70acdeb4..4e7725b2f153 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -87,7 +87,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', 'gen_ai.request.temperature': 0.7, 'gen_ai.request.max_tokens': 100, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true 'gen_ai.response.id': expect.any(String), 
'gen_ai.response.model': expect.any(String), @@ -112,7 +112,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.request.temperature': 0.9, 'gen_ai.request.top_p': 0.95, 'gen_ai.request.max_tokens': 200, - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true 'gen_ai.response.id': expect.any(String), 'gen_ai.response.model': expect.any(String), @@ -134,7 +134,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'sentry.origin': 'auto.ai.langchain', // 'gen_ai.system': 'anthropic', // 'gen_ai.request.model': 'error-model', - // 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + // 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true // }), // description: 'chat error-model', // op: 'gen_ai.chat', @@ -250,7 +250,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + 'gen_ai.input.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -266,7 +266,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) - 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + 'gen_ai.input.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -282,7 +282,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Small message should be kept intact - 'gen_ai.request.messages': JSON.stringify([ + 'gen_ai.input.messages': JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts index 539bce1a740e..7ce467292bb4 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts @@ -3,10 +3,10 @@ import { afterAll, describe, expect } from 'vitest'; import { GEN_AI_AGENT_NAME_ATTRIBUTE, GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_PIPELINE_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, @@ -93,7 +93,7 @@ describe('LangGraph integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', - 
[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather today?'), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather today?'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -108,7 +108,7 @@ describe('LangGraph integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph', [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant', [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('Tell me about the weather'), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('Tell me about the weather'), }), description: 'invoke_agent weather_assistant', op: 'gen_ai.invoke_agent', @@ -143,7 +143,7 @@ describe('LangGraph integration', () => { [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent', [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_agent', [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather?'), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather?'), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringContaining('Response without calling tools'), @@ -178,7 +178,7 @@ describe('LangGraph integration', () => { [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent', [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_calling_agent', [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'), - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringContaining('San Francisco'), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('San Francisco'), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringMatching(/"role":"tool"/), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index ac40fbe94249..84689dd058f6 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts @@ -182,8 +182,8 @@ describe('OpenAI Tool Calls integration', () => { 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, 'gen_ai.response.model': 'gpt-4', 'gen_ai.response.id': 'chatcmpl-tools-123', @@ -213,8 +213,8 @@ describe('OpenAI Tool Calls integration', () => { 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', 'gen_ai.request.stream': true, - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, 'gen_ai.response.model': 'gpt-4', 'gen_ai.response.id': 
'chatcmpl-stream-tools-123', @@ -243,8 +243,8 @@ describe('OpenAI Tool Calls integration', () => { 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, 'gen_ai.response.model': 'gpt-4', 'gen_ai.response.id': 'resp_tools_789', @@ -273,8 +273,8 @@ describe('OpenAI Tool Calls integration', () => { 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', 'gen_ai.request.stream': true, - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, 'gen_ai.response.model': 'gpt-4', 'gen_ai.response.id': 'resp_stream_tools_789', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index 52ddc0837097..995db8a40ad6 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -3,11 +3,11 @@ import { afterAll, describe, expect } from 'vitest'; import { GEN_AI_CONVERSATION_ID_ATTRIBUTE, GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE, GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_REQUEST_STREAM_ATTRIBUTE, GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, @@ -186,8 +186,8 @@ describe('OpenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', @@ -214,7 +214,7 @@ describe('OpenAI integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', @@ -241,8 +241,8 @@ describe('OpenAI integration', () => { [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', - 
[GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -259,8 +259,8 @@ describe('OpenAI integration', () => { [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', @@ -289,7 +289,7 @@ describe('OpenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: 'Test streaming responses API', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Test streaming responses API', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Streaming response to: Test streaming responses APITest streaming responses API', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', @@ -316,8 +316,8 @@ describe('OpenAI integration', () => { [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', @@ -336,14 +336,14 @@ describe('OpenAI integration', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), }), // Check that custom options are respected for streaming expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, // Should be marked as stream }), @@ -619,7 +619,7 @@ describe('OpenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Messages should be present (truncation happened) and should be a JSON array of a single index - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching( + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching( 
/^\[\{"role":"user","content":"C+"\}\]$/, ), }), @@ -637,7 +637,7 @@ describe('OpenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Small message should be kept intact - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify([ + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -675,7 +675,7 @@ describe('OpenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Messages should be present and should include truncated string input (contains only As) - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/), }), description: 'responses gpt-3.5-turbo', op: 'gen_ai.responses', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts index 9b4120b143e4..18b11e1b852d 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts @@ -159,8 +159,8 @@ describe('OpenAI integration (V6)', () => { 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.messages.original_length': 2, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', + 'gen_ai.input.messages.original_length': 2, + 'gen_ai.input.messages': '[{"role":"user","content":"What is the capital of France?"}]', 'gen_ai.response.model': 'gpt-3.5-turbo', 'gen_ai.response.id': 'chatcmpl-mock123', 'gen_ai.response.finish_reasons': '["stop"]', @@ -187,7 +187,7 @@ describe('OpenAI integration (V6)', () => { 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.messages': 'Translate this to French: Hello', + 'gen_ai.input.messages': 'Translate this to French: Hello', 'gen_ai.response.text': 'Response to: Translate this to French: Hello', 'gen_ai.response.finish_reasons': '["completed"]', 'gen_ai.response.model': 'gpt-3.5-turbo', @@ -214,8 +214,8 @@ describe('OpenAI integration (V6)', () => { 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'error-model', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"This will fail"}]', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -232,8 +232,8 @@ describe('OpenAI integration (V6)', () => { 'gen_ai.request.model': 'gpt-4', 'gen_ai.request.temperature': 0.8, 'gen_ai.request.stream': true, - 'gen_ai.request.messages.original_length': 2, - 'gen_ai.request.messages': '[{"role":"user","content":"Tell me about streaming"}]', + 'gen_ai.input.messages.original_length': 2, + 'gen_ai.input.messages': '[{"role":"user","content":"Tell me about streaming"}]', 'gen_ai.response.text': 'Hello from OpenAI streaming!', 'gen_ai.response.finish_reasons': '["stop"]', 'gen_ai.response.id': 'chatcmpl-stream-123', @@ -262,7 +262,7 @@ describe('OpenAI integration (V6)', () => { 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', 'gen_ai.request.stream': true, - 'gen_ai.request.messages': 'Test streaming responses API', + 'gen_ai.input.messages': 'Test streaming 
responses API', 'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API', 'gen_ai.response.finish_reasons': '["in_progress","completed"]', 'gen_ai.response.id': 'resp_stream_456', @@ -288,8 +288,8 @@ describe('OpenAI integration (V6)', () => { 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'error-model', 'gen_ai.request.stream': true, - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"This will fail"}]', 'gen_ai.system': 'openai', 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', @@ -308,16 +308,16 @@ describe('OpenAI integration (V6)', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + 'gen_ai.input.messages.original_length': expect.any(Number), + 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true }), }), // Check that custom options are respected for streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + 'gen_ai.input.messages.original_length': expect.any(Number), + 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true 'gen_ai.request.stream': true, // Should be marked as stream }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 05d209176179..2d50f3dd27fd 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -2,11 +2,11 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from ' import type { Event } from '@sentry/node'; import { afterAll, describe, expect } from 'vitest'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_PROMPT_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, @@ -90,8 +90,8 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}', - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), @@ -118,8 +118,8 @@ describe('Vercel AI integration', () => { 
// Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: { - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), @@ -230,8 +230,8 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}', - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', @@ -263,8 +263,8 @@ describe('Vercel AI integration', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: { - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], @@ -303,8 +303,8 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}', - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), @@ -336,8 +336,8 @@ describe('Vercel AI integration', () => { // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: { - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), @@ -375,8 +375,8 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}', - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', 
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', @@ -410,8 +410,8 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 2e769f98dbda..1c111cf9eda6 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -82,8 +82,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', 'gen_ai.response.model': 'mock-model-id', 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -115,8 +115,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.id': expect.any(String), 'gen_ai.response.text': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), + 'gen_ai.input.messages.original_length': expect.any(Number), + 'gen_ai.input.messages': expect.any(String), 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -214,8 +214,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', 'gen_ai.response.text': 'First span here!', 'vercel.ai.settings.maxRetries': 2, @@ -241,8 +241,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', @@ -274,8 +274,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', 'gen_ai.response.text': expect.any(String), 'vercel.ai.settings.maxRetries': 2, @@ -312,8 +312,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.id': expect.any(String), 'gen_ai.response.text': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), + 'gen_ai.input.messages.original_length': expect.any(Number), + 'gen_ai.input.messages': expect.any(String), 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -334,8 +334,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', 'gen_ai.response.tool_calls': expect.any(String), 'vercel.ai.settings.maxRetries': 2, @@ -361,8 +361,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'gen_ai.request.messages.original_length': expect.any(Number), - 'gen_ai.request.messages': expect.any(String), + 'gen_ai.input.messages.original_length': expect.any(Number), + 'gen_ai.input.messages': expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 63ca5fcde258..e417f54147de 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -85,7 +85,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', 'gen_ai.response.model': 'mock-model-id', 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -118,7 +118,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.id': expect.any(String), 'gen_ai.response.text': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages': expect.any(String), + 'gen_ai.input.messages': expect.any(String), 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -219,7 +219,7 @@ describe('Vercel AI integration 
(V6)', () => { 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]', + 'gen_ai.input.messages': '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', 'gen_ai.response.text': 'First span here!', 'vercel.ai.settings.maxRetries': 2, @@ -246,7 +246,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + 'gen_ai.input.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', @@ -279,7 +279,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]', + 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', 'gen_ai.response.text': expect.any(String), 'vercel.ai.settings.maxRetries': 2, @@ -317,7 +317,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.id': expect.any(String), 'gen_ai.response.text': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.request.messages': expect.any(String), + 'gen_ai.input.messages': expect.any(String), 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, @@ -339,7 +339,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', 'gen_ai.response.tool_calls': expect.any(String), 'vercel.ai.settings.maxRetries': 2, @@ -366,7 +366,7 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.messages': expect.any(String), + 'gen_ai.input.messages': expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index a52afa58a430..afe3aa0d0b32 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -118,13 +118,13 @@ export const GEN_AI_OPERATION_NAME_ATTRIBUTE = 'gen_ai.operation.name'; /** * Original length of messages array, used to indicate truncations had occured */ -export const 
GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE = 'gen_ai.request.messages.original_length'; +export const GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE = 'gen_ai.input.messages.original_length'; /** * The prompt messages * Only recorded when recordInputs is enabled */ -export const GEN_AI_REQUEST_MESSAGES_ATTRIBUTE = 'gen_ai.request.messages'; +export const GEN_AI_INPUT_MESSAGES_ATTRIBUTE = 'gen_ai.input.messages'; /** * The response text diff --git a/packages/core/src/tracing/anthropic-ai/utils.ts b/packages/core/src/tracing/anthropic-ai/utils.ts index f10b3ebe6358..81465644740a 100644 --- a/packages/core/src/tracing/anthropic-ai/utils.ts +++ b/packages/core/src/tracing/anthropic-ai/utils.ts @@ -2,8 +2,8 @@ import { captureException } from '../../exports'; import { SPAN_STATUS_ERROR } from '../../tracing'; import type { Span } from '../../types-hoist/span'; import { - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { getTruncatedJsonString } from '../ai/utils'; import { ANTHROPIC_AI_INSTRUMENTED_METHODS } from './constants'; @@ -23,8 +23,8 @@ export function setMessagesAttribute(span: Span, messages: unknown): void { const length = Array.isArray(messages) ? messages.length : undefined; if (length !== 0) { span.setAttributes({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages), - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: length, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: length, }); } } diff --git a/packages/core/src/tracing/google-genai/index.ts b/packages/core/src/tracing/google-genai/index.ts index 53af7a9632cb..1fda067419c4 100644 --- a/packages/core/src/tracing/google-genai/index.ts +++ b/packages/core/src/tracing/google-genai/index.ts @@ -6,12 +6,12 @@ import { startSpan, startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; import { handleCallbackErrors } from '../../utils/handleCallbackErrors'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, @@ -168,8 +168,8 @@ function addPrivateRequestAttributes(span: Span, params: Record if (Array.isArray(messages) && messages.length) { span.setAttributes({ - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(truncateGenAiMessages(messages)), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify(truncateGenAiMessages(messages)), }); } } diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 0a07ae8df370..5ccee0f31711 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -1,11 +1,11 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import type { SpanAttributeValue } from '../../types-hoist/span'; import { + 
GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, GEN_AI_REQUEST_STREAM_ATTRIBUTE, @@ -254,9 +254,9 @@ export function extractLLMRequestAttributes( const attrs = baseRequestAttributes(system, modelName, 'pipeline', llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { - setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); + setIfDefined(attrs, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); const messages = prompts.map(p => ({ role: 'user', content: p })); - setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, asString(messages)); + setIfDefined(attrs, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, asString(messages)); } return attrs; @@ -284,9 +284,9 @@ export function extractChatModelRequestAttributes( if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); - setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, normalized.length); + setIfDefined(attrs, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, normalized.length); const truncated = truncateGenAiMessages(normalized); - setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, asString(truncated)); + setIfDefined(attrs, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, asString(truncated)); } return attrs; diff --git a/packages/core/src/tracing/langgraph/index.ts b/packages/core/src/tracing/langgraph/index.ts index c0800e05e6da..71a9a9e1e895 100644 --- a/packages/core/src/tracing/langgraph/index.ts +++ b/packages/core/src/tracing/langgraph/index.ts @@ -4,12 +4,12 @@ import { SPAN_STATUS_ERROR } from '../../tracing'; import { GEN_AI_AGENT_NAME_ATTRIBUTE, GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_PIPELINE_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { truncateGenAiMessages } from '../ai/messageTruncation'; import type { LangChainMessage } from '../langchain/types'; @@ -140,8 +140,8 @@ function instrumentCompiledGraphInvoke( const normalizedMessages = normalizeLangChainMessages(inputMessages); const truncatedMessages = truncateGenAiMessages(normalizedMessages); span.setAttributes({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(truncatedMessages), - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: normalizedMessages.length, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify(truncatedMessages), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: normalizedMessages.length, }); } diff --git a/packages/core/src/tracing/openai/index.ts b/packages/core/src/tracing/openai/index.ts index 51ac5ac4901b..8e9cddd9a58c 100644 --- a/packages/core/src/tracing/openai/index.ts +++ b/packages/core/src/tracing/openai/index.ts @@ -6,10 +6,10 @@ import { startSpan, startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; import { GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, + 
GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, @@ -148,11 +148,11 @@ function addRequestAttributes(span: Span, params: Record, opera } const truncatedInput = getTruncatedJsonString(src); - span.setAttribute(GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, truncatedInput); + span.setAttribute(GEN_AI_INPUT_MESSAGES_ATTRIBUTE, truncatedInput); // Record original length if it's an array if (Array.isArray(src)) { - span.setAttribute(GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, src.length); + span.setAttribute(GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, src.length); } } diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 375e803159be..edf133ed4b65 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -4,8 +4,8 @@ import type { Event } from '../../types-hoist/event'; import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON, SpanOrigin } from '../../types-hoist/span'; import { spanToJSON } from '../../utils/spanUtils'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, @@ -152,7 +152,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { // Rename AI SDK attributes to standardized gen_ai attributes renameAttributeKey(attributes, OPERATION_NAME_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE); - renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); + renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts index 05dcc1f43817..4ecdcc628b47 100644 --- a/packages/core/src/tracing/vercel-ai/utils.ts +++ b/packages/core/src/tracing/vercel-ai/utils.ts @@ -6,9 +6,9 @@ import { GEN_AI_EXECUTE_TOOL_OPERATION_ATTRIBUTE, GEN_AI_GENERATE_OBJECT_DO_GENERATE_OPERATION_ATTRIBUTE, GEN_AI_GENERATE_TEXT_DO_GENERATE_OPERATION_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, - GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_STREAM_OBJECT_DO_STREAM_OPERATION_ATTRIBUTE, GEN_AI_STREAM_TEXT_DO_STREAM_OPERATION_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, @@ -139,14 +139,14 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes const prompt = attributes[AI_PROMPT_ATTRIBUTE]; if ( typeof prompt === 'string' && - !attributes[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE] && + !attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] && !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] ) { const messages = convertPromptToMessages(prompt); if (messages.length) { span.setAttributes({ - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages), - 
[GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length, }); } } else if (typeof attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] === 'string') { @@ -155,8 +155,8 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes if (Array.isArray(messages)) { span.setAttributes({ [AI_PROMPT_MESSAGES_ATTRIBUTE]: undefined, - [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages), - [GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length, }); } // eslint-disable-next-line no-empty diff --git a/packages/core/test/lib/utils/anthropic-utils.test.ts b/packages/core/test/lib/utils/anthropic-utils.test.ts index 74d4e6b85c17..60c1591cdadc 100644 --- a/packages/core/test/lib/utils/anthropic-utils.test.ts +++ b/packages/core/test/lib/utils/anthropic-utils.test.ts @@ -86,22 +86,22 @@ describe('anthropic-ai-utils', () => { setMessagesAttribute(span, [{ role: 'user', content }]); const result = [{ role: 'user', content: 'A'.repeat(19972) }]; expect(mock.attributes).toStrictEqual({ - 'gen_ai.request.messages.original_length': 1, - 'gen_ai.request.messages': JSON.stringify(result), + 'gen_ai.input.messages.original_length': 1, + 'gen_ai.input.messages': JSON.stringify(result), }); }); it('removes length when setting new value ', () => { setMessagesAttribute(span, { content: 'hello, world' }); expect(mock.attributes).toStrictEqual({ - 'gen_ai.request.messages': '{"content":"hello, world"}', + 'gen_ai.input.messages': '{"content":"hello, world"}', }); }); it('ignores empty array', () => { setMessagesAttribute(span, []); expect(mock.attributes).toStrictEqual({ - 'gen_ai.request.messages': '{"content":"hello, world"}', + 'gen_ai.input.messages': '{"content":"hello, world"}', }); }); }); From 6f64375d45185cab81f2fba8aa7ab8d519b50a63 Mon Sep 17 00:00:00 2001 From: Nicolas Hrubec Date: Fri, 23 Jan 2026 15:31:22 +0100 Subject: [PATCH 2/3] yarn fix --- .../node-integration-tests/suites/tracing/anthropic/test.ts | 4 +--- .../node-integration-tests/suites/tracing/openai/test.ts | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index edacb12738ca..39bf65c8c3f7 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -667,9 +667,7 @@ describe('Anthropic integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', // Messages should be present (truncation happened) and should be a JSON array - [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching( - /^\[\{"role":"user","content":"C+"\}\]$/, - ), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'messages claude-3-haiku-20240307', op: 'gen_ai.messages', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index 995db8a40ad6..d8c6cd76de56 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ 
b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -619,9 +619,7 @@ describe('OpenAI integration', () => { [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Messages should be present (truncation happened) and should be a JSON array of a single index - [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching( - /^\[\{"role":"user","content":"C+"\}\]$/, - ), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', From 97620eb7ac361ba12632589dee991cfa2402e65b Mon Sep 17 00:00:00 2001 From: Nicolas Hrubec Date: Fri, 23 Jan 2026 15:51:38 +0100 Subject: [PATCH 3/3] use constants in remaining tests --- .../suites/tracing/langchain/v1/test.ts | 280 ++++----- .../tracing/openai/openai-tool-calls/test.ts | 338 ++++++----- .../suites/tracing/openai/v6/test.ts | 555 +++++++++--------- .../suites/tracing/vercelai/v5/test.ts | 359 +++++------ .../suites/tracing/vercelai/v6/test.ts | 342 ++++++----- 5 files changed, 988 insertions(+), 886 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index 4e7725b2f153..b38141b551a3 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -1,4 +1,22 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, expect } from 'vitest'; +import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { conditionalTest } from '../../../../utils'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; @@ -15,19 +33,19 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, 
+ [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -37,20 +55,20 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, - 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -60,11 +78,11 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Third span - error handling // expect.objectContaining({ // data: expect.objectContaining({ - // 'gen_ai.operation.name': 'chat', - // 'sentry.op': 'gen_ai.chat', - // 'sentry.origin': 'auto.ai.langchain', - // 'gen_ai.system': 'anthropic', - // 'gen_ai.request.model': 'error-model', + // [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + // [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + // [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + // [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + // [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', // }), // description: 'chat error-model', // op: 'gen_ai.chat', @@ -80,21 +98,21 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + 
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -104,22 +122,22 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-opus-20240229', - 'gen_ai.request.temperature': 0.9, - 'gen_ai.request.top_p': 0.95, - 'gen_ai.request.max_tokens': 200, - 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': expect.any(String), - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9, + [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String), + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), description: 'chat claude-3-opus-20240229', op: 'gen_ai.chat', @@ -129,12 +147,12 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Third span - error handling with PII // expect.objectContaining({ // data: expect.objectContaining({ - // 'gen_ai.operation.name': 'chat', - // 'sentry.op': 'gen_ai.chat', - // 'sentry.origin': 'auto.ai.langchain', - // 'gen_ai.system': 'anthropic', - // 'gen_ai.request.model': 'error-model', - // 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true + // [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + // [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + // [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + // [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + // [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + // [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when 
recordInputs: true // }), // description: 'chat error-model', // op: 'gen_ai.chat', @@ -193,20 +211,20 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 150, - 'gen_ai.usage.input_tokens': 20, - 'gen_ai.usage.output_tokens': 30, - 'gen_ai.usage.total_tokens': 50, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': expect.any(String), - 'gen_ai.response.stop_reason': 'tool_use', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'tool_use', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -244,13 +262,13 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) - 'gen_ai.input.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -260,13 +278,13 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) - 'gen_ai.input.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), + 
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), description: 'chat claude-3-5-sonnet-20241022', op: 'gen_ai.chat', @@ -276,13 +294,13 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'anthropic', - 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022', // Small message should be kept intact - 'gen_ai.input.messages': JSON.stringify([ + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([ { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), @@ -388,19 +406,19 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - initChatModel with gpt-4o expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4o', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.request.max_tokens': 100, - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'gpt-4o', - 'gen_ai.response.stop_reason': 'stop', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4o', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4o', + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'stop', }), description: 'chat gpt-4o', op: 'gen_ai.chat', @@ -410,18 +428,18 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second span - initChatModel with gpt-3.5-turbo expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.langchain', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.5, - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.stop_reason': 'stop', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.5, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 
'gpt-3.5-turbo', + [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'stop', }), description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -431,11 +449,11 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Third span - error handling // expect.objectContaining({ // data: expect.objectContaining({ - // 'gen_ai.operation.name': 'chat', - // 'sentry.op': 'gen_ai.chat', - // 'sentry.origin': 'auto.ai.langchain', - // 'gen_ai.system': 'openai', - // 'gen_ai.request.model': 'error-model', + // [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + // [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + // [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain', + // [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + // [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', // }), // description: 'chat error-model', // op: 'gen_ai.chat', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index 84689dd058f6..d7382e567122 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts @@ -1,4 +1,28 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_STREAM_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, + OPENAI_RESPONSE_ID_ATTRIBUTE, + OPENAI_RESPONSE_MODEL_ATTRIBUTE, + OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, + OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; describe('OpenAI Tool Calls integration', () => { @@ -63,23 +87,23 @@ describe('OpenAI Tool Calls integration', () => { // First span - chat completion with tools (non-streaming) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-tools-123', - 'gen_ai.response.finish_reasons': '["tool_calls"]', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'openai.response.id': 'chatcmpl-tools-123', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:31:40.000Z', - 'openai.usage.completion_tokens': 25, - 'openai.usage.prompt_tokens': 15, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: 
WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -89,25 +113,25 @@ describe('OpenAI Tool Calls integration', () => { // Second span - chat completion with tools and streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-stream-tools-123', - 'gen_ai.response.finish_reasons': '["tool_calls"]', - 'gen_ai.response.streaming': true, - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'openai.response.id': 'chatcmpl-stream-tools-123', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:31:45.000Z', - 'openai.usage.completion_tokens': 25, - 'openai.usage.prompt_tokens': 15, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:45.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4 stream-response', op: 'gen_ai.chat', @@ -117,23 +141,23 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_tools_789', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, - 'openai.response.id': 'resp_tools_789', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:32:00.000Z', - 'openai.usage.completion_tokens': 12, - 'openai.usage.prompt_tokens': 8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 
'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:32:00.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'responses gpt-4', op: 'gen_ai.responses', @@ -143,25 +167,25 @@ describe('OpenAI Tool Calls integration', () => { // Fourth span - responses API with tools and streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_stream_tools_789', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.response.streaming': true, - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, - 'openai.response.id': 'resp_stream_tools_789', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:31:50.000Z', - 'openai.usage.completion_tokens': 12, - 'openai.usage.prompt_tokens': 8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'responses gpt-4 stream-response', op: 'gen_ai.responses', @@ -177,27 +201,27 @@ describe('OpenAI Tool Calls integration', () => { // First span - chat completion with tools (non-streaming) with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-tools-123', - 'gen_ai.response.finish_reasons': '["tool_calls"]', - 
'gen_ai.response.text': '[""]', - 'gen_ai.response.tool_calls': CHAT_TOOL_CALLS, - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'openai.response.id': 'chatcmpl-tools-123', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:31:40.000Z', - 'openai.usage.completion_tokens': 25, - 'openai.usage.prompt_tokens': 15, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '[""]', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: CHAT_TOOL_CALLS, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -207,28 +231,28 @@ describe('OpenAI Tool Calls integration', () => { // Second span - chat completion with tools and streaming with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-stream-tools-123', - 'gen_ai.response.finish_reasons': '["tool_calls"]', - 'gen_ai.response.streaming': true, - 'gen_ai.response.tool_calls': CHAT_STREAM_TOOL_CALLS, - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'openai.response.id': 'chatcmpl-stream-tools-123', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:31:45.000Z', - 'openai.usage.completion_tokens': 25, - 'openai.usage.prompt_tokens': 15, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: CHAT_STREAM_TOOL_CALLS, + 
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:45.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4 stream-response', op: 'gen_ai.chat', @@ -238,26 +262,26 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_tools_789', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.response.tool_calls': RESPONSES_TOOL_CALLS, - 'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, - 'openai.response.id': 'resp_tools_789', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:32:00.000Z', - 'openai.usage.completion_tokens': 12, - 'openai.usage.prompt_tokens': 8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: RESPONSES_TOOL_CALLS, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:32:00.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'responses gpt-4', op: 'gen_ai.responses', @@ -267,28 +291,28 @@ describe('OpenAI Tool Calls integration', () => { // Fourth span - responses API with tools and streaming with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]', - 'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_stream_tools_789', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.response.streaming': true, - 'gen_ai.response.tool_calls': RESPONSES_TOOL_CALLS, - 
'gen_ai.usage.input_tokens': 8, - 'gen_ai.usage.output_tokens': 12, - 'gen_ai.usage.total_tokens': 20, - 'openai.response.id': 'resp_stream_tools_789', - 'openai.response.model': 'gpt-4', - 'openai.response.timestamp': '2023-03-01T06:31:50.000Z', - 'openai.usage.completion_tokens': 12, - 'openai.usage.prompt_tokens': 8, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]', + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: RESPONSES_TOOL_CALLS, + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'responses gpt-4 stream-response', op: 'gen_ai.responses', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts index 18b11e1b852d..d1040367ec47 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts @@ -1,4 +1,30 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE, + GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_STREAM_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, + OPENAI_RESPONSE_ID_ATTRIBUTE, + OPENAI_RESPONSE_MODEL_ATTRIBUTE, + OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, + OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; describe('OpenAI integration (V6)', () => { @@ -12,23 +38,23 @@ describe('OpenAI integration (V6)', () => { // First span - basic chat completion without PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 
'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'openai.response.id': 'chatcmpl-mock123', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', - 'openai.usage.completion_tokens': 15, - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -38,22 +64,22 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, - 'openai.response.id': 'resp_mock456', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', - 'openai.usage.completion_tokens': 8, - 'openai.usage.prompt_tokens': 5, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'responses gpt-3.5-turbo', op: 'gen_ai.responses', @@ -63,11 +89,11 @@ describe('OpenAI integration (V6)', () => { // Third span - error handling expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + 
[GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -77,25 +103,25 @@ describe('OpenAI integration (V6)', () => { // Fourth span - chat completions streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, - 'openai.response.id': 'chatcmpl-stream-123', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:40.000Z', - 'openai.usage.completion_tokens': 18, - 'openai.usage.prompt_tokens': 12, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }, description: 'chat gpt-4 stream-response', op: 'gen_ai.chat', @@ -105,24 +131,24 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.response.id': 'resp_stream_456', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.usage.input_tokens': 6, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 16, - 'openai.response.id': 'resp_stream_456', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:50.000Z', - 'openai.usage.completion_tokens': 10, - 'openai.usage.prompt_tokens': 6, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 
'gpt-4', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }, description: 'responses gpt-4 stream-response', op: 'gen_ai.responses', @@ -132,12 +158,12 @@ describe('OpenAI integration (V6)', () => { // Sixth span - error handling in streaming context expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.stream': true, - 'gen_ai.system': 'openai', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', }, description: 'chat error-model stream-response', op: 'gen_ai.chat', @@ -153,26 +179,26 @@ describe('OpenAI integration (V6)', () => { // First span - basic chat completion with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.input.messages.original_length': 2, - 'gen_ai.input.messages': '[{"role":"user","content":"What is the capital of France?"}]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.text': '["Hello from OpenAI mock!"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'openai.response.id': 'chatcmpl-mock123', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', - 'openai.usage.completion_tokens': 15, - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '["Hello from OpenAI mock!"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -182,24 +208,24 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.input.messages': 'Translate this to French: Hello', 
- 'gen_ai.response.text': 'Response to: Translate this to French: Hello', - 'gen_ai.response.finish_reasons': '["completed"]', - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'resp_mock456', - 'gen_ai.usage.input_tokens': 5, - 'gen_ai.usage.output_tokens': 8, - 'gen_ai.usage.total_tokens': 13, - 'openai.response.id': 'resp_mock456', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:30.000Z', - 'openai.usage.completion_tokens': 8, - 'openai.usage.prompt_tokens': 5, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'responses gpt-3.5-turbo', op: 'gen_ai.responses', @@ -209,13 +235,13 @@ describe('OpenAI integration (V6)', () => { // Third span - error handling with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"This will fail"}]', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', }, description: 'chat error-model', op: 'gen_ai.chat', @@ -225,28 +251,28 @@ describe('OpenAI integration (V6)', () => { // Fourth span - chat completions streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.temperature': 0.8, - 'gen_ai.request.stream': true, - 'gen_ai.input.messages.original_length': 2, - 'gen_ai.input.messages': '[{"role":"user","content":"Tell me about streaming"}]', - 'gen_ai.response.text': 'Hello from OpenAI streaming!', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.response.id': 'chatcmpl-stream-123', - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.usage.input_tokens': 12, - 'gen_ai.usage.output_tokens': 18, - 'gen_ai.usage.total_tokens': 30, - 'openai.response.id': 'chatcmpl-stream-123', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:40.000Z', - 'openai.usage.completion_tokens': 18, - 'openai.usage.prompt_tokens': 12, + 
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8, + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }), description: 'chat gpt-4 stream-response', op: 'gen_ai.chat', @@ -256,26 +282,27 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-4', - 'gen_ai.request.stream': true, - 'gen_ai.input.messages': 'Test streaming responses API', - 'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API', - 'gen_ai.response.finish_reasons': '["in_progress","completed"]', - 'gen_ai.response.id': 'resp_stream_456', - 'gen_ai.response.model': 'gpt-4', - 'gen_ai.usage.input_tokens': 6, - 'gen_ai.usage.output_tokens': 10, - 'gen_ai.usage.total_tokens': 16, - 'openai.response.id': 'resp_stream_456', - 'openai.response.model': 'gpt-4', - 'gen_ai.response.streaming': true, - 'openai.response.timestamp': '2023-03-01T06:31:50.000Z', - 'openai.usage.completion_tokens': 10, - 'openai.usage.prompt_tokens': 6, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Test streaming responses API', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: + 'Streaming response to: Test streaming responses APITest streaming responses API', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', + [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }), description: 'responses gpt-4 stream-response', op: 'gen_ai.responses', @@ -285,14 +312,14 @@ describe('OpenAI integration (V6)', () => { // Sixth span - error handling in streaming 
context with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'chat', - 'gen_ai.request.model': 'error-model', - 'gen_ai.request.stream': true, - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"This will fail"}]', - 'gen_ai.system': 'openai', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', }, description: 'chat error-model stream-response', op: 'gen_ai.chat', @@ -308,18 +335,18 @@ describe('OpenAI integration (V6)', () => { // Check that custom options are respected expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.input.messages.original_length': expect.any(Number), - 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true }), }), // Check that custom options are respected for streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.input.messages.original_length': expect.any(Number), - 'gen_ai.input.messages': expect.any(String), // Should include messages when recordInputs: true - 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true - 'gen_ai.request.stream': true, // Should be marked as stream + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true + [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, // Should be marked as stream }), }), ]), @@ -331,18 +358,18 @@ describe('OpenAI integration (V6)', () => { // First span - embeddings API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'text-embedding-3-small', - 'gen_ai.request.encoding_format': 'float', - 'gen_ai.request.dimensions': 1536, - 'gen_ai.response.model': 'text-embedding-3-small', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.total_tokens': 10, - 'openai.response.model': 'text-embedding-3-small', - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float', + [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + 
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -352,11 +379,11 @@ describe('OpenAI integration (V6)', () => { // Second span - embeddings API error model expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, description: 'embeddings error-model', op: 'gen_ai.embeddings', @@ -372,19 +399,19 @@ describe('OpenAI integration (V6)', () => { // First span - embeddings API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'text-embedding-3-small', - 'gen_ai.request.encoding_format': 'float', - 'gen_ai.request.dimensions': 1536, - 'gen_ai.embeddings.input': 'Embedding test!', - 'gen_ai.response.model': 'text-embedding-3-small', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.total_tokens': 10, - 'openai.response.model': 'text-embedding-3-small', - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float', + [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536, + [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Embedding test!', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -394,12 +421,12 @@ describe('OpenAI integration (V6)', () => { // Second span - embeddings API error model with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'error-model', - 'gen_ai.embeddings.input': 'Error embedding test!', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', + [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Error embedding test!', }, description: 'embeddings error-model', op: 'gen_ai.embeddings', @@ -409,17 +436,17 @@ describe('OpenAI integration (V6)', () => { // Third span - embeddings API with multiple inputs (this does not get truncated) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'embeddings', - 'sentry.op': 'gen_ai.embeddings', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'text-embedding-3-small', - 'gen_ai.embeddings.input': '["First input text","Second input text","Third input 
text"]', - 'gen_ai.response.model': 'text-embedding-3-small', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.total_tokens': 10, - 'openai.response.model': 'text-embedding-3-small', - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: '["First input text","Second input text","Third input text"]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -550,23 +577,23 @@ describe('OpenAI integration (V6)', () => { span_id: expect.any(String), trace_id: expect.any(String), data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'openai.response.id': 'chatcmpl-mock123', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', - 'openai.usage.completion_tokens': 15, - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, op: 'gen_ai.chat', origin: 'auto.ai.openai', @@ -608,23 +635,23 @@ describe('OpenAI integration (V6)', () => { span_id: expect.any(String), trace_id: expect.any(String), data: { - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', - 'sentry.origin': 'auto.ai.openai', - 'gen_ai.system': 'openai', - 'gen_ai.request.model': 'gpt-3.5-turbo', - 'gen_ai.request.temperature': 0.7, - 'gen_ai.response.model': 'gpt-3.5-turbo', - 'gen_ai.response.id': 'chatcmpl-mock123', - 'gen_ai.response.finish_reasons': '["stop"]', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 15, - 'gen_ai.usage.total_tokens': 25, - 'openai.response.id': 'chatcmpl-mock123', - 'openai.response.model': 'gpt-3.5-turbo', - 'openai.response.timestamp': '2023-03-01T06:31:28.000Z', - 'openai.usage.completion_tokens': 15, - 'openai.usage.prompt_tokens': 10, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, + [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', + [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', + [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', + [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, + [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, op: 'gen_ai.chat', origin: 'auto.ai.openai', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 1c111cf9eda6..64a7b479ccd5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -1,11 +1,27 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import type { Event } from '@sentry/node'; import { afterAll, describe, expect } from 'vitest'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PROMPT_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, GEN_AI_TOOL_TYPE_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; @@ -20,20 +36,20 @@ describe('Vercel AI integration (V5)', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -43,26 +59,26 @@ describe('Vercel AI 
integration (V5)', () => { // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -72,25 +88,25 @@ describe('Vercel AI integration (V5)', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -100,29 +116,29 @@ 
describe('Vercel AI integration (V5)', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.input.messages.original_length': expect.any(Number), - 'gen_ai.input.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -132,20 +148,20 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -155,7 +171,7 @@ describe('Vercel AI integration (V5)', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 
'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -165,16 +181,16 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -188,9 +204,9 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -209,25 +225,25 @@ describe('Vercel AI integration (V5)', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -237,29 +253,30 @@ describe('Vercel AI integration (V5)', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -269,25 +286,25 @@ describe('Vercel AI integration (V5)', () => { // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 
'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -297,29 +314,29 @@ describe('Vercel AI integration (V5)', () => { // Fourth span - doGenerate for explicitly enabled telemetry call expect.objectContaining({ data: { - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.input.messages.original_length': expect.any(Number), - 'gen_ai.input.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -329,25 +346,25 @@ describe('Vercel AI integration (V5)', () => { // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.input.messages.original_length': 1, - 'gen_ai.input.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.response.finishReason': 'tool-calls', - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: 
expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generateText', op: 'gen_ai.invoke_agent', @@ -357,32 +374,32 @@ describe('Vercel AI integration (V5)', () => { // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', - 'gen_ai.input.messages.original_length': expect.any(Number), - 'gen_ai.input.messages': expect.any(String), + [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: expect.any(Number), + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), 'vercel.ai.prompt.toolChoice': expect.any(String), - 'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON, + [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.tool_calls': expect.any(String), + [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -398,9 +415,9 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - 
'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -453,19 +470,19 @@ describe('Vercel AI integration (V5)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', }, description: 'generateText', @@ -474,7 +491,7 @@ describe('Vercel AI integration (V5)', () => { }), expect.objectContaining({ data: { - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -484,16 +501,16 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }, description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -506,9 +523,9 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 
'auto.vercelai.otel', }, description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index e417f54147de..ecc0236fbcd9 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -1,11 +1,26 @@ +import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import type { Event } from '@sentry/node'; import { afterAll, describe, expect } from 'vitest'; import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_PROMPT_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, GEN_AI_TOOL_TYPE_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; @@ -20,7 +35,7 @@ describe('Vercel AI integration (V6)', () => { // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -28,13 +43,13 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -44,27 +59,27 @@ describe('Vercel AI integration (V6)', () => { // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': 
expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -74,25 +89,25 @@ describe('Vercel AI integration (V6)', () => { // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the second span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', - 'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -102,29 +117,29 @@ describe('Vercel AI integration (V6)', () => { // Fourth span - doGenerate for explicit telemetry enabled call expect.objectContaining({ data: expect.objectContaining({ - 'sentry.origin': 'auto.vercelai.otel', - 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 
'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.settings.maxRetries': 2, - 'gen_ai.system': 'mock-provider', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.streaming': false, 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - 'gen_ai.response.text': expect.any(String), + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), - 'gen_ai.input.messages': expect.any(String), - 'gen_ai.response.finish_reasons': ['stop'], - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.total_tokens': 30, + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -134,7 +149,7 @@ describe('Vercel AI integration (V6)', () => { // Fifth span - tool call generateText span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', @@ -142,13 +157,13 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -158,7 +173,7 @@ describe('Vercel AI integration (V6)', () => { // Sixth span - tool call doGenerate span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.pipeline.name': 'generateText.doGenerate', @@ -169,16 +184,16 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.response.finish_reasons': ['tool-calls'], - 'gen_ai.response.id': expect.any(String), - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.system': 'mock-provider', - 'gen_ai.usage.input_tokens': 15, - 'gen_ai.usage.output_tokens': 25, - 'gen_ai.usage.total_tokens': 40, - 
'gen_ai.operation.name': 'ai.generateText.doGenerate', - 'sentry.op': 'gen_ai.generate_text', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', @@ -192,9 +207,9 @@ describe('Vercel AI integration (V6)', () => { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - 'gen_ai.operation.name': 'ai.toolCall', - 'sentry.op': 'gen_ai.execute_tool', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', @@ -213,25 +228,25 @@ describe('Vercel AI integration (V6)', () => { // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText', 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '{"prompt":"Where is the first span?"}', 'vercel.ai.request.headers.user-agent': expect.any(String), - 'gen_ai.input.messages': '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.response.finishReason': 'stop', - 'gen_ai.response.text': 'First span here!', + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, - 'gen_ai.prompt': '{"prompt":"Where is the first span?"}', - 'gen_ai.response.model': 'mock-model-id', - 'gen_ai.usage.input_tokens': 10, - 'gen_ai.usage.output_tokens': 20, - 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', - 'sentry.op': 'gen_ai.invoke_agent', - 'sentry.origin': 'auto.vercelai.otel', + [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}', + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', }), description: 'generateText', op: 'gen_ai.invoke_agent', @@ -241,29 +256,30 @@ describe('Vercel AI integration (V6)', () => { // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.request.model': 'mock-model-id', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
           'vercel.ai.request.headers.user-agent': expect.any(String),
-          'gen_ai.input.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+          [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]:
+            '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
           'vercel.ai.response.finishReason': 'stop',
           'vercel.ai.response.id': expect.any(String),
           'vercel.ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'First span here!',
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
           'vercel.ai.response.timestamp': expect.any(String),
           'vercel.ai.settings.maxRetries': 2,
           'vercel.ai.streaming': false,
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -273,25 +289,25 @@
       // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.model': 'mock-model-id',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
           'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
           'vercel.ai.request.headers.user-agent': expect.any(String),
-          'gen_ai.input.messages': '[{"role":"user","content":"Where is the second span?"}]',
+          [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
           'vercel.ai.response.finishReason': 'stop',
-          'gen_ai.response.text': expect.any(String),
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
           'vercel.ai.settings.maxRetries': 2,
           'vercel.ai.streaming': false,
-          'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.usage.total_tokens': 30,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
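For reference while reading these hunks: the bracketed keys are ordinary string constants, so each substitution is behavior-preserving at runtime. Below is a minimal sketch of their likely declarations, with every value inferred from the string literal it replaces in this diff; the real definitions live in the SDK's gen-ai attribute module, not in this patch.

// Sketch only — values recovered from the removed literals in the surrounding hunks.
export const GEN_AI_OPERATION_NAME_ATTRIBUTE = 'gen_ai.operation.name';
export const GEN_AI_PROMPT_ATTRIBUTE = 'gen_ai.prompt';
export const GEN_AI_INPUT_MESSAGES_ATTRIBUTE = 'gen_ai.input.messages';
export const GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model';
export const GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE = 'gen_ai.request.available_tools';
export const GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system';
export const GEN_AI_RESPONSE_TEXT_ATTRIBUTE = 'gen_ai.response.text';
export const GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE = 'gen_ai.response.tool_calls';
export const GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE = 'gen_ai.response.finish_reasons';
export const GEN_AI_RESPONSE_ID_ATTRIBUTE = 'gen_ai.response.id';
export const GEN_AI_RESPONSE_MODEL_ATTRIBUTE = 'gen_ai.response.model';
export const GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.input_tokens';
export const GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.output_tokens';
export const GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE = 'gen_ai.usage.total_tokens';
export const SEMANTIC_ATTRIBUTE_SENTRY_OP = 'sentry.op';
export const SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN = 'sentry.origin';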
@@ -301,29 +317,29 @@ describe('Vercel AI integration (V6)', () => {
       // Fourth span - doGenerate for explicitly enabled telemetry call
       expect.objectContaining({
         data: expect.objectContaining({
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'gen_ai.generate_text',
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.request.headers.user-agent': expect.any(String),
-          'gen_ai.request.model': 'mock-model-id',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
           'vercel.ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
           'vercel.ai.streaming': false,
           'vercel.ai.response.finishReason': 'stop',
           'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.id': expect.any(String),
-          'gen_ai.response.text': expect.any(String),
+          [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
           'vercel.ai.response.timestamp': expect.any(String),
-          'gen_ai.input.messages': expect.any(String),
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.total_tokens': 30,
+          [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
         }),
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -333,25 +349,25 @@ describe('Vercel AI integration (V6)', () => {
       // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.model': 'mock-model-id',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
           'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
           'vercel.ai.request.headers.user-agent': expect.any(String),
-          'gen_ai.input.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+          [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
           'vercel.ai.response.finishReason': 'tool-calls',
-          'gen_ai.response.tool_calls': expect.any(String),
+          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
           'vercel.ai.settings.maxRetries': 2,
           'vercel.ai.streaming': false,
-          'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -361,32 +377,32 @@ describe('Vercel AI integration (V6)', () => {
       // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.model': 'mock-model-id',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
           'vercel.ai.request.headers.user-agent': expect.any(String),
-          'gen_ai.input.messages': expect.any(String),
+          [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
           'vercel.ai.prompt.toolChoice': expect.any(String),
-          'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
+          [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
           'vercel.ai.response.finishReason': 'tool-calls',
           'vercel.ai.response.id': expect.any(String),
           'vercel.ai.response.model': 'mock-model-id',
           // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set
           'vercel.ai.response.timestamp': expect.any(String),
-          'gen_ai.response.tool_calls': expect.any(String),
+          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
           'vercel.ai.settings.maxRetries': 2,
           'vercel.ai.streaming': false,
-          'gen_ai.response.finish_reasons': ['tool-calls'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -402,9 +418,9 @@ describe('Vercel AI integration (V6)', () => {
           [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
           [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
           [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-          'gen_ai.operation.name': 'ai.toolCall',
-          'sentry.op': 'gen_ai.execute_tool',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
         description: 'execute_tool getWeather',
         op: 'gen_ai.execute_tool',
@@ -457,20 +473,20 @@ describe('Vercel AI integration (V6)', () => {
     spans: expect.arrayContaining([
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.model': 'mock-model-id',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText',
           'vercel.ai.pipeline.name': 'generateText',
           'vercel.ai.request.headers.user-agent': expect.any(String),
           'vercel.ai.settings.maxRetries': 2,
           'vercel.ai.streaming': false,
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText',
-          'sentry.op': 'gen_ai.invoke_agent',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
           'vercel.ai.response.finishReason': 'tool-calls',
         }),
         description: 'generateText',
@@ -479,7 +495,7 @@ describe('Vercel AI integration (V6)', () => {
       }),
       expect.objectContaining({
         data: expect.objectContaining({
-          'gen_ai.request.model': 'mock-model-id',
+          [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
           'vercel.ai.model.provider': 'mock-provider',
           'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -490,16 +506,16 @@ describe('Vercel AI integration (V6)', () => {
           'vercel.ai.response.timestamp': expect.any(String),
           'vercel.ai.settings.maxRetries': 2,
           'vercel.ai.streaming': false,
-          'gen_ai.response.finish_reasons': ['tool-calls'],
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.usage.input_tokens': 15,
-          'gen_ai.usage.output_tokens': 25,
-          'gen_ai.usage.total_tokens': 40,
-          'gen_ai.operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'gen_ai.generate_text',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
+          [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
+          [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
+          [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
+          [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
+          [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
+          [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -512,9 +528,9 @@ describe('Vercel AI integration (V6)', () => {
           [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
           [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
           [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-          'gen_ai.operation.name': 'ai.toolCall',
-          'sentry.op': 'gen_ai.execute_tool',
-          'sentry.origin': 'auto.vercelai.otel',
+          [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+          [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+          [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
         }),
        description: 'execute_tool getWeather',
        op: 'gen_ai.execute_tool',
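Because TypeScript computed property names evaluate to the constant's string value, the expectations rewritten in these hunks compare exactly the same runtime keys as the string-literal form they replace. A self-contained sketch of that equivalence, using local stand-ins for two of the constants (values taken from the removed literals in this diff):

import { describe, expect, it } from 'vitest';

// Local stand-ins for illustration only; the suites above import these from the SDK.
const GEN_AI_OPERATION_NAME_ATTRIBUTE = 'gen_ai.operation.name';
const GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE = 'gen_ai.usage.total_tokens';

describe('computed attribute keys', () => {
  it('builds the same object as string-literal keys', () => {
    // The computed keys evaluate to the constants' string values, so the
    // constant-based expectations match the very same span data as before;
    // only the source-level coupling to the attribute names changes.
    const data = {
      [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
      [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
    };
    expect(data).toEqual({
      'gen_ai.operation.name': 'ai.generateText',
      'gen_ai.usage.total_tokens': 40,
    });
  });
});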