From a4e6f395188ea58efd87c613d0223e006a680731 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Wed, 21 Jan 2026 17:02:30 +0100 Subject: [PATCH 1/9] fix(Core): Report well known values in gen_ai.operation.name attribute --- .../suites/tracing/anthropic/test.ts | 140 +++++++++--------- .../suites/tracing/langchain/test.ts | 4 +- .../suites/tracing/langchain/v1/test.ts | 4 +- .../tracing/openai/openai-tool-calls/test.ts | 32 ++-- .../suites/tracing/openai/test.ts | 62 ++++---- .../suites/tracing/openai/v6/test.ts | 32 ++-- .../tracing/vercelai/test-generate-object.ts | 4 +- .../suites/tracing/vercelai/test.ts | 44 +++--- .../suites/tracing/vercelai/v5/test.ts | 34 ++--- .../suites/tracing/vercelai/v6/test.ts | 34 ++--- .../core/src/tracing/ai/gen-ai-attributes.ts | 5 +- packages/core/src/tracing/ai/utils.ts | 7 +- packages/core/src/tracing/langchain/utils.ts | 12 +- packages/core/src/tracing/openai/utils.ts | 7 +- packages/core/src/tracing/vercel-ai/index.ts | 40 ++++- 15 files changed, 251 insertions(+), 210 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index a70e51858113..5d0b747728e3 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -12,8 +12,8 @@ describe('Anthropic integration', () => { // First span - basic message completion without PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', @@ -25,36 +25,36 @@ describe('Anthropic integration', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), // Second span - error handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'error-model', }), - description: 'messages error-model', - op: 'gen_ai.messages', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'internal_error', }), // Third span - token counting (no response.text because recordOutputs=false by default) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -84,7 +84,7 @@ describe('Anthropic integration', () => { // First span - basic message completion with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.max_tokens': 100, 'gen_ai.request.messages': 
'[{"role":"user","content":"What is the capital of France?"}]', 'gen_ai.request.model': 'claude-3-haiku-20240307', @@ -96,11 +96,11 @@ describe('Anthropic integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, - 'sentry.op': 'gen_ai.messages', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -125,15 +125,15 @@ describe('Anthropic integration', () => { // Second - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]', 'gen_ai.request.model': 'error-model', 'gen_ai.system': 'anthropic', - 'sentry.op': 'gen_ai.messages', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', }), - description: 'messages error-model', - op: 'gen_ai.messages', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'internal_error', }), @@ -158,16 +158,16 @@ describe('Anthropic integration', () => { // Third - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.response.text': '15', 'gen_ai.system': 'anthropic', - 'sentry.op': 'gen_ai.messages', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -228,7 +228,7 @@ describe('Anthropic integration', () => { // Fifth - messages.create with stream: true expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.messages': '[{"role":"user","content":"What is the capital of France?"}]', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.stream': true, @@ -240,11 +240,11 @@ describe('Anthropic integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, - 'sentry.op': 'gen_ai.messages', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -269,12 +269,12 @@ describe('Anthropic integration', () => { // Sixth - messages.stream expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.stream': true, }), - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -294,11 +294,11 @@ describe('Anthropic integration', () => { // Check 
token counting with options expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true 'gen_ai.response.text': '15', // Present because recordOutputs=true is set in options }), - op: 'gen_ai.messages', + op: 'gen_ai.chat', }), // Check models.retrieve with options expect.objectContaining({ @@ -378,11 +378,11 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // messages.create with stream: true expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.system': 'anthropic', - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.stream': true, 'gen_ai.response.streaming': true, @@ -396,11 +396,11 @@ describe('Anthropic integration', () => { }), // messages.stream expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.system': 'anthropic', - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.response.streaming': true, 'gen_ai.response.model': 'claude-3-haiku-20240307', @@ -412,11 +412,11 @@ describe('Anthropic integration', () => { }), // messages.stream with redundant stream: true param expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.system': 'anthropic', - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.stream': true, 'gen_ai.response.streaming': true, @@ -434,8 +434,8 @@ describe('Anthropic integration', () => { transaction: 'main', spans: expect.arrayContaining([ expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.response.streaming': true, // streamed text concatenated @@ -443,16 +443,16 @@ describe('Anthropic integration', () => { }), }), expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.response.streaming': true, 'gen_ai.response.text': 'Hello from stream!', }), }), expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.response.streaming': true, 'gen_ai.response.text': 'Hello from stream!', @@ -486,7 +486,7 @@ describe('Anthropic integration', () => { transaction: { spans: expect.arrayContaining([ expect.objectContaining({ - op: 'gen_ai.messages', + op: 'gen_ai.chat', data: expect.objectContaining({ 
'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON, 'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON, @@ -514,7 +514,7 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ description: expect.stringContaining('stream-response'), - op: 'gen_ai.messages', + op: 'gen_ai.chat', data: expect.objectContaining({ 'gen_ai.request.available_tools': EXPECTED_TOOLS_JSON, 'gen_ai.response.tool_calls': EXPECTED_TOOL_CALLS_JSON, @@ -534,8 +534,8 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // Error with messages.create on stream initialization expect.objectContaining({ - description: 'messages error-stream-init stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-init stream-response', + op: 'gen_ai.chat', status: 'internal_error', // Actual status coming from the instrumentation data: expect.objectContaining({ 'gen_ai.request.model': 'error-stream-init', @@ -544,8 +544,8 @@ describe('Anthropic integration', () => { }), // Error with messages.stream on stream initialization expect.objectContaining({ - description: 'messages error-stream-init stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-init stream-response', + op: 'gen_ai.chat', status: 'internal_error', // Actual status coming from the instrumentation data: expect.objectContaining({ 'gen_ai.request.model': 'error-stream-init', @@ -554,8 +554,8 @@ describe('Anthropic integration', () => { // Error midway with messages.create on streaming - note: The stream is started successfully // so we get a successful span with the content that was streamed before the error expect.objectContaining({ - description: 'messages error-stream-midway stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-midway stream-response', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ 'gen_ai.request.model': 'error-stream-midway', @@ -566,8 +566,8 @@ describe('Anthropic integration', () => { }), // Error midway with messages.stream - same behavior, we get a span with the streamed data expect.objectContaining({ - description: 'messages error-stream-midway stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-midway stream-response', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ 'gen_ai.request.model': 'error-stream-midway', @@ -590,8 +590,8 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // Invalid tool format error expect.objectContaining({ - description: 'messages invalid-format', - op: 'gen_ai.messages', + description: 'chat invalid-format', + op: 'gen_ai.chat', status: 'internal_error', data: expect.objectContaining({ 'gen_ai.request.model': 'invalid-format', @@ -608,8 +608,8 @@ describe('Anthropic integration', () => { }), // Successful tool usage (for comparison) expect.objectContaining({ - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ 'gen_ai.request.model': 'claude-3-haiku-20240307', @@ -640,16 +640,16 @@ describe('Anthropic integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 
'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', // Messages should be present (truncation happened) and should be a JSON array 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -690,8 +690,8 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', @@ -712,8 +712,8 @@ describe('Anthropic integration', () => { }, ]), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 8d8f1d542f70..cd309ff486c9 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -286,7 +286,7 @@ describe('LangChain integration', () => { // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -300,7 +300,7 @@ describe('LangChain integration', () => { // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index b05a70acdeb4..2389715d9307 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -336,7 +336,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -350,7 +350,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git 
a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index ac40fbe94249..9010e203924f 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts @@ -117,8 +117,8 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -135,16 +135,16 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4', - op: 'gen_ai.responses', + description: 'chat gpt-4', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses API with tools and streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -163,8 +163,8 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -238,8 +238,8 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -259,16 +259,16 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4', - op: 'gen_ai.responses', + description: 'chat gpt-4', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses API with tools and streaming with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -290,8 +290,8 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index bf64d2b92b72..3421f3e7ceb6 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -38,8 +38,8 @@ describe('OpenAI integration', () => { // Second span - responses API 
expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -55,8 +55,8 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -105,8 +105,8 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -124,8 +124,8 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -182,8 +182,8 @@ describe('OpenAI integration', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -201,8 +201,8 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -256,8 +256,8 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -277,8 +277,8 @@ describe('OpenAI integration', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }), - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -639,16 +639,16 @@ describe('OpenAI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', // Messages should be present and should include truncated string input (contains only As) 'gen_ai.request.messages': expect.stringMatching(/^A+$/), }), - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -668,30 +668,30 @@ describe('OpenAI integration', () => { // First span - conversations.create returns conversation object with id expect.objectContaining({ data: 
expect.objectContaining({ - 'gen_ai.operation.name': 'conversations', - 'sentry.op': 'gen_ai.conversations', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', // The conversation ID should be captured from the response 'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', }), - description: 'conversations unknown', - op: 'gen_ai.conversations', + description: 'chat unknown', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Second span - responses.create with conversation parameter expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', // The conversation ID should be captured from the request 'gen_ai.conversation.id': 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -700,22 +700,22 @@ describe('OpenAI integration', () => { data: expect.not.objectContaining({ 'gen_ai.conversation.id': expect.anything(), }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses.create with previous_response_id (chaining) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', // The previous_response_id should be captured as conversation.id 'gen_ai.conversation.id': 'resp_mock_conv_123', }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts index 9b4120b143e4..4115738a19c5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts @@ -38,8 +38,8 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -55,8 +55,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -105,8 +105,8 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -124,8 +124,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }, - description: 'responses gpt-4 stream-response', - op: 
'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -182,8 +182,8 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -201,8 +201,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -256,8 +256,8 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -277,8 +277,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }), - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts index 2e8e8711e9e9..4261248da349 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts @@ -24,7 +24,7 @@ describe('Vercel AI integration - generateObject', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateObject', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -38,7 +38,7 @@ describe('Vercel AI integration - generateObject', () => { data: expect.objectContaining({ 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_object', - 'gen_ai.operation.name': 'ai.generateObject.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateObject.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.model.id': 'mock-model-id', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 8112bcadd5f5..d17975dbf55d 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -18,7 +18,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -45,7 +45,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 
'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -75,7 +75,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -106,7 +106,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -133,7 +133,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -160,7 +160,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -184,7 +184,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -215,7 +215,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -251,7 +251,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -287,7 +287,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -323,7 +323,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 
'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -360,7 +360,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -398,7 +398,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -431,7 +431,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -473,7 +473,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -498,7 +498,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -521,7 +521,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -593,7 +593,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -618,7 +618,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -641,7 +641,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -722,7 
+722,7 @@ describe('Vercel AI integration', () => { data: expect.objectContaining({ 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', }), }), // The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing @@ -734,7 +734,7 @@ describe('Vercel AI integration', () => { data: expect.objectContaining({ 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', }), }), ]), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 179644bbcd73..adcdd24bd7e9 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -24,7 +24,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -38,7 +38,7 @@ describe('Vercel AI integration (V5)', () => { data: { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -81,7 +81,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -95,7 +95,7 @@ describe('Vercel AI integration (V5)', () => { data: { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -136,7 +136,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -165,7 +165,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -181,7 +181,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, @@ -218,7 +218,7 @@ describe('Vercel AI integration (V5)', () => { 
'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -250,7 +250,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -278,7 +278,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -292,7 +292,7 @@ describe('Vercel AI integration (V5)', () => { data: { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -338,7 +338,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -373,7 +373,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -391,7 +391,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, @@ -456,7 +456,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', @@ -484,7 +484,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -499,7 +499,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts 
b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 98a16618d77d..ecb856b98267 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -25,7 +25,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -39,7 +39,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), @@ -83,7 +83,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -97,7 +97,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), @@ -139,7 +139,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -169,7 +169,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }), @@ -185,7 +185,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }), @@ -222,7 +222,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -254,7 +254,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }), @@ -282,7 +282,7 @@ 
describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -296,7 +296,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.request.headers.user-agent': expect.any(String), @@ -342,7 +342,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -377,7 +377,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }), @@ -395,7 +395,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.tool.input': expect.any(String), 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }), @@ -461,7 +461,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', @@ -490,7 +490,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateText.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }), @@ -505,7 +505,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', - 'gen_ai.operation.name': 'ai.toolCall', + 'gen_ai.operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }), diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index 4fa7274d7281..36e6d76851b8 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -266,13 +266,12 @@ export const OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'openai.usage.prompt_tokens' // ============================================================================= /** - * OpenAI API operations + * OpenAI API operations following OpenTelemetry semantic conventions + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export const OPENAI_OPERATIONS = { CHAT: 'chat', 
- RESPONSES: 'responses', EMBEDDINGS: 'embeddings', - CONVERSATIONS: 'conversations', } as const; // ============================================================================= diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts index 4a7a14eea554..17e062c976c8 100644 --- a/packages/core/src/tracing/ai/utils.ts +++ b/packages/core/src/tracing/ai/utils.ts @@ -9,14 +9,15 @@ import { } from './gen-ai-attributes'; import { truncateGenAiMessages, truncateGenAiStringInput } from './messageTruncation'; /** - * Maps AI method paths to Sentry operation name + * Maps AI method paths to OpenTelemetry semantic convention operation names + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export function getFinalOperationName(methodPath: string): string { if (methodPath.includes('messages')) { - return 'messages'; + return 'chat'; } if (methodPath.includes('completions')) { - return 'completions'; + return 'text_completion'; } if (methodPath.includes('models')) { return 'models'; diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 0a07ae8df370..0e36b5542ba5 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -216,18 +216,19 @@ function extractCommonRequestAttributes( /** * Small helper to assemble boilerplate attributes shared by both request extractors. + * Both LLM and ChatModel invocations use 'chat' as the operation name since modern + * LLM interactions are chat-based and this aligns with OpenTelemetry semantic conventions. */ function baseRequestAttributes( system: unknown, modelName: unknown, - operation: 'pipeline' | 'chat', serialized: LangChainSerialized, invocationParams?: Record<string, unknown>, langSmithMetadata?: Record<string, unknown>, ): Record<string, unknown> { return { [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'), - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operation, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata), @@ -237,7 +238,8 @@ function baseRequestAttributes( /** * Extracts attributes for plain LLM invocations (string prompts). * - * - Operation is tagged as `pipeline` to distinguish from chat-style invocations. + * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. + * Modern LLM invocations typically use chat-based models even when called with string prompts. * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}` * messages to align with the chat schema used elsewhere. */ export function extractLLMRequestAttributes( const system = langSmithMetadata?.ls_provider; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'pipeline', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); @@ -280,7 +282,7 @@ export function extractChatModelRequestAttributes( const system = langSmithMetadata?.ls_provider ?? llm.id?.[2]; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 
'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'chat', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts index 007dd93a91b1..82494f7ae018 100644 --- a/packages/core/src/tracing/openai/utils.ts +++ b/packages/core/src/tracing/openai/utils.ts @@ -35,20 +35,21 @@ import type { } from './types'; /** - * Maps OpenAI method paths to Sentry operation names + * Maps OpenAI method paths to OpenTelemetry semantic convention operation names + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ export function getOperationName(methodPath: string): string { if (methodPath.includes('chat.completions')) { return OPENAI_OPERATIONS.CHAT; } if (methodPath.includes('responses')) { - return OPENAI_OPERATIONS.RESPONSES; + return OPENAI_OPERATIONS.CHAT; } if (methodPath.includes('embeddings')) { return OPENAI_OPERATIONS.EMBEDDINGS; } if (methodPath.includes('conversations')) { - return OPENAI_OPERATIONS.CONVERSATIONS; + return OPENAI_OPERATIONS.CHAT; } return methodPath.split('.').pop() || 'unknown'; } diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 9b95e8aa91ad..d93b5ee8949e 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -48,6 +48,39 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin); } +/** + * Maps Vercel AI SDK operation names to OpenTelemetry semantic convention values + * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans + */ +function mapVercelAiOperationName(operationName: string): string { + // Map to OpenTelemetry well-known values + if ( + operationName === 'ai.generateText' || + operationName === 'ai.streamText' || + operationName === 'ai.generateObject' || + operationName === 'ai.streamObject' || + operationName === 'ai.generateText.doGenerate' || + operationName === 'ai.streamText.doStream' || + operationName === 'ai.generateObject.doGenerate' || + operationName === 'ai.streamObject.doStream' + ) { + return 'generate_content'; + } + if ( + operationName === 'ai.embed' || + operationName === 'ai.embedMany' || + operationName === 'ai.embed.doEmbed' || + operationName === 'ai.embedMany.doEmbed' + ) { + return 'embeddings'; + } + if (operationName === 'ai.toolCall') { + return 'execute_tool'; + } + // Return the original value for unknown operations + return operationName; +} + /** * Post-process spans emitted by the Vercel AI SDK. * This is supposed to be used in `client.on('spanStart', ...) 
@@ -145,7 +178,11 @@ function processEndedVercelAiSpan(span: SpanJSON): void { } // Rename AI SDK attributes to standardized gen_ai attributes - renameAttributeKey(attributes, OPERATION_NAME_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE); + // Map operation.name to OpenTelemetry semantic convention values + if (attributes[OPERATION_NAME_ATTRIBUTE]) { + const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string); + attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName; + } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); @@ -183,6 +220,7 @@ function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, function processToolCallSpan(span: Span, attributes: SpanAttributes): void { addOriginToSpan(span, 'auto.vercelai.otel'); span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool'); + span.setAttribute(GEN_AI_OPERATION_NAME_ATTRIBUTE, 'execute_tool'); renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, 'gen_ai.tool.name'); renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, 'gen_ai.tool.call.id'); From 79c9f4cc7087ecc00ed0e7f46db6b965e78701fb Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Thu, 22 Jan 2026 10:22:20 +0100 Subject: [PATCH 2/9] update tests --- packages/core/test/lib/utils/openai-utils.test.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts index ff951e8be40b..25cd873ace08 100644 --- a/packages/core/test/lib/utils/openai-utils.test.ts +++ b/packages/core/test/lib/utils/openai-utils.test.ts @@ -18,14 +18,14 @@ describe('openai-utils', () => { expect(getOperationName('some.path.chat.completions.method')).toBe('chat'); }); - it('should return responses for responses methods', () => { - expect(getOperationName('responses.create')).toBe('responses'); - expect(getOperationName('some.path.responses.method')).toBe('responses'); + it('should return chat for responses methods', () => { + expect(getOperationName('responses.create')).toBe('chat'); + expect(getOperationName('some.path.responses.method')).toBe('chat'); }); - it('should return conversations for conversations methods', () => { - expect(getOperationName('conversations.create')).toBe('conversations'); - expect(getOperationName('some.path.conversations.method')).toBe('conversations'); + it('should return chat for conversations methods', () => { + expect(getOperationName('conversations.create')).toBe('chat'); + expect(getOperationName('some.path.conversations.method')).toBe('chat'); }); it('should return the last part of path for unknown methods', () => { @@ -41,7 +41,7 @@ describe('getSpanOperation', () => { it('should prefix operation with gen_ai', () => { expect(getSpanOperation('chat.completions.create')).toBe('gen_ai.chat'); - expect(getSpanOperation('responses.create')).toBe('gen_ai.responses'); + expect(getSpanOperation('responses.create')).toBe('gen_ai.chat'); expect(getSpanOperation('some.custom.operation')).toBe('gen_ai.operation'); }); }); From 6a6acc4b012a3b9a8eaa788c8c846e245de10a26 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Thu, 22 Jan 2026 10:50:08 +0100 Subject: [PATCH 3/9] fix vercel ai op name, and fix tests --- .../suites/tracing/google-genai/test.ts | 112
+++++++++--------- .../suites/tracing/langchain/test.ts | 6 +- .../suites/tracing/langchain/v1/test.ts | 6 +- .../suites/tracing/vercelai/test.ts | 20 ++++ .../suites/tracing/vercelai/v5/test.ts | 17 +++ packages/core/src/tracing/ai/utils.ts | 5 + packages/core/src/tracing/vercel-ai/index.ts | 1 + 7 files changed, 103 insertions(+), 64 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts index d6ff72cde6d8..11a180fc4d64 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -46,8 +46,8 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent expect.objectContaining({ data: { - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -58,22 +58,22 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 12, 'gen_ai.usage.total_tokens': 20, }, - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Fourth span - error handling expect.objectContaining({ data: { - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'error-model', }, - description: 'models error-model', - op: 'gen_ai.models', + description: 'generate_content error-model', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -123,8 +123,8 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -137,23 +137,23 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 12, 'gen_ai.usage.total_tokens': 20, }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Fourth span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'error-model', 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true }), - description: 'models error-model', - op: 'gen_ai.models', + description: 'generate_content error-model', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -213,8 +213,8 @@ describe('Google GenAI integration', () => { // Non-streaming with tools 
expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-2.0-flash-001', @@ -226,16 +226,16 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 8, 'gen_ai.usage.total_tokens': 23, }), - description: 'models gemini-2.0-flash-001', - op: 'gen_ai.models', + description: 'generate_content gemini-2.0-flash-001', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Streaming with tools expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-2.0-flash-001', @@ -250,16 +250,16 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 10, 'gen_ai.usage.total_tokens': 22, }), - description: 'models gemini-2.0-flash-001 stream-response', - op: 'gen_ai.models', + description: 'generate_content gemini-2.0-flash-001 stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Without tools for comparison expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-2.0-flash-001', @@ -269,8 +269,8 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 12, 'gen_ai.usage.total_tokens': 20, }), - description: 'models gemini-2.0-flash-001', - op: 'gen_ai.models', + description: 'generate_content gemini-2.0-flash-001', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), @@ -289,8 +289,8 @@ describe('Google GenAI integration', () => { // First span - models.generateContentStream (streaming) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -305,8 +305,8 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 12, 'gen_ai.usage.total_tokens': 22, }), - description: 'models gemini-1.5-flash stream-response', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), @@ -347,24 +347,24 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', }), - description: 'models blocked-model stream-response', - op: 'gen_ai.models', + description: 'generate_content blocked-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 
'internal_error', }), // Fifth span - error handling for streaming expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', }), - description: 'models error-model stream-response', - op: 'gen_ai.models', + description: 'generate_content error-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -377,8 +377,8 @@ describe('Google GenAI integration', () => { // First span - models.generateContentStream (streaming) with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -394,8 +394,8 @@ describe('Google GenAI integration', () => { 'gen_ai.usage.output_tokens': 12, 'gen_ai.usage.total_tokens': 22, }), - description: 'models gemini-1.5-flash stream-response', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), @@ -441,8 +441,8 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content stream with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'blocked-model', @@ -450,24 +450,24 @@ describe('Google GenAI integration', () => { 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true 'gen_ai.response.streaming': true, }), - description: 'models blocked-model stream-response', - op: 'gen_ai.models', + description: 'generate_content blocked-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), // Fifth span - error handling for streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'error-model', 'gen_ai.request.temperature': 0.7, 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true }), - description: 'models error-model stream-response', - op: 'gen_ai.models', + description: 'generate_content error-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -505,8 +505,8 @@ describe('Google GenAI integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -515,8 +515,8 @@ 
describe('Google GenAI integration', () => { /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/, ), }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index cd309ff486c9..2416ed9c773a 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -285,8 +285,7 @@ describe('LangChain integration', () => { // First call: Direct Anthropic call made BEFORE LangChain import // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( - span => - span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -299,8 +298,7 @@ describe('LangChain integration', () => { // This should NOT have Anthropic instrumentation (skip works correctly) // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( - span => - span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index 2389715d9307..6f2654a86260 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -335,8 +335,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: Direct Anthropic call made BEFORE LangChain import // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( - span => - span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -349,8 +348,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should NOT have Anthropic instrumentation (skip works correctly) // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( - span => - span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index d17975dbf55d..38426510f764 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -19,6 +19,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 
'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -46,6 +47,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -76,6 +78,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -107,6 +110,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -134,6 +138,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -161,6 +166,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -185,6 +191,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -216,6 +223,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -252,6 +260,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -288,6 +297,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -324,6 +334,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -361,6 +372,7 @@ describe('Vercel AI integration', () => { 
'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -399,6 +411,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -432,6 +445,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -474,6 +488,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -499,6 +514,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -522,6 +538,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -594,6 +611,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -619,6 +637,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -642,6 +661,7 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index adcdd24bd7e9..398d97e35a72 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -25,6 +25,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 
'auto.vercelai.otel', }, @@ -39,6 +40,7 @@ describe('Vercel AI integration (V5)', () => { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -82,6 +84,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -96,6 +99,7 @@ describe('Vercel AI integration (V5)', () => { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -137,6 +141,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -166,6 +171,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -182,6 +188,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, @@ -219,6 +226,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -251,6 +259,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -279,6 +288,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -293,6 +303,7 @@ describe('Vercel AI integration (V5)', () => { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -339,6 +350,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -374,6 +386,7 @@ describe('Vercel AI integration (V5)', 
() => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -392,6 +405,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, @@ -457,6 +471,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', @@ -485,6 +500,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', + 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -500,6 +516,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', + 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts index 17e062c976c8..53cd696ba909 100644 --- a/packages/core/src/tracing/ai/utils.ts +++ b/packages/core/src/tracing/ai/utils.ts @@ -19,6 +19,11 @@ export function getFinalOperationName(methodPath: string): string { if (methodPath.includes('completions')) { return 'text_completion'; } + // Google GenAI: models.generateContent* -> generate_content (actually generates AI responses) + if (methodPath.includes('generateContent')) { + return 'generate_content'; + } + // Anthropic: models.get/retrieve -> models (metadata retrieval only) if (methodPath.includes('models')) { return 'models'; } diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index d93b5ee8949e..bc37012c5939 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -182,6 +182,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void { if (attributes[OPERATION_NAME_ATTRIBUTE]) { const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string); attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName; + attributes[OPERATION_NAME_ATTRIBUTE] = operationName; } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); From ea1883c05afc96bf2abdf1dc9652d823ad501c26 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Thu, 22 Jan 2026 11:28:35 +0100 Subject: [PATCH 4/9] fix cloudflare tests --- .../suites/tracing/anthropic-ai/test.ts | 8 ++++---- .../suites/tracing/google-genai/test.ts | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts index c9e112b32241..eb3fe90b237a 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts +++ 
b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts @@ -17,8 +17,8 @@ it('traces a basic message creation request', async ({ signal }) => { expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', @@ -28,8 +28,8 @@ it('traces a basic message creation request', async ({ signal }) => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 15, }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', }), ]), diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts index 3c36e832a17a..b931936de98a 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts @@ -50,8 +50,8 @@ it('traces Google GenAI chat creation and message sending', async () => { // Third span - models.generateContent expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -62,8 +62,8 @@ it('traces Google GenAI chat creation and message sending', async () => { 'gen_ai.usage.output_tokens': 12, 'gen_ai.usage.total_tokens': 20, }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', }), ]), From 7f1a8f8b5790d73279bafcd45518a9ab6496c547 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Thu, 22 Jan 2026 17:23:37 +0100 Subject: [PATCH 5/9] fix tests --- .../tracing/ai-providers/anthropic/test.ts | 6 +- .../tracing/ai-providers/langchain/test.ts | 6 +- .../suites/tracing/langchain/test.ts | 8 +- .../suites/tracing/anthropic/test.ts | 8 +- .../suites/tracing/google-genai/test.ts | 8 +- .../suites/tracing/langchain/test.ts | 83 ++++++++--------- .../suites/tracing/langchain/v1/test.ts | 89 ++++++++++--------- .../tracing/vercelai/test-generate-object.ts | 2 +- .../suites/tracing/vercelai/test.ts | 38 ++------ .../suites/tracing/vercelai/v5/test.ts | 39 +++----- .../suites/tracing/vercelai/v6/test.ts | 14 +-- packages/core/src/tracing/langchain/index.ts | 8 +- packages/core/src/tracing/langchain/utils.ts | 15 ++-- packages/core/src/tracing/vercel-ai/index.ts | 19 ++-- 14 files changed, 155 insertions(+), 188 deletions(-) diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts index 206e29be16e5..8f14f0318456 100644 --- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts @@ -20,11 +20,11 @@ sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async ( const eventData = envelopeRequestParser(req); // Verify it's 
a gen_ai transaction - expect(eventData.transaction).toBe('messages claude-3-haiku-20240307'); - expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages'); + expect(eventData.transaction).toBe('chat claude-3-haiku-20240307'); + expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat'); expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic'); expect(eventData.contexts?.trace?.data).toMatchObject({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.temperature': 0.7, diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts index 9cc1cc9ff98b..6ea947a61baf 100644 --- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts @@ -20,11 +20,11 @@ sentryTest('manual LangChain instrumentation sends gen_ai transactions', async ( const eventData = envelopeRequestParser(req); // Verify it's a gen_ai transaction - expect(eventData.transaction).toBe('chat claude-3-haiku-20240307'); - expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat'); + expect(eventData.transaction).toBe('invoke_agent claude-3-haiku-20240307'); + expect(eventData.contexts?.trace?.op).toBe('gen_ai.invoke_agent'); expect(eventData.contexts?.trace?.origin).toBe('auto.ai.langchain'); expect(eventData.contexts?.trace?.data).toMatchObject({ - 'gen_ai.operation.name': 'chat', + 'gen_ai.operation.name': 'invoke_agent', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.temperature': 0.7, diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts index 875b4191b84b..56273ed6e0e8 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts @@ -18,8 +18,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } // Chat model span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -29,8 +29,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', }), // Chain span diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index 5d0b747728e3..5b4b6612e7c7 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -656,8 +656,8 @@ describe('Anthropic integration', () => { // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'messages', - 
'sentry.op': 'gen_ai.messages', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.anthropic', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', @@ -666,8 +666,8 @@ describe('Anthropic integration', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts index 11a180fc4d64..16f8ce9cd5e1 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -523,8 +523,8 @@ describe('Google GenAI integration', () => { // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'models', - 'sentry.op': 'gen_ai.models', + 'gen_ai.operation.name': 'generate_content', + 'sentry.op': 'gen_ai.generate_content', 'sentry.origin': 'auto.ai.google_genai', 'gen_ai.system': 'google_genai', 'gen_ai.request.model': 'gemini-1.5-flash', @@ -536,8 +536,8 @@ describe('Google GenAI integration', () => { }, ]), }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 2416ed9c773a..2db04e986afa 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -12,8 +12,8 @@ describe('LangChain integration', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -26,16 +26,16 @@ describe('LangChain integration', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -49,22 +49,22 @@ describe('LangChain integration', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'chat claude-3-opus-20240229', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-opus-20240229', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Third span - error 
handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'error-model', }), - description: 'chat error-model', - op: 'gen_ai.chat', + description: 'invoke_agent error-model', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'internal_error', }), @@ -77,8 +77,8 @@ describe('LangChain integration', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -93,16 +93,16 @@ describe('LangChain integration', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -118,23 +118,23 @@ describe('LangChain integration', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'chat claude-3-opus-20240229', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-opus-20240229', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Third span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'error-model', 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true }), - description: 'chat error-model', - op: 'gen_ai.chat', + description: 'invoke_agent error-model', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'internal_error', }), @@ -166,8 +166,8 @@ describe('LangChain integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -181,8 +181,8 @@ describe('LangChain integration', () => { 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -201,40 +201,40 @@ describe('LangChain integration', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: 
expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -243,8 +243,8 @@ describe('LangChain integration', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -291,7 +291,8 @@ describe('LangChain integration', () => { // Second call: LangChain call // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( - span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span => + span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index 6f2654a86260..eab8421f17f6 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -15,8 +15,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 
'claude-3-5-sonnet-20241022', @@ -29,16 +29,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -52,8 +52,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'chat claude-3-opus-20240229', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-opus-20240229', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -66,7 +66,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'anthropic', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', @@ -80,8 +80,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -96,16 +96,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -121,8 +121,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'chat claude-3-opus-20240229', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-opus-20240229', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -136,7 +136,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.request.model': 'error-model', // 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', @@ -193,8 +193,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { spans: expect.arrayContaining([ 
expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -208,8 +208,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -244,40 +244,40 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -286,8 +286,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'chat claude-3-5-sonnet-20241022', - op: 'gen_ai.chat', + description: 'invoke_agent claude-3-5-sonnet-20241022', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -341,7 +341,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second call: LangChain call // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( - span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span => + 
span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import @@ -386,8 +387,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - initChatModel with gpt-4o expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4o', @@ -400,16 +401,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-4o', 'gen_ai.response.stop_reason': 'stop', }), - description: 'chat gpt-4o', - op: 'gen_ai.chat', + description: 'invoke_agent gpt-4o', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - initChatModel with gpt-3.5-turbo expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'chat', - 'sentry.op': 'gen_ai.chat', + 'gen_ai.operation.name': 'invoke_agent', + 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -421,8 +422,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-3.5-turbo', 'gen_ai.response.stop_reason': 'stop', }), - description: 'chat gpt-3.5-turbo', - op: 'gen_ai.chat', + description: 'invoke_agent gpt-3.5-turbo', + op: 'gen_ai.invoke_agent', origin: 'auto.ai.langchain', status: 'ok', }), @@ -435,7 +436,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'openai', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts index 4261248da349..ac6614af7502 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts @@ -24,7 +24,7 @@ describe('Vercel AI integration - generateObject', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 38426510f764..1436b1e96016 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -18,8 +18,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -47,7 +46,6 @@ describe('Vercel AI integration', () => { 
'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -77,8 +75,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -110,7 +107,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -137,8 +133,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -166,7 +161,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -191,7 +185,6 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -222,8 +215,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -260,7 +252,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -296,8 +287,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -334,7 +324,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 
'vercel.ai.model.provider': 'mock-provider', @@ -371,8 +360,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -411,7 +399,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -445,7 +432,6 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -487,8 +473,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -514,7 +499,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -538,7 +522,6 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -610,8 +593,7 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -637,7 +619,6 @@ describe('Vercel AI integration', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -661,7 +642,6 @@ describe('Vercel AI integration', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -742,7 +722,7 @@ describe('Vercel AI integration', () => { data: expect.objectContaining({ 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', }), }), // The doGenerate span - name 
stays as 'generateText.doGenerate' since model ID is missing diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 398d97e35a72..eedcb43cf7a2 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -24,8 +24,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -40,7 +39,6 @@ describe('Vercel AI integration (V5)', () => { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -83,8 +81,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -99,7 +96,6 @@ describe('Vercel AI integration (V5)', () => { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -140,8 +136,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -171,7 +166,6 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -188,7 +182,6 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', }, @@ -225,8 +218,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -259,7 +251,6 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -287,8 +278,7 @@ 
describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -303,7 +293,6 @@ describe('Vercel AI integration (V5)', () => { 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_text', 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateText.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', @@ -349,8 +338,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }, @@ -361,7 +349,7 @@ describe('Vercel AI integration (V5)', () => { }), // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true) expect.objectContaining({ - data: { + data: expect.objectContaining({ 'gen_ai.request.model': 'mock-model-id', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.operationId': 'ai.generateText.doGenerate', @@ -386,10 +374,9 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', - }, + }), description: 'generate_text mock-model-id', op: 'gen_ai.generate_text', origin: 'auto.vercelai.otel', @@ -397,7 +384,7 @@ describe('Vercel AI integration (V5)', () => { }), // Seventh span - tool call execution span expect.objectContaining({ - data: { + data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', 'gen_ai.tool.call.id': 'call-1', 'gen_ai.tool.name': 'getWeather', @@ -405,10 +392,9 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.output': expect.any(String), 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 'sentry.origin': 'auto.vercelai.otel', - }, + }), description: 'execute_tool getWeather', op: 'gen_ai.execute_tool', origin: 'auto.vercelai.otel', @@ -470,8 +456,7 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', @@ -500,7 +485,6 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, 'gen_ai.operation.name': 'generate_content', - 'operation.name': 'generate_content', 'sentry.op': 'gen_ai.generate_text', 'sentry.origin': 'auto.vercelai.otel', }, @@ -516,7 +500,6 @@ describe('Vercel AI integration (V5)', () => { 'gen_ai.tool.name': 'getWeather', 'gen_ai.tool.type': 'function', 'gen_ai.operation.name': 'execute_tool', - 'operation.name': 'execute_tool', 'sentry.op': 'gen_ai.execute_tool', 
'sentry.origin': 'auto.vercelai.otel', }, diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index ecb856b98267..c8b18a58b803 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -25,7 +25,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -83,7 +83,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -139,7 +139,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -222,7 +222,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -282,7 +282,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, 'gen_ai.usage.total_tokens': 30, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -342,7 +342,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -461,7 +461,7 @@ describe('Vercel AI integration (V6)', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'generate_content', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', 'vercel.ai.response.finishReason': 'tool-calls', diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index 1930be794be5..153e2427e12a 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -92,10 +92,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} ${modelName}`, - op: 'gen_ai.pipeline', + op: 'gen_ai.invoke_agent', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.pipeline', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', }, }, span => { @@ -130,10 +130,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} 
${modelName}`, - op: 'gen_ai.chat', + op: 'gen_ai.invoke_agent', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', }, }, span => { diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index 0e36b5542ba5..c79c1140e6a9 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -216,19 +216,18 @@ function extractCommonRequestAttributes( /** * Small helper to assemble boilerplate attributes shared by both request extractors. - * Both LLM and ChatModel invocations use 'chat' as the operation name since modern - * LLM interactions are chat-based and this aligns with OpenTelemetry semantic conventions. */ function baseRequestAttributes( system: unknown, modelName: unknown, + operationType: 'invoke_agent' | 'chat', serialized: LangChainSerialized, invocationParams?: Record, langSmithMetadata?: Record, ): Record { return { [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'), - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationType, [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata), @@ -238,8 +237,8 @@ function baseRequestAttributes( /** * Extracts attributes for plain LLM invocations (string prompts). * - * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. - * Modern LLM invocations typically use chat-based models even when called with string prompts. + * - Operation is tagged as `invoke_agent` following OpenTelemetry semantic conventions. + * LangChain orchestrates LLM invocations as agent operations. * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}` * messages to align with the chat schema used elsewhere. */ @@ -253,7 +252,7 @@ export function extractLLMRequestAttributes( const system = langSmithMetadata?.ls_provider; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); @@ -267,7 +266,7 @@ export function extractLLMRequestAttributes( /** * Extracts attributes for ChatModel invocations (array-of-arrays of messages). * - * - Operation is tagged as `chat`. + * - Operation is tagged as `invoke_agent` since LangChain orchestrates agent invocations. * - We flatten LangChain's `LangChainMessage[][]` and normalize shapes into a * consistent `{ role, content }` array when `recordInputs` is true. * - Provider system value falls back to `serialized.id?.[2]`. @@ -282,7 +281,7 @@ export function extractChatModelRequestAttributes( const system = langSmithMetadata?.ls_provider ?? llm.id?.[2]; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 
'unknown'; - const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index bc37012c5939..601ddaecd501 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -53,12 +53,19 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans */ function mapVercelAiOperationName(operationName: string): string { - // Map to OpenTelemetry well-known values + // Top-level pipeline operations map to invoke_agent if ( operationName === 'ai.generateText' || operationName === 'ai.streamText' || operationName === 'ai.generateObject' || operationName === 'ai.streamObject' || + operationName === 'ai.embed' || + operationName === 'ai.embedMany' + ) { + return 'invoke_agent'; + } + // .do* operations are the actual LLM calls + if ( operationName === 'ai.generateText.doGenerate' || operationName === 'ai.streamText.doStream' || operationName === 'ai.generateObject.doGenerate' || @@ -66,12 +73,7 @@ function mapVercelAiOperationName(operationName: string): string { ) { return 'generate_content'; } - if ( - operationName === 'ai.embed' || - operationName === 'ai.embedMany' || - operationName === 'ai.embed.doEmbed' || - operationName === 'ai.embedMany.doEmbed' - ) { + if (operationName === 'ai.embed.doEmbed' || operationName === 'ai.embedMany.doEmbed') { return 'embeddings'; } if (operationName === 'ai.toolCall') { @@ -182,7 +184,8 @@ function processEndedVercelAiSpan(span: SpanJSON): void { if (attributes[OPERATION_NAME_ATTRIBUTE]) { const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string); attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName; - attributes[OPERATION_NAME_ATTRIBUTE] = operationName; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[OPERATION_NAME_ATTRIBUTE]; } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE); renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); From 0fc28ad7fc282ef7f95928006d75bcee25dd9d23 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Fri, 23 Jan 2026 12:58:44 +0100 Subject: [PATCH 6/9] refactor to use chat for llms and chat models --- .../suites/tracing/langchain/test.ts | 83 +++++++++---------- .../suites/tracing/langchain/v1/test.ts | 82 +++++++++--------- packages/core/src/tracing/langchain/index.ts | 8 +- packages/core/src/tracing/langchain/utils.ts | 15 ++-- .../core/src/tracing/vercel-ai/constants.ts | 19 +++++ packages/core/src/tracing/vercel-ai/index.ts | 20 +---- 6 files changed, 117 insertions(+), 110 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index 2db04e986afa..2416ed9c773a 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -12,8 +12,8 @@ describe('LangChain integration', () => { // First span - chat model with 
claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -26,16 +26,16 @@ describe('LangChain integration', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -49,22 +49,22 @@ describe('LangChain integration', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third span - error handling expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'error-model', }), - description: 'invoke_agent error-model', - op: 'gen_ai.invoke_agent', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'internal_error', }), @@ -77,8 +77,8 @@ describe('LangChain integration', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -93,16 +93,16 @@ describe('LangChain integration', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -118,23 +118,23 @@ describe('LangChain integration', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 
'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'error-model', 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true }), - description: 'invoke_agent error-model', - op: 'gen_ai.invoke_agent', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'internal_error', }), @@ -166,8 +166,8 @@ describe('LangChain integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -181,8 +181,8 @@ describe('LangChain integration', () => { 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -201,40 +201,40 @@ describe('LangChain integration', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -243,8 +243,8 @@ describe('LangChain integration', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'invoke_agent 
claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -291,8 +291,7 @@ describe('LangChain integration', () => { // Second call: LangChain call // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( - span => - span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index eab8421f17f6..c0aed4077b19 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -15,8 +15,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with claude-3-5-sonnet expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -29,16 +29,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with claude-3-opus expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -52,8 +52,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': expect.any(String), 'gen_ai.response.stop_reason': expect.any(String), }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -80,8 +80,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -96,16 +96,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - chat model with PII expect.objectContaining({ data: expect.objectContaining({ - 
'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-opus-20240229', @@ -121,8 +121,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-opus-20240229', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-opus-20240229', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -193,8 +193,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -208,8 +208,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.stop_reason': 'tool_use', 'gen_ai.response.tool_calls': expect.any(String), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -244,40 +244,40 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: String input truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present and should include truncated string input (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second call: Array input, last message truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs) 'gen_ai.request.messages': expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Third call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ 
-286,8 +286,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), @@ -342,7 +342,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( span => - span.description === 'invoke_agent claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import @@ -387,8 +387,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First span - initChatModel with gpt-4o expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4o', @@ -401,16 +401,16 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-4o', 'gen_ai.response.stop_reason': 'stop', }), - description: 'invoke_agent gpt-4o', - op: 'gen_ai.invoke_agent', + description: 'chat gpt-4o', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), // Second span - initChatModel with gpt-3.5-turbo expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -422,8 +422,8 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { 'gen_ai.response.model': 'gpt-3.5-turbo', 'gen_ai.response.stop_reason': 'stop', }), - description: 'invoke_agent gpt-3.5-turbo', - op: 'gen_ai.invoke_agent', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', status: 'ok', }), diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index 153e2427e12a..8f52f960ca69 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -92,10 +92,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} ${modelName}`, - op: 'gen_ai.invoke_agent', + op: 'gen_ai.chat', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', }, }, span => { @@ -130,10 +130,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}): startSpanManual( { name: `${operationName} ${modelName}`, - op: 'gen_ai.invoke_agent', + op: 'gen_ai.chat', attributes: { ...attributes, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', }, }, span => { diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts index c79c1140e6a9..d119de798950 100644 --- a/packages/core/src/tracing/langchain/utils.ts +++ b/packages/core/src/tracing/langchain/utils.ts @@ -216,18 
+216,18 @@ function extractCommonRequestAttributes( /** * Small helper to assemble boilerplate attributes shared by both request extractors. + * Always uses 'chat' as the operation type for all LLM and chat model operations. */ function baseRequestAttributes( system: unknown, modelName: unknown, - operationType: 'invoke_agent' | 'chat', serialized: LangChainSerialized, invocationParams?: Record, langSmithMetadata?: Record, ): Record { return { [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'), - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationType, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName), [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN, ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata), @@ -237,8 +237,8 @@ function baseRequestAttributes( /** * Extracts attributes for plain LLM invocations (string prompts). * - * - Operation is tagged as `invoke_agent` following OpenTelemetry semantic conventions. - * LangChain orchestrates LLM invocations as agent operations. + * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. + * LangChain LLM operations are treated as chat operations. * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}` * messages to align with the chat schema used elsewhere. */ @@ -252,7 +252,7 @@ export function extractLLMRequestAttributes( const system = langSmithMetadata?.ls_provider; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(prompts) && prompts.length > 0) { setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length); @@ -266,7 +266,8 @@ export function extractLLMRequestAttributes( /** * Extracts attributes for ChatModel invocations (array-of-arrays of messages). * - * - Operation is tagged as `invoke_agent` since LangChain orchestrates agent invocations. + * - Operation is tagged as `chat` following OpenTelemetry semantic conventions. + * LangChain chat model operations are chat operations. * - We flatten LangChain's `LangChainMessage[][]` and normalize shapes into a * consistent `{ role, content }` array when `recordInputs` is true. * - Provider system value falls back to `serialized.id?.[2]`. @@ -281,7 +282,7 @@ export function extractChatModelRequestAttributes( const system = langSmithMetadata?.ls_provider ?? llm.id?.[2]; const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 
'unknown'; - const attrs = baseRequestAttributes(system, modelName, 'invoke_agent', llm, invocationParams, langSmithMetadata); + const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata); if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) { const normalized = normalizeLangChainMessages(langChainMessages.flat()); diff --git a/packages/core/src/tracing/vercel-ai/constants.ts b/packages/core/src/tracing/vercel-ai/constants.ts index fe307b03e7fb..57e8bf2a57c8 100644 --- a/packages/core/src/tracing/vercel-ai/constants.ts +++ b/packages/core/src/tracing/vercel-ai/constants.ts @@ -3,3 +3,22 @@ import type { Span } from '../../types-hoist/span'; // Global Map to track tool call IDs to their corresponding spans // This allows us to capture tool errors and link them to the correct span export const toolCallSpanMap = new Map(); + +// Operation sets for efficient mapping to OpenTelemetry semantic convention values +export const INVOKE_AGENT_OPS = new Set([ + 'ai.generateText', + 'ai.streamText', + 'ai.generateObject', + 'ai.streamObject', + 'ai.embed', + 'ai.embedMany', +]); + +export const GENERATE_CONTENT_OPS = new Set([ + 'ai.generateText.doGenerate', + 'ai.streamText.doStream', + 'ai.generateObject.doGenerate', + 'ai.streamObject.doStream', +]); + +export const EMBEDDINGS_OPS = new Set(['ai.embed.doEmbed', 'ai.embedMany.doEmbed']); diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 601ddaecd501..c7d5cdf82c23 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -13,7 +13,7 @@ import { GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import { toolCallSpanMap } from './constants'; +import { EMBEDDINGS_OPS, GENERATE_CONTENT_OPS, INVOKE_AGENT_OPS, toolCallSpanMap } from './constants'; import type { TokenSummary } from './types'; import { accumulateTokensForParent, @@ -54,26 +54,14 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void { */ function mapVercelAiOperationName(operationName: string): string { // Top-level pipeline operations map to invoke_agent - if ( - operationName === 'ai.generateText' || - operationName === 'ai.streamText' || - operationName === 'ai.generateObject' || - operationName === 'ai.streamObject' || - operationName === 'ai.embed' || - operationName === 'ai.embedMany' - ) { + if (INVOKE_AGENT_OPS.has(operationName)) { return 'invoke_agent'; } // .do* operations are the actual LLM calls - if ( - operationName === 'ai.generateText.doGenerate' || - operationName === 'ai.streamText.doStream' || - operationName === 'ai.generateObject.doGenerate' || - operationName === 'ai.streamObject.doStream' - ) { + if (GENERATE_CONTENT_OPS.has(operationName)) { return 'generate_content'; } - if (operationName === 'ai.embed.doEmbed' || operationName === 'ai.embedMany.doEmbed') { + if (EMBEDDINGS_OPS.has(operationName)) { return 'embeddings'; } if (operationName === 'ai.toolCall') { From 0c4e97130ad414ad90acbf41d02dc720d274d7ad Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Fri, 23 Jan 2026 12:59:08 +0100 Subject: [PATCH 7/9] update with linting --- .../node-integration-tests/suites/tracing/langchain/v1/test.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index 
c0aed4077b19..95e4529b3fdc 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -341,8 +341,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // Second call: LangChain call // This should have LangChain instrumentation (origin: 'auto.ai.langchain') const langchainSpan = spans.find( - span => - span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain', ); // Third call: Direct Anthropic call made AFTER LangChain import From 02d816cd3032c0634adc08339381ad5310823432 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Fri, 23 Jan 2026 13:45:20 +0100 Subject: [PATCH 8/9] fix cloudflare and browser tests --- .../suites/tracing/ai-providers/langchain/test.ts | 6 +++--- .../suites/tracing/langchain/test.ts | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts index 6ea947a61baf..9cc1cc9ff98b 100644 --- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/langchain/test.ts @@ -20,11 +20,11 @@ sentryTest('manual LangChain instrumentation sends gen_ai transactions', async ( const eventData = envelopeRequestParser(req); // Verify it's a gen_ai transaction - expect(eventData.transaction).toBe('invoke_agent claude-3-haiku-20240307'); - expect(eventData.contexts?.trace?.op).toBe('gen_ai.invoke_agent'); + expect(eventData.transaction).toBe('chat claude-3-haiku-20240307'); + expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat'); expect(eventData.contexts?.trace?.origin).toBe('auto.ai.langchain'); expect(eventData.contexts?.trace?.data).toMatchObject({ - 'gen_ai.operation.name': 'invoke_agent', + 'gen_ai.operation.name': 'chat', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.temperature': 0.7, diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts index 56273ed6e0e8..875b4191b84b 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts @@ -18,8 +18,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } // Chat model span expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'invoke_agent', - 'sentry.op': 'gen_ai.invoke_agent', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.langchain', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-5-sonnet-20241022', @@ -29,8 +29,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal } 'gen_ai.usage.output_tokens': 15, 'gen_ai.usage.total_tokens': 25, }), - description: 'invoke_agent claude-3-5-sonnet-20241022', - op: 'gen_ai.invoke_agent', + description: 'chat claude-3-5-sonnet-20241022', + op: 'gen_ai.chat', origin: 'auto.ai.langchain', }), // Chain span From b917bd498b196296dd370e82a2c31c7a16d79559 Mon Sep 17 00:00:00 2001 From: RulaKhaled Date: Fri, 23 Jan 2026 16:48:14 +0100 Subject: 
[PATCH 9/9] fix tests --- .../node-integration-tests/suites/tracing/vercelai/test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index c38049ffec58..ca4a8521b27b 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -347,7 +347,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -522,7 +522,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_text', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider',
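
Taken together, the series lands on a Set-based operation-name mapping. The sketch below condenses the final (patch 6) version of constants.ts and mapVercelAiOperationName into one place; it is not the SDK source verbatim, and the pass-through return for unmapped operation IDs is an assumption, since the function's fallback branch is not visible in these hunks.

// Condensed sketch of the final mapping — a minimal, runnable illustration.
const INVOKE_AGENT_OPS = new Set<string>([
  'ai.generateText',
  'ai.streamText',
  'ai.generateObject',
  'ai.streamObject',
  'ai.embed',
  'ai.embedMany',
]);
const GENERATE_CONTENT_OPS = new Set<string>([
  'ai.generateText.doGenerate',
  'ai.streamText.doStream',
  'ai.generateObject.doGenerate',
  'ai.streamObject.doStream',
]);
const EMBEDDINGS_OPS = new Set<string>(['ai.embed.doEmbed', 'ai.embedMany.doEmbed']);

function mapVercelAiOperationName(operationName: string): string {
  if (INVOKE_AGENT_OPS.has(operationName)) return 'invoke_agent'; // top-level pipeline spans
  if (GENERATE_CONTENT_OPS.has(operationName)) return 'generate_content'; // .do* spans, the actual LLM calls
  if (EMBEDDINGS_OPS.has(operationName)) return 'embeddings';
  if (operationName === 'ai.toolCall') return 'execute_tool';
  return operationName; // assumed pass-through for unmapped IDs (fallback not shown in the diff)
}

// Sample outputs, matching the updated test expectations in this series:
mapVercelAiOperationName('ai.generateText'); // 'invoke_agent'
mapVercelAiOperationName('ai.generateText.doGenerate'); // 'generate_content'
mapVercelAiOperationName('ai.embedMany.doEmbed'); // 'embeddings'
mapVercelAiOperationName('ai.toolCall'); // 'execute_tool'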
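
The processEndedVercelAiSpan change is the crux of the fix: the SDK now reports only the well-known value under gen_ai.operation.name and deletes the raw operation.name attribute rather than overwriting it. A minimal illustration of that rewrite, continuing the sketch above — the real code operates on a SpanJSON's attributes and spells the keys via the GEN_AI_OPERATION_NAME_ATTRIBUTE and OPERATION_NAME_ATTRIBUTE constants:

function reportWellKnownOperationName(attributes: Record<string, unknown>): void {
  const raw = attributes['operation.name'];
  if (typeof raw === 'string') {
    // Report only the OpenTelemetry well-known value...
    attributes['gen_ai.operation.name'] = mapVercelAiOperationName(raw);
    // ...and drop the raw Vercel AI key instead of mirroring the mapped value
    // into it — which is why the 'operation.name' expectations disappear from
    // the vercelai tests in this series.
    delete attributes['operation.name'];
  }
}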
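
On the LangChain side, patch 6 settles on 'chat' for both plain LLM and chat-model invocations, so spans come out as `chat <model>` with op 'gen_ai.chat'. A trimmed sketch of the handler's span start after that refactor, assuming startSpanManual from @sentry/core as used in the diff; in the real handler, operationName and modelName are derived from the callback payload and the full attribute set comes from the extract*RequestAttributes helpers:

import { startSpanManual } from '@sentry/core';

function startLangChainChatSpan(
  modelName: string,
  attributes: Record<string, string | number | boolean>,
): void {
  startSpanManual(
    {
      name: `chat ${modelName}`, // e.g. 'chat claude-3-5-sonnet-20241022'
      op: 'gen_ai.chat',
      attributes: {
        ...attributes,
        'sentry.op': 'gen_ai.chat',
      },
    },
    span => {
      // The handler keeps the span and ends it in the matching
      // end (or error) callback once the model call resolves.
      void span;
    },
  );
}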
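
One test-hygiene detail worth noting: the v5 assertions for the doGenerate and tool-call spans switch from an exact `data: { ... }` literal to `data: expect.objectContaining({ ... })`, so newly added span attributes no longer fail those assertions. Roughly, in Jest/Vitest semantics:

// Passes: extra keys on the received object are ignored under objectContaining.
expect({ 'gen_ai.operation.name': 'execute_tool', extra: 1 }).toEqual(
  expect.objectContaining({ 'gen_ai.operation.name': 'execute_tool' }),
);
// A plain object literal would instead require every key to match exactly.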