From a0fabf098fd382f76b397f9773a908c763e8f172 Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Wed, 28 Jan 2026 16:59:21 +0100
Subject: [PATCH 1/7] fix(core): Fix truncation to only keep last message in vercel

---
 .../vercelai/scenario-message-truncation.mjs  | 51 +++++++++++++++++++
 .../suites/tracing/vercelai/test.ts           | 39 ++++++++++++++
 2 files changed, 90 insertions(+)
 create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-message-truncation.mjs

diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-message-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-message-truncation.mjs
new file mode 100644
index 000000000000..6c3f7327e64b
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-message-truncation.mjs
@@ -0,0 +1,51 @@
+import * as Sentry from '@sentry/node';
+import { generateText } from 'ai';
+import { MockLanguageModelV1 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    const largeContent1 = 'A'.repeat(15000); // ~15KB
+    const largeContent2 = 'B'.repeat(15000); // ~15KB
+    const largeContent3 = 'C'.repeat(25000) + 'D'.repeat(25000); // ~50KB (will be truncated)
+
+    // Test 1: Messages array with large last message that gets truncated
+    // Only the last message should be kept, and it should be truncated to only Cs
+    await generateText({
+      experimental_telemetry: { isEnabled: true },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 5 },
+          text: 'Response to truncated messages',
+        }),
+      }),
+      messages: [
+        { role: 'user', content: largeContent1 },
+        { role: 'assistant', content: largeContent2 },
+        { role: 'user', content: largeContent3 },
+      ],
+    });
+
+    // Test 2: Messages array where last message is small and kept intact
+    const smallContent = 'This is a small message that fits within the limit';
+    await generateText({
+      experimental_telemetry: { isEnabled: true },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 5 },
+          text: 'Response to small message',
+        }),
+      }),
+      messages: [
+        { role: 'user', content: largeContent1 },
+        { role: 'assistant', content: largeContent2 },
+        { role: 'user', content: smallContent },
+      ],
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index a98e7b97e919..a79ca3c26a0e 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -796,4 +796,43 @@ describe('Vercel AI integration', () => {
       });
     },
   );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-message-truncation.mjs',
+    'instrument-with-pii.mjs',
+    (createRunner, test) => {
+      test('truncates messages when they exceed byte limit', async () => {
+        await createRunner()
+          .ignore('event')
+          .expect({
+            transaction: {
+              transaction: 'main',
+              spans: expect.arrayContaining([
+                // First call: Last message truncated (only C's remain, D's are cropped)
+                expect.objectContaining({
+                  data: expect.objectContaining({
+                    [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
+                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"text":"C+".*\]$/),
+                    [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages',
+                  }),
+                }),
+                // Second call: Last message is small and kept intact
+                expect.objectContaining({
+                  data: expect.objectContaining({
+                    [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
+                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining(
+                      'This is a small message that fits within the limit',
+                    ),
+                    [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to small message',
+                  }),
+                }),
+              ]),
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+  );
 });
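The scenario and test above pin down the intended truncation semantics: when the serialized messages exceed the byte budget, only the most recent message is kept, and its content is cropped from the end if it alone is still too large; a small final message is kept verbatim. A minimal model of that behaviour, as a sketch only -- the real logic lives in getTruncatedJsonString, whose exact byte limit is not shown in this patch and is assumed here to fall between the ~25KB of 'C's and the ~50KB final message:

    // Sketch of the behaviour the test asserts (bytes approximated by string
    // length). `budget` is a hypothetical stand-in for the SDK's internal limit.
    function sketchTruncate(messages: { role: string; content: string }[], budget: number): string {
      const last = messages[messages.length - 1];
      const serialized = JSON.stringify([last]);
      if (serialized.length <= budget) {
        return serialized; // small final message survives intact
      }
      // crop the newest message's content so the serialized array fits
      return JSON.stringify([{ ...last, content: last.content.slice(0, budget) }]);
    }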
From a14a7413630044c40f85025cbc74b0d0607f3c36 Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Thu, 29 Jan 2026 11:47:07 +0100
Subject: [PATCH 2/7] no longer set gen_ai.prompt in vercel

---
 .../node-integration-tests/suites/tracing/vercelai/test.ts | 5 -----
 .../suites/tracing/vercelai/v5/test.ts                     | 5 -----
 .../suites/tracing/vercelai/v6/test.ts                     | 5 -----
 packages/core/src/tracing/vercel-ai/utils.ts               | 4 ----
 4 files changed, 19 deletions(-)

diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index a79ca3c26a0e..6954779e97c3 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -5,7 +5,6 @@ import {
   GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
   GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
   GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_PROMPT_ATTRIBUTE,
   GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
   GEN_AI_REQUEST_MODEL_ATTRIBUTE,
   GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -90,7 +89,6 @@ describe('Vercel AI integration', () => {
           // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
           expect.objectContaining({
             data: {
-              [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
               [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
               [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
               [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -230,7 +228,6 @@ describe('Vercel AI integration', () => {
           // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
           expect.objectContaining({
             data: {
-              [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
               [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
               [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
               [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -303,7 +300,6 @@ describe('Vercel AI integration', () => {
           // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
           expect.objectContaining({
             data: {
-              [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
               [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
               [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
               [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -375,7 +371,6 @@ describe('Vercel AI integration', () => {
           // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
           expect.objectContaining({
             data: {
-              [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
               [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
               [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
               [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index 332f84777264..42c6f160f05d 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -5,7 +5,6 @@ import {
   GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
   GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
   GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_PROMPT_ATTRIBUTE,
   GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
   GEN_AI_REQUEST_MODEL_ATTRIBUTE,
   GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -97,7 +96,6 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
             [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
@@ -236,7 +234,6 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -297,7 +294,6 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -357,7 +353,6 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
index f779eebdf0e3..42dd0ac0fefe 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
@@ -4,7 +4,6 @@ import { afterAll, describe, expect } from 'vitest';
 import {
   GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
   GEN_AI_OPERATION_NAME_ATTRIBUTE,
-  GEN_AI_PROMPT_ATTRIBUTE,
   GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
   GEN_AI_REQUEST_MODEL_ATTRIBUTE,
   GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
@@ -99,7 +98,6 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
@@ -239,7 +237,6 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the first span?"}',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -300,7 +297,6 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"Where is the second span?"}',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -360,7 +356,6 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
             'vercel.ai.streaming': false,
-            [GEN_AI_PROMPT_ATTRIBUTE]: '{"prompt":"What is the weather in San Francisco?"}',
             [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
             [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
             [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts
index 2a0878f1e591..a822dbf57bde 100644
--- a/packages/core/src/tracing/vercel-ai/utils.ts
+++ b/packages/core/src/tracing/vercel-ai/utils.ts
@@ -133,10 +133,6 @@ export function convertPromptToMessages(prompt: string): { role: string; content
  * invoke_agent op
  */
 export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void {
-  if (attributes[AI_PROMPT_ATTRIBUTE]) {
-    const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]);
-    span.setAttribute('gen_ai.prompt', truncatedPrompt);
-  }
   const prompt = attributes[AI_PROMPT_ATTRIBUTE];
   if (
     typeof prompt === 'string' &&

From 0a8a9766db502a22a75281bee35d5f9a2af55f2d Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Thu, 29 Jan 2026 12:00:51 +0100
Subject: [PATCH 3/7] clean

---
 packages/core/src/tracing/vercel-ai/utils.ts | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts
index a822dbf57bde..9244e42dd8c1 100644
--- a/packages/core/src/tracing/vercel-ai/utils.ts
+++ b/packages/core/src/tracing/vercel-ai/utils.ts
@@ -133,12 +133,13 @@ export function convertPromptToMessages(prompt: string): { role: string; content
  * invoke_agent op
  */
 export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void {
-  const prompt = attributes[AI_PROMPT_ATTRIBUTE];
   if (
-    typeof prompt === 'string' &&
+    typeof attributes[AI_PROMPT_ATTRIBUTE] === 'string' &&
     !attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] &&
     !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]
   ) {
+    // No messages array is present, so we need to convert the prompt to the proper messages format
+    const prompt = attributes[AI_PROMPT_ATTRIBUTE];
     const messages = convertPromptToMessages(prompt);
     if (messages.length) {
       const { systemInstructions, filteredMessages } = extractSystemInstructions(messages);
@@ -154,6 +155,7 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
       });
     }
   } else if (typeof attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] === 'string') {
+    // In this case we already get a properly formatted messages array, this is the preferred way to get the messages
     try {
       const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]);
       if (Array.isArray(messages)) {
From c1d8eea9dd2502f06cc7a3936c46e10eb3e5d8a6 Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Thu, 29 Jan 2026 13:44:27 +0100
Subject: [PATCH 4/7] handle messages array input

---
 packages/core/src/tracing/vercel-ai/utils.ts | 10 ++++
 .../test/lib/utils/vercelai-utils.test.ts    | 48 +++++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts
index 9244e42dd8c1..f2f7008b8b3a 100644
--- a/packages/core/src/tracing/vercel-ai/utils.ts
+++ b/packages/core/src/tracing/vercel-ai/utils.ts
@@ -111,6 +111,16 @@ export function convertPromptToMessages(prompt: string): { role: string; content
   try {
     const p = JSON.parse(prompt);
     if (!!p && typeof p === 'object') {
+      // Handle messages array format: { messages: [...] }
+      const { messages } = p as { messages?: unknown };
+      if (Array.isArray(messages)) {
+        return messages.filter(
+          (m: unknown): m is { role: string; content: string } =>
+            !!m && typeof m === 'object' && 'role' in m && 'content' in m,
+        );
+      }
+
+      // Handle prompt/system string format: { prompt: "...", system: "..." }
       const { prompt, system } = p;
       if (typeof prompt === 'string' || typeof system === 'string') {
         const messages: { role: string; content: string }[] = [];
diff --git a/packages/core/test/lib/utils/vercelai-utils.test.ts b/packages/core/test/lib/utils/vercelai-utils.test.ts
index be329e6f5970..190902ed78e7 100644
--- a/packages/core/test/lib/utils/vercelai-utils.test.ts
+++ b/packages/core/test/lib/utils/vercelai-utils.test.ts
@@ -37,6 +37,54 @@ describe('vercel-ai-utils', () => {
       ).toStrictEqual([{ role: 'user', content: 'Hello, robot' }]);
     });
 
+    it('should convert a messages array with multiple messages', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            messages: [
+              { role: 'user', content: 'What is the weather?' },
+              { role: 'assistant', content: "I'll check." },
+              { role: 'user', content: 'Also New York?' },
+            ],
+          }),
+        ),
+      ).toStrictEqual([
+        { role: 'user', content: 'What is the weather?' },
+        { role: 'assistant', content: "I'll check." },
+        { role: 'user', content: 'Also New York?' },
+      ]);
+    });
+
+    it('should convert a messages array with a single message', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            messages: [{ role: 'user', content: 'Hello' }],
+          }),
+        ),
+      ).toStrictEqual([{ role: 'user', content: 'Hello' }]);
+    });
+
+    it('should filter out invalid entries in messages array', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            messages: [
+              { role: 'user', content: 'Hello' },
+              'not an object',
+              null,
+              { role: 'user' },
+              { content: 'missing role' },
+              { role: 'assistant', content: 'Valid' },
+            ],
+          }),
+        ),
+      ).toStrictEqual([
+        { role: 'user', content: 'Hello' },
+        { role: 'assistant', content: 'Valid' },
+      ]);
+    });
+
     it('should ignore unexpected data', () => {
       expect(
         convertPromptToMessages(
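After this change, convertPromptToMessages understands both JSON shapes that can appear in the ai.prompt attribute: the { prompt, system } string form and the full { messages: [...] } array form, with malformed entries dropped. Mirroring the new unit tests above:

    // The messages array is passed through; non-objects and entries
    // missing `role` or `content` are filtered out.
    convertPromptToMessages(
      JSON.stringify({ messages: [{ role: 'user', content: 'Hello' }, null, { role: 'user' }] }),
    );
    // -> [{ role: 'user', content: 'Hello' }]

Note that the type guard only checks for the presence of the role and content keys, not their value types, so for example a non-string content would still pass through the filter.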
From 85f963b861e61c460cc15ce3704815a213e2fbea Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Thu, 29 Jan 2026 14:03:36 +0100
Subject: [PATCH 5/7] truncate ai.prompt as well

---
 packages/core/src/tracing/vercel-ai/utils.ts | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts
index f2f7008b8b3a..e797e2f8762f 100644
--- a/packages/core/src/tracing/vercel-ai/utils.ts
+++ b/packages/core/src/tracing/vercel-ai/utils.ts
@@ -159,8 +159,11 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
       }
 
       const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
+      const truncatedMessages = getTruncatedJsonString(filteredMessages);
+
       span.setAttributes({
-        [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(filteredMessages),
+        [AI_PROMPT_ATTRIBUTE]: truncatedMessages,
+        [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
         [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
       });
     }
@@ -176,9 +179,11 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
       }
 
       const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
+      const truncatedMessages = getTruncatedJsonString(filteredMessages);
+
       span.setAttributes({
-        [AI_PROMPT_MESSAGES_ATTRIBUTE]: undefined,
-        [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(filteredMessages),
+        [AI_PROMPT_MESSAGES_ATTRIBUTE]: truncatedMessages,
+        [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
         [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
       });
     }
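Writing the truncated messages back to AI_PROMPT_ATTRIBUTE (and AI_PROMPT_MESSAGES_ATTRIBUTE) means the raw vercel.ai.prompt payload on the span is replaced by the same normalized, truncated messages JSON that backs gen_ai.input.messages. That is why the two test-fix patches below flip the expected values, for example:

    // before this patch the span kept the raw SDK payload:
    'vercel.ai.prompt': '{"prompt":"Where is the second span?"}'
    // afterwards it carries the normalized messages array:
    'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]'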
From d746121224cf92ec932f32551fd460b28237a6f1 Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Thu, 29 Jan 2026 14:12:13 +0100
Subject: [PATCH 6/7] fix tests

---
 .../suites/tracing/vercelai/test.ts | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 6954779e97c3..0f1efb26d1f0 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -103,7 +103,7 @@ describe('Vercel AI integration', () => {
               'vercel.ai.model.provider': 'mock-provider',
               'vercel.ai.operationId': 'ai.generateText',
               'vercel.ai.pipeline.name': 'generateText',
-              'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+              'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
               'vercel.ai.response.finishReason': 'stop',
               'vercel.ai.settings.maxRetries': 2,
               'vercel.ai.settings.maxSteps': 1,
@@ -242,7 +242,7 @@ describe('Vercel AI integration', () => {
               'vercel.ai.model.provider': 'mock-provider',
               'vercel.ai.operationId': 'ai.generateText',
               'vercel.ai.pipeline.name': 'generateText',
-              'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+              'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
               'vercel.ai.response.finishReason': 'stop',
               'vercel.ai.settings.maxRetries': 2,
               'vercel.ai.settings.maxSteps': 1,
@@ -314,7 +314,7 @@ describe('Vercel AI integration', () => {
               'vercel.ai.model.provider': 'mock-provider',
               'vercel.ai.operationId': 'ai.generateText',
               'vercel.ai.pipeline.name': 'generateText',
-              'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+              'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
               'vercel.ai.response.finishReason': 'stop',
               'vercel.ai.settings.maxRetries': 2,
               'vercel.ai.settings.maxSteps': 1,
@@ -386,7 +386,7 @@ describe('Vercel AI integration', () => {
               'vercel.ai.model.provider': 'mock-provider',
               'vercel.ai.operationId': 'ai.generateText',
               'vercel.ai.pipeline.name': 'generateText',
-              'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+              'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
               'vercel.ai.response.finishReason': 'tool-calls',
               'vercel.ai.settings.maxRetries': 2,
               'vercel.ai.settings.maxSteps': 1,
@@ -808,7 +808,7 @@ describe('Vercel AI integration', () => {
                 expect.objectContaining({
                   data: expect.objectContaining({
                     [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
-                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"text":"C+".*\]$/),
+                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/),
                     [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages',
                   }),
                 }),
From ed69ebda5c49e83df97f804763e63f7e4d660634 Mon Sep 17 00:00:00 2001
From: Nicolas Hrubec
Date: Thu, 29 Jan 2026 16:06:12 +0100
Subject: [PATCH 7/7] let's try again

---
 .../suites/tracing/vercelai/v5/test.ts | 8 ++++----
 .../suites/tracing/vercelai/v6/test.ts | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index 42c6f160f05d..eb42156920e9 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -91,7 +91,7 @@ describe('Vercel AI integration (V5)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
             'vercel.ai.response.finishReason': 'stop',
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
             'vercel.ai.settings.maxRetries': 2,
@@ -227,7 +227,7 @@ describe('Vercel AI integration (V5)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
             [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
             'vercel.ai.response.finishReason': 'stop',
@@ -287,7 +287,7 @@ describe('Vercel AI integration (V5)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
             [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
             'vercel.ai.response.finishReason': 'stop',
@@ -346,7 +346,7 @@ describe('Vercel AI integration (V5)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
             [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
             'vercel.ai.response.finishReason': 'tool-calls',
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
index 42dd0ac0fefe..2a75cfdfbfca 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
@@ -92,7 +92,7 @@ describe('Vercel AI integration (V6)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
             'vercel.ai.request.headers.user-agent': expect.any(String),
             'vercel.ai.response.finishReason': 'stop',
             [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
@@ -230,7 +230,7 @@ describe('Vercel AI integration (V6)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
             'vercel.ai.request.headers.user-agent': expect.any(String),
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
             'vercel.ai.response.finishReason': 'stop',
@@ -290,7 +290,7 @@ describe('Vercel AI integration (V6)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
             'vercel.ai.request.headers.user-agent': expect.any(String),
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
             'vercel.ai.response.finishReason': 'stop',
@@ -349,7 +349,7 @@ describe('Vercel AI integration (V6)', () => {
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText',
             'vercel.ai.pipeline.name': 'generateText',
-            'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+            'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
             'vercel.ai.request.headers.user-agent': expect.any(String),
             [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
             'vercel.ai.response.finishReason': 'tool-calls',
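Net effect of the series on a generateText span with inputs recorded, sketched with the attribute values from the updated tests (written with the constant names used throughout, since the literal key behind the original-length constant is not shown in these patches):

    {
      'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
      [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
      [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
      // 'gen_ai.prompt' is no longer emitted (removed in patch 2), and all
      // message payloads above are truncated to the byte budget before being set
    }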