diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts index 206e29be16e5..8f14f0318456 100644 --- a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts @@ -20,11 +20,11 @@ sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async ( const eventData = envelopeRequestParser(req); // Verify it's a gen_ai transaction - expect(eventData.transaction).toBe('messages claude-3-haiku-20240307'); - expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages'); + expect(eventData.transaction).toBe('chat claude-3-haiku-20240307'); + expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat'); expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic'); expect(eventData.contexts?.trace?.data).toMatchObject({ - 'gen_ai.operation.name': 'messages', + 'gen_ai.operation.name': 'chat', 'gen_ai.system': 'anthropic', 'gen_ai.request.model': 'claude-3-haiku-20240307', 'gen_ai.request.temperature': 0.7, diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts index d8087ebf79fe..17cea5dbf95b 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts @@ -28,8 +28,8 @@ it('traces a basic message creation request', async ({ signal }) => { expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -39,8 +39,8 @@ it('traces a basic message creation request', async ({ signal }) => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', }), ]), diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts index 4579baf7d5de..d2657f55b1ed 100644 --- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts @@ -62,8 +62,8 @@ it('traces Google GenAI chat creation and message sending', async () => { // Third span - models.generateContent expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -74,8 +74,8 @@ it('traces Google GenAI chat creation and message sending', async () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 
20, }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', }), ]), diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index 1a20a3c3dc7b..7f8fd5c0bc42 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -33,8 +33,8 @@ describe('Anthropic integration', () => { // First span - basic message completion without PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -46,36 +46,36 @@ describe('Anthropic integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), // Second span - error handling expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }), - description: 'messages error-model', - op: 'gen_ai.messages', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'internal_error', }), // Third span - token counting (no response.text because recordOutputs=false by default) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -105,7 +105,7 @@ describe('Anthropic integration', () => { // First span - basic message completion with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100, [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -117,11 +117,11 @@ describe('Anthropic integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - 
description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -146,15 +146,15 @@ describe('Anthropic integration', () => { // Second - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages error-model', - op: 'gen_ai.messages', + description: 'chat error-model', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'internal_error', }), @@ -179,16 +179,16 @@ describe('Anthropic integration', () => { // Third - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -249,7 +249,7 @@ describe('Anthropic integration', () => { // Fifth - messages.create with stream: true expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, @@ -261,11 +261,11 @@ describe('Anthropic integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', }), - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -290,12 +290,12 @@ describe('Anthropic integration', () => { // Sixth - messages.stream expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, }), - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -315,11 +315,11 @@ describe('Anthropic integration', () 
=> { // Check token counting with options expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', // Present because recordOutputs=true is set in options }), - op: 'gen_ai.messages', + op: 'gen_ai.chat', }), // Check models.retrieve with options expect.objectContaining({ @@ -399,11 +399,11 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // messages.create with stream: true expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, @@ -417,11 +417,11 @@ describe('Anthropic integration', () => { }), // messages.stream expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -433,11 +433,11 @@ describe('Anthropic integration', () => { }), // messages.stream with redundant stream: true param expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, @@ -455,8 +455,8 @@ describe('Anthropic integration', () => { transaction: 'main', spans: expect.arrayContaining([ expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, // streamed text concatenated @@ -464,16 +464,16 @@ describe('Anthropic integration', () => { }), }), expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!', }), }), expect.objectContaining({ - description: 'messages claude-3-haiku-20240307 stream-response', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307 stream-response', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, 
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!', @@ -507,7 +507,7 @@ describe('Anthropic integration', () => { transaction: { spans: expect.arrayContaining([ expect.objectContaining({ - op: 'gen_ai.messages', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON, [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON, @@ -535,7 +535,7 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ description: expect.stringContaining('stream-response'), - op: 'gen_ai.messages', + op: 'gen_ai.chat', data: expect.objectContaining({ [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON, [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON, @@ -555,8 +555,8 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // Error with messages.create on stream initialization expect.objectContaining({ - description: 'messages error-stream-init stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-init stream-response', + op: 'gen_ai.chat', status: 'internal_error', // Actual status coming from the instrumentation data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init', @@ -565,8 +565,8 @@ describe('Anthropic integration', () => { }), // Error with messages.stream on stream initialization expect.objectContaining({ - description: 'messages error-stream-init stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-init stream-response', + op: 'gen_ai.chat', status: 'internal_error', // Actual status coming from the instrumentation data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init', @@ -575,8 +575,8 @@ describe('Anthropic integration', () => { // Error midway with messages.create on streaming - note: The stream is started successfully // so we get a successful span with the content that was streamed before the error expect.objectContaining({ - description: 'messages error-stream-midway stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-midway stream-response', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway', @@ -587,8 +587,8 @@ describe('Anthropic integration', () => { }), // Error midway with messages.stream - same behavior, we get a span with the streamed data expect.objectContaining({ - description: 'messages error-stream-midway stream-response', - op: 'gen_ai.messages', + description: 'chat error-stream-midway stream-response', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway', @@ -611,8 +611,8 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ // Invalid tool format error expect.objectContaining({ - description: 'messages invalid-format', - op: 'gen_ai.messages', + description: 'chat invalid-format', + op: 'gen_ai.chat', status: 'internal_error', data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'invalid-format', @@ -629,8 +629,8 @@ describe('Anthropic integration', () => { }), // Successful tool usage (for comparison) expect.objectContaining({ - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', status: 'ok', data: expect.objectContaining({ [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -661,8 +661,8 @@ 
describe('Anthropic integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -671,16 +671,16 @@ describe('Anthropic integration', () => { /^\[\{"role":"user","content":"C+"\}\]$/, ), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -689,8 +689,8 @@ describe('Anthropic integration', () => { { role: 'user', content: 'This is a small message that fits within the limit' }, ]), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), @@ -713,8 +713,8 @@ describe('Anthropic integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', @@ -735,8 +735,8 @@ describe('Anthropic integration', () => { }, ]), }), - description: 'messages claude-3-haiku-20240307', - op: 'gen_ai.messages', + description: 'chat claude-3-haiku-20240307', + op: 'gen_ai.chat', origin: 'auto.ai.anthropic', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts index b61aea2ac7b8..948b7316b194 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -66,8 +66,8 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -78,22 +78,22 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }, - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 
'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Fourth span - error handling expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', }, - description: 'models error-model', - op: 'gen_ai.models', + description: 'generate_content error-model', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -143,8 +143,8 @@ describe('Google GenAI integration', () => { // Third span - models.generateContent with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -157,23 +157,23 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Fourth span - error handling with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), - description: 'models error-model', - op: 'gen_ai.models', + description: 'generate_content error-model', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -233,8 +233,8 @@ describe('Google GenAI integration', () => { // Non-streaming with tools expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', @@ -246,16 +246,16 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 23, }), - description: 'models gemini-2.0-flash-001', - op: 'gen_ai.models', + description: 'generate_content gemini-2.0-flash-001', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Streaming with tools expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 
'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', @@ -270,16 +270,16 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), - description: 'models gemini-2.0-flash-001 stream-response', - op: 'gen_ai.models', + description: 'generate_content gemini-2.0-flash-001 stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Without tools for comparison expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001', @@ -289,8 +289,8 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, }), - description: 'models gemini-2.0-flash-001', - op: 'gen_ai.models', + description: 'generate_content gemini-2.0-flash-001', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), @@ -309,8 +309,8 @@ describe('Google GenAI integration', () => { // First span - models.generateContentStream (streaming) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -325,8 +325,8 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), - description: 'models gemini-1.5-flash stream-response', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), @@ -367,24 +367,24 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content streaming expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', }), - description: 'models blocked-model stream-response', - op: 'gen_ai.models', + description: 'generate_content blocked-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), // Fifth span - error handling for streaming expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', }), - description: 'models error-model stream-response', - op: 
'gen_ai.models', + description: 'generate_content error-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -397,8 +397,8 @@ describe('Google GenAI integration', () => { // First span - models.generateContentStream (streaming) with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -414,8 +414,8 @@ describe('Google GenAI integration', () => { [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22, }), - description: 'models gemini-1.5-flash stream-response', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), @@ -461,8 +461,8 @@ describe('Google GenAI integration', () => { // Fourth span - blocked content stream with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model', @@ -470,24 +470,24 @@ describe('Google GenAI integration', () => { [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, }), - description: 'models blocked-model stream-response', - op: 'gen_ai.models', + description: 'generate_content blocked-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), // Fifth span - error handling for streaming with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model', [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7, [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true }), - description: 'models error-model stream-response', - op: 'gen_ai.models', + description: 'generate_content error-model stream-response', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'internal_error', }), @@ -525,8 +525,8 @@ describe('Google GenAI integration', () => { // First call: Last message is large and gets truncated (only C's remain, D's are cropped) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', 
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -535,16 +535,16 @@ describe('Google GenAI integration', () => { /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/, ), }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), // Second call: Last message is small and kept without truncation expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash', @@ -556,8 +556,8 @@ describe('Google GenAI integration', () => { }, ]), }), - description: 'models gemini-1.5-flash', - op: 'gen_ai.models', + description: 'generate_content gemini-1.5-flash', + op: 'gen_ai.generate_content', origin: 'auto.ai.google_genai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts index eb9344a12608..14f396d1a9c5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts @@ -303,8 +303,7 @@ describe('LangChain integration', () => { // First call: Direct Anthropic call made BEFORE LangChain import // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -317,8 +316,7 @@ describe('LangChain integration', () => { // This should NOT have Anthropic instrumentation (skip works correctly) // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts index b05a70acdeb4..95e4529b3fdc 100644 --- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts @@ -66,7 +66,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'anthropic', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', @@ -136,7 +136,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.request.model': 'error-model', // 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // 
status: 'internal_error', @@ -335,8 +335,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // First call: Direct Anthropic call made BEFORE LangChain import // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic') const firstAnthropicSpan = spans.find( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Second call: LangChain call @@ -349,8 +348,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // This should NOT have Anthropic instrumentation (skip works correctly) // Count how many Anthropic spans we have - should be exactly 1 const anthropicSpans = spans.filter( - span => - span.description === 'messages claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', + span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic', ); // Verify the edge case limitation: @@ -437,7 +435,7 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => { // 'gen_ai.system': 'openai', // 'gen_ai.request.model': 'error-model', // }), - // description: 'chat error-model', + // description: 'invoke_agent error-model', // op: 'gen_ai.chat', // origin: 'auto.ai.langchain', // status: 'internal_error', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index ac40fbe94249..9010e203924f 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts @@ -117,8 +117,8 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -135,16 +135,16 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4', - op: 'gen_ai.responses', + description: 'chat gpt-4', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses API with tools and streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -163,8 +163,8 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -238,8 +238,8 @@ describe('OpenAI Tool Calls integration', () => { // Third span - responses API with tools (non-streaming) with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ 
-259,16 +259,16 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4', - op: 'gen_ai.responses', + description: 'chat gpt-4', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth span - responses API with tools and streaming with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -290,8 +290,8 @@ describe('OpenAI Tool Calls integration', () => { 'openai.usage.completion_tokens': 12, 'openai.usage.prompt_tokens': 8, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index 52ddc0837097..afae781afaf7 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -65,8 +65,8 @@ describe('OpenAI integration', () => { // Second span - responses API expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', @@ -82,8 +82,8 @@ describe('OpenAI integration', () => { [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -132,8 +132,8 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', @@ -151,8 +151,8 @@ describe('OpenAI integration', () => { [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -209,8 +209,8 @@ describe('OpenAI integration', () => { // Second span - responses API with PII expect.objectContaining({ data: { - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', @@ -228,8 +228,8 @@ describe('OpenAI integration', () => { [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, - description: 'responses gpt-3.5-turbo', - op: 
'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -283,8 +283,8 @@ describe('OpenAI integration', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', @@ -305,8 +305,8 @@ describe('OpenAI integration', () => { [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }), - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -669,16 +669,16 @@ describe('OpenAI integration', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', // Messages should be present and should include truncated string input (contains only As) [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/), }), - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -698,30 +698,30 @@ describe('OpenAI integration', () => { // First span - conversations.create returns conversation object with id expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'conversations', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.conversations', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', // The conversation ID should be captured from the response [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', }), - description: 'conversations unknown', - op: 'gen_ai.conversations', + description: 'chat unknown', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Second span - responses.create with conversation parameter expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', // The conversation ID should be captured from the request [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab', }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -730,22 +730,22 @@ describe('OpenAI integration', () => { data: expect.not.objectContaining({ [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(), }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), // Fourth 
span - responses.create with previous_response_id (chaining) expect.objectContaining({ data: expect.objectContaining({ - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'responses', - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.responses', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4', // The previous_response_id should be captured as conversation.id [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'resp_mock_conv_123', }), - op: 'gen_ai.responses', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts index 9b4120b143e4..4115738a19c5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts @@ -38,8 +38,8 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -55,8 +55,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -105,8 +105,8 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -124,8 +124,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }, - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -182,8 +182,8 @@ describe('OpenAI integration (V6)', () => { // Second span - responses API with PII expect.objectContaining({ data: { - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-3.5-turbo', @@ -201,8 +201,8 @@ describe('OpenAI integration (V6)', () => { 'openai.usage.completion_tokens': 8, 'openai.usage.prompt_tokens': 5, }, - description: 'responses gpt-3.5-turbo', - op: 'gen_ai.responses', + description: 'chat gpt-3.5-turbo', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), @@ -256,8 +256,8 @@ describe('OpenAI integration (V6)', () => { // Fifth span - responses API streaming with PII expect.objectContaining({ data: expect.objectContaining({ - 'gen_ai.operation.name': 'responses', - 'sentry.op': 'gen_ai.responses', + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', 'sentry.origin': 'auto.ai.openai', 'gen_ai.system': 'openai', 'gen_ai.request.model': 'gpt-4', @@ -277,8 +277,8 @@ describe('OpenAI integration (V6)', () => { 
'openai.usage.completion_tokens': 10, 'openai.usage.prompt_tokens': 6, }), - description: 'responses gpt-4 stream-response', - op: 'gen_ai.responses', + description: 'chat gpt-4 stream-response', + op: 'gen_ai.chat', origin: 'auto.ai.openai', status: 'ok', }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts index 2e8e8711e9e9..ac6614af7502 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts @@ -24,7 +24,7 @@ describe('Vercel AI integration - generateObject', () => { 'gen_ai.usage.input_tokens': 15, 'gen_ai.usage.output_tokens': 25, 'gen_ai.usage.total_tokens': 40, - 'gen_ai.operation.name': 'ai.generateObject', + 'gen_ai.operation.name': 'invoke_agent', 'sentry.op': 'gen_ai.invoke_agent', 'sentry.origin': 'auto.vercelai.otel', }), @@ -38,7 +38,7 @@ describe('Vercel AI integration - generateObject', () => { data: expect.objectContaining({ 'sentry.origin': 'auto.vercelai.otel', 'sentry.op': 'gen_ai.generate_object', - 'gen_ai.operation.name': 'ai.generateObject.doGenerate', + 'gen_ai.operation.name': 'generate_content', 'vercel.ai.operationId': 'ai.generateObject.doGenerate', 'vercel.ai.model.provider': 'mock-provider', 'vercel.ai.model.id': 'mock-model-id', diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 05d209176179..ca4a8521b27b 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -41,7 +41,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -68,7 +68,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -98,7 +98,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -129,7 +129,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', 
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -156,7 +156,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -183,7 +183,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -207,7 +207,7 @@ describe('Vercel AI integration', () => { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -238,7 +238,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -275,7 +275,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -311,7 +311,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -347,7 +347,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -384,7 +384,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - 
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -422,7 +422,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -455,7 +455,7 @@ describe('Vercel AI integration', () => { [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -497,7 +497,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -522,7 +522,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -545,7 +545,7 @@ describe('Vercel AI integration', () => { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.operationId': 'ai.toolCall', @@ -617,7 +617,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -642,7 +642,7 @@ describe('Vercel AI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate', + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', 'vercel.ai.model.provider': 'mock-provider', @@ -665,7 +665,7 @@ describe('Vercel AI 
             [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
             [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.toolCall',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
             'vercel.ai.operationId': 'ai.toolCall',
@@ -746,7 +746,7 @@ describe('Vercel AI integration', () => {
           data: expect.objectContaining({
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
           }),
         }),
         // The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing
@@ -758,7 +758,7 @@ describe('Vercel AI integration', () => {
           data: expect.objectContaining({
             [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_text',
             [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
-            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'ai.generateText.doGenerate',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
           }),
         }),
       ]),
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index 2e769f98dbda..baa0453f4e23 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -31,7 +31,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -45,7 +45,7 @@ describe('Vercel AI integration (V5)', () => {
           data: {
             'sentry.origin': 'auto.vercelai.otel',
             'sentry.op': 'gen_ai.generate_text',
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             'gen_ai.request.model': 'mock-model-id',
@@ -88,7 +88,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -102,7 +102,7 @@ describe('Vercel AI integration (V5)', () => {
           data: {
             'sentry.origin': 'auto.vercelai.otel',
             'sentry.op': 'gen_ai.generate_text',
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             'gen_ai.request.model': 'mock-model-id',
@@ -143,7 +143,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -172,7 +172,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -188,7 +188,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
             [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            'gen_ai.operation.name': 'ai.toolCall',
+            'gen_ai.operation.name': 'execute_tool',
             'sentry.op': 'gen_ai.execute_tool',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -225,7 +225,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -257,7 +257,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -285,7 +285,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -299,7 +299,7 @@ describe('Vercel AI integration (V5)', () => {
           data: {
             'sentry.origin': 'auto.vercelai.otel',
             'sentry.op': 'gen_ai.generate_text',
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             'gen_ai.request.model': 'mock-model-id',
@@ -345,7 +345,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -356,7 +356,7 @@ describe('Vercel AI integration (V5)', () => {
         }),
         // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
         expect.objectContaining({
-          data: {
+          data: expect.objectContaining({
             'gen_ai.request.model': 'mock-model-id',
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
@@ -380,10 +380,10 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
-          },
+          }),
           description: 'generate_text mock-model-id',
           op: 'gen_ai.generate_text',
           origin: 'auto.vercelai.otel',
@@ -391,17 +391,17 @@ describe('Vercel AI integration (V5)', () => {
         }),
         // Seventh span - tool call execution span
         expect.objectContaining({
-          data: {
+          data: expect.objectContaining({
             'vercel.ai.operationId': 'ai.toolCall',
             [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
             [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
             [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
             [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            'gen_ai.operation.name': 'ai.toolCall',
+            'gen_ai.operation.name': 'execute_tool',
             'sentry.op': 'gen_ai.execute_tool',
             'sentry.origin': 'auto.vercelai.otel',
-          },
+          }),
           description: 'execute_tool getWeather',
           op: 'gen_ai.execute_tool',
           origin: 'auto.vercelai.otel',
@@ -463,7 +463,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
             'vercel.ai.response.finishReason': 'tool-calls',
@@ -491,7 +491,7 @@ describe('Vercel AI integration (V5)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           },
@@ -506,7 +506,7 @@ describe('Vercel AI integration (V5)', () => {
             [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
             [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            'gen_ai.operation.name': 'ai.toolCall',
+            'gen_ai.operation.name': 'execute_tool',
             'sentry.op': 'gen_ai.execute_tool',
             'sentry.origin': 'auto.vercelai.otel',
           },
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
index 63ca5fcde258..9a8ec768ecc9 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
@@ -32,7 +32,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -46,7 +46,7 @@ describe('Vercel AI integration (V6)', () => {
           data: expect.objectContaining({
             'sentry.origin': 'auto.vercelai.otel',
             'sentry.op': 'gen_ai.generate_text',
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.request.headers.user-agent': expect.any(String),
@@ -90,7 +90,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -104,7 +104,7 @@ describe('Vercel AI integration (V6)', () => {
           data: expect.objectContaining({
             'sentry.origin': 'auto.vercelai.otel',
             'sentry.op': 'gen_ai.generate_text',
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.request.headers.user-agent': expect.any(String),
@@ -146,7 +146,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -176,7 +176,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -192,7 +192,7 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
             [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            'gen_ai.operation.name': 'ai.toolCall',
+            'gen_ai.operation.name': 'execute_tool',
             'sentry.op': 'gen_ai.execute_tool',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -229,7 +229,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -261,7 +261,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -289,7 +289,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 10,
             'gen_ai.usage.output_tokens': 20,
             'gen_ai.usage.total_tokens': 30,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -303,7 +303,7 @@ describe('Vercel AI integration (V6)', () => {
           data: expect.objectContaining({
             'sentry.origin': 'auto.vercelai.otel',
             'sentry.op': 'gen_ai.generate_text',
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'vercel.ai.operationId': 'ai.generateText.doGenerate',
             'vercel.ai.model.provider': 'mock-provider',
             'vercel.ai.request.headers.user-agent': expect.any(String),
@@ -349,7 +349,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -384,7 +384,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -402,7 +402,7 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
             [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            'gen_ai.operation.name': 'ai.toolCall',
+            'gen_ai.operation.name': 'execute_tool',
             'sentry.op': 'gen_ai.execute_tool',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -468,7 +468,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText',
+            'gen_ai.operation.name': 'invoke_agent',
             'sentry.op': 'gen_ai.invoke_agent',
             'sentry.origin': 'auto.vercelai.otel',
             'vercel.ai.response.finishReason': 'tool-calls',
@@ -497,7 +497,7 @@ describe('Vercel AI integration (V6)', () => {
             'gen_ai.usage.input_tokens': 15,
             'gen_ai.usage.output_tokens': 25,
             'gen_ai.usage.total_tokens': 40,
-            'gen_ai.operation.name': 'ai.generateText.doGenerate',
+            'gen_ai.operation.name': 'generate_content',
             'sentry.op': 'gen_ai.generate_text',
             'sentry.origin': 'auto.vercelai.otel',
           }),
@@ -512,7 +512,7 @@ describe('Vercel AI integration (V6)', () => {
             [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
             [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
             [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
-            'gen_ai.operation.name': 'ai.toolCall',
+            'gen_ai.operation.name': 'execute_tool',
             'sentry.op': 'gen_ai.execute_tool',
             'sentry.origin': 'auto.vercelai.otel',
           }),
diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts
index a52afa58a430..8fbc0f30c014 100644
--- a/packages/core/src/tracing/ai/gen-ai-attributes.ts
+++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts
@@ -291,13 +291,12 @@ export const OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'openai.usage.prompt_tokens'
 // =============================================================================
 
 /**
- * OpenAI API operations
+ * OpenAI API operations following OpenTelemetry semantic conventions
+ * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans
  */
 export const OPENAI_OPERATIONS = {
   CHAT: 'chat',
-  RESPONSES: 'responses',
   EMBEDDINGS: 'embeddings',
-  CONVERSATIONS: 'conversations',
 } as const;
 
 // =============================================================================
diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts
index 4a7a14eea554..53cd696ba909 100644
--- a/packages/core/src/tracing/ai/utils.ts
+++ b/packages/core/src/tracing/ai/utils.ts
@@ -9,15 +9,21 @@ import {
 } from './gen-ai-attributes';
 import { truncateGenAiMessages, truncateGenAiStringInput } from './messageTruncation';
 
 /**
- * Maps AI method paths to Sentry operation name
+ * Maps AI method paths to OpenTelemetry semantic convention operation names
+ * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans
  */
 export function getFinalOperationName(methodPath: string): string {
   if (methodPath.includes('messages')) {
-    return 'messages';
+    return 'chat';
   }
   if (methodPath.includes('completions')) {
-    return 'completions';
+    return 'text_completion';
+  }
+  // Google GenAI: models.generateContent* -> generate_content (actually generates AI responses)
+  if (methodPath.includes('generateContent')) {
+    return 'generate_content';
   }
+  // Anthropic: models.get/retrieve -> models (metadata retrieval only)
   if (methodPath.includes('models')) {
     return 'models';
   }
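Reviewer note (illustrative, not part of the patch): the branch order in getFinalOperationName is deliberate, since a Google GenAI path such as 'models.generateContent' also contains 'models'. A hypothetical vitest case, assuming the function above is in scope, sketches the resulting mapping:

    // Hypothetical test, not in this PR; sketches the mapping after the change.
    it('maps method paths to OTel gen_ai operation names', () => {
      expect(getFinalOperationName('messages.create')).toBe('chat');
      expect(getFinalOperationName('completions.create')).toBe('text_completion');
      expect(getFinalOperationName('models.generateContent')).toBe('generate_content'); // checked before 'models'
      expect(getFinalOperationName('models.retrieve')).toBe('models');
    });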
diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts
index e5ad6cc14189..8cf12dfcb861 100644
--- a/packages/core/src/tracing/langchain/index.ts
+++ b/packages/core/src/tracing/langchain/index.ts
@@ -98,10 +98,10 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}):
       startSpanManual(
         {
           name: `${operationName} ${modelName}`,
-          op: 'gen_ai.pipeline',
+          op: 'gen_ai.chat',
           attributes: {
             ...attributes,
-            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.pipeline',
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
           },
         },
         span => {
diff --git a/packages/core/src/tracing/langchain/utils.ts b/packages/core/src/tracing/langchain/utils.ts
index 0a07ae8df370..d119de798950 100644
--- a/packages/core/src/tracing/langchain/utils.ts
+++ b/packages/core/src/tracing/langchain/utils.ts
@@ -216,18 +216,18 @@ function extractCommonRequestAttributes(
 
 /**
  * Small helper to assemble boilerplate attributes shared by both request extractors.
+ * Always uses 'chat' as the operation type for all LLM and chat model operations.
  */
 function baseRequestAttributes(
   system: unknown,
   modelName: unknown,
-  operation: 'pipeline' | 'chat',
   serialized: LangChainSerialized,
   invocationParams?: Record<string, unknown>,
   langSmithMetadata?: Record<string, unknown>,
 ): Record<string, unknown> {
   return {
     [GEN_AI_SYSTEM_ATTRIBUTE]: asString(system ?? 'langchain'),
-    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operation,
+    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
     [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: asString(modelName),
     [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGCHAIN_ORIGIN,
     ...extractCommonRequestAttributes(serialized, invocationParams, langSmithMetadata),
@@ -237,7 +237,8 @@ function baseRequestAttributes(
 /**
  * Extracts attributes for plain LLM invocations (string prompts).
  *
- * - Operation is tagged as `pipeline` to distinguish from chat-style invocations.
+ * - Operation is tagged as `chat` following OpenTelemetry semantic conventions.
+ *   LangChain LLM operations are treated as chat operations.
  * - When `recordInputs` is true, string prompts are wrapped into `{role:"user"}`
  *   messages to align with the chat schema used elsewhere.
  */
@@ -251,7 +252,7 @@ export function extractLLMRequestAttributes(
   const system = langSmithMetadata?.ls_provider;
   const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown';
 
-  const attrs = baseRequestAttributes(system, modelName, 'pipeline', llm, invocationParams, langSmithMetadata);
+  const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata);
 
   if (recordInputs && Array.isArray(prompts) && prompts.length > 0) {
     setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length);
@@ -265,7 +266,8 @@ export function extractLLMRequestAttributes(
 /**
  * Extracts attributes for ChatModel invocations (array-of-arrays of messages).
  *
- * - Operation is tagged as `chat`.
+ * - Operation is tagged as `chat` following OpenTelemetry semantic conventions.
+ *   LangChain chat model operations are chat operations.
  * - We flatten LangChain's `LangChainMessage[][]` and normalize shapes into a
  *   consistent `{ role, content }` array when `recordInputs` is true.
  * - Provider system value falls back to `serialized.id?.[2]`.
@@ -280,7 +282,7 @@ export function extractChatModelRequestAttributes(
   const system = langSmithMetadata?.ls_provider ?? llm.id?.[2];
   const modelName = invocationParams?.model ?? langSmithMetadata?.ls_model_name ?? 'unknown';
 
-  const attrs = baseRequestAttributes(system, modelName, 'chat', llm, invocationParams, langSmithMetadata);
+  const attrs = baseRequestAttributes(system, modelName, llm, invocationParams, langSmithMetadata);
 
   if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) {
     const normalized = normalizeLangChainMessages(langChainMessages.flat());
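Aside (illustrative, not part of the patch): with the operation parameter removed, both request extractors tag spans identically. A hedged sketch of the attribute shape baseRequestAttributes now produces, with placeholder values; the 'auto.ai.langchain' origin is an assumption about LANGCHAIN_ORIGIN, not taken from this diff:

    // Hedged sketch of the resulting attributes (values illustrative):
    const attrs: Record<string, unknown> = {
      'gen_ai.system': 'openai',             // e.g. from langSmithMetadata.ls_provider
      'gen_ai.operation.name': 'chat',       // now hard-coded; 'pipeline' no longer appears
      'gen_ai.request.model': 'gpt-4o-mini', // e.g. from invocationParams.model
      'sentry.origin': 'auto.ai.langchain',  // assumed value of LANGCHAIN_ORIGIN
    };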
diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts
index 007dd93a91b1..82494f7ae018 100644
--- a/packages/core/src/tracing/openai/utils.ts
+++ b/packages/core/src/tracing/openai/utils.ts
@@ -35,20 +35,21 @@ import type {
 } from './types';
 
 /**
- * Maps OpenAI method paths to Sentry operation names
+ * Maps OpenAI method paths to OpenTelemetry semantic convention operation names
+ * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans
  */
 export function getOperationName(methodPath: string): string {
   if (methodPath.includes('chat.completions')) {
     return OPENAI_OPERATIONS.CHAT;
   }
   if (methodPath.includes('responses')) {
-    return OPENAI_OPERATIONS.RESPONSES;
+    return OPENAI_OPERATIONS.CHAT;
   }
   if (methodPath.includes('embeddings')) {
     return OPENAI_OPERATIONS.EMBEDDINGS;
   }
   if (methodPath.includes('conversations')) {
-    return OPENAI_OPERATIONS.CONVERSATIONS;
+    return OPENAI_OPERATIONS.CHAT;
   }
   return methodPath.split('.').pop() || 'unknown';
 }
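Aside (illustrative, not part of the patch): after this change the Responses and Conversations APIs both report under the chat operation. A quick sketch of getOperationName results, assuming the function above is in scope:

    // Hedged sketch; expected return values after the change.
    getOperationName('chat.completions.create'); // 'chat'
    getOperationName('responses.create');        // 'chat' (was 'responses')
    getOperationName('conversations.create');    // 'chat' (was 'conversations')
    getOperationName('embeddings.create');       // 'embeddings'
    getOperationName('files.list');              // 'list' (falls through to the last path segment)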
diff --git a/packages/core/src/tracing/vercel-ai/constants.ts b/packages/core/src/tracing/vercel-ai/constants.ts
index fe307b03e7fb..57e8bf2a57c8 100644
--- a/packages/core/src/tracing/vercel-ai/constants.ts
+++ b/packages/core/src/tracing/vercel-ai/constants.ts
@@ -3,3 +3,22 @@ import type { Span } from '../../types-hoist/span';
 // Global Map to track tool call IDs to their corresponding spans
 // This allows us to capture tool errors and link them to the correct span
 export const toolCallSpanMap = new Map<string, Span>();
+
+// Operation sets for efficient mapping to OpenTelemetry semantic convention values
+export const INVOKE_AGENT_OPS = new Set([
+  'ai.generateText',
+  'ai.streamText',
+  'ai.generateObject',
+  'ai.streamObject',
+  'ai.embed',
+  'ai.embedMany',
+]);
+
+export const GENERATE_CONTENT_OPS = new Set([
+  'ai.generateText.doGenerate',
+  'ai.streamText.doStream',
+  'ai.generateObject.doGenerate',
+  'ai.streamObject.doStream',
+]);
+
+export const EMBEDDINGS_OPS = new Set(['ai.embed.doEmbed', 'ai.embedMany.doEmbed']);
diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts
index 375e803159be..1a06937b8aae 100644
--- a/packages/core/src/tracing/vercel-ai/index.ts
+++ b/packages/core/src/tracing/vercel-ai/index.ts
@@ -19,7 +19,7 @@ import {
   GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
   GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
 } from '../ai/gen-ai-attributes';
-import { toolCallSpanMap } from './constants';
+import { EMBEDDINGS_OPS, GENERATE_CONTENT_OPS, INVOKE_AGENT_OPS, toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
 import {
   accumulateTokensForParent,
@@ -54,6 +54,29 @@ function addOriginToSpan(span: Span, origin: SpanOrigin): void {
   span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, origin);
 }
 
+/**
+ * Maps Vercel AI SDK operation names to OpenTelemetry semantic convention values
+ * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans
+ */
+function mapVercelAiOperationName(operationName: string): string {
+  // Top-level pipeline operations map to invoke_agent
+  if (INVOKE_AGENT_OPS.has(operationName)) {
+    return 'invoke_agent';
+  }
+  // .do* operations are the actual LLM calls
+  if (GENERATE_CONTENT_OPS.has(operationName)) {
+    return 'generate_content';
+  }
+  if (EMBEDDINGS_OPS.has(operationName)) {
+    return 'embeddings';
+  }
+  if (operationName === 'ai.toolCall') {
+    return 'execute_tool';
+  }
+  // Return the original value for unknown operations
+  return operationName;
+}
+
 /**
  * Post-process spans emitted by the Vercel AI SDK.
  * This is supposed to be used in `client.on('spanStart', ...)`
@@ -151,7 +174,13 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
   }
 
   // Rename AI SDK attributes to standardized gen_ai attributes
-  renameAttributeKey(attributes, OPERATION_NAME_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE);
+  // Map operation.name to OpenTelemetry semantic convention values
+  if (attributes[OPERATION_NAME_ATTRIBUTE]) {
+    const operationName = mapVercelAiOperationName(attributes[OPERATION_NAME_ATTRIBUTE] as string);
+    attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE] = operationName;
+    // eslint-disable-next-line @typescript-eslint/no-dynamic-delete
+    delete attributes[OPERATION_NAME_ATTRIBUTE];
+  }
   renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE);
   renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
   renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls');
@@ -189,6 +218,7 @@ function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string,
 function processToolCallSpan(span: Span, attributes: SpanAttributes): void {
   addOriginToSpan(span, 'auto.vercelai.otel');
   span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool');
+  span.setAttribute(GEN_AI_OPERATION_NAME_ATTRIBUTE, 'execute_tool');
 
   renameAttributeKey(attributes, AI_TOOL_CALL_NAME_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE);
   renameAttributeKey(attributes, AI_TOOL_CALL_ID_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE);
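Aside (illustrative, not part of the patch): mapVercelAiOperationName is module-private, so the mapping is re-stated here as a standalone sketch using the exported sets from constants.ts; the import path is hypothetical:

    import { EMBEDDINGS_OPS, GENERATE_CONTENT_OPS, INVOKE_AGENT_OPS } from './constants'; // hypothetical import path

    // Re-statement of the mapping, for illustration only.
    const mapOp = (op: string): string =>
      INVOKE_AGENT_OPS.has(op) ? 'invoke_agent'
      : GENERATE_CONTENT_OPS.has(op) ? 'generate_content'
      : EMBEDDINGS_OPS.has(op) ? 'embeddings'
      : op === 'ai.toolCall' ? 'execute_tool'
      : op; // unknown operation ids pass through unchanged

    mapOp('ai.streamText');          // 'invoke_agent'
    mapOp('ai.streamText.doStream'); // 'generate_content'
    mapOp('ai.embedMany.doEmbed');   // 'embeddings'
    mapOp('ai.somethingNew');        // 'ai.somethingNew'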
diff --git a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts
index ff951e8be40b..25cd873ace08 100644
--- a/packages/core/test/lib/utils/openai-utils.test.ts
+++ b/packages/core/test/lib/utils/openai-utils.test.ts
@@ -18,14 +18,14 @@ describe('openai-utils', () => {
       expect(getOperationName('some.path.chat.completions.method')).toBe('chat');
     });
 
-    it('should return responses for responses methods', () => {
-      expect(getOperationName('responses.create')).toBe('responses');
-      expect(getOperationName('some.path.responses.method')).toBe('responses');
+    it('should return chat for responses methods', () => {
+      expect(getOperationName('responses.create')).toBe('chat');
+      expect(getOperationName('some.path.responses.method')).toBe('chat');
     });
 
-    it('should return conversations for conversations methods', () => {
-      expect(getOperationName('conversations.create')).toBe('conversations');
-      expect(getOperationName('some.path.conversations.method')).toBe('conversations');
+    it('should return chat for conversations methods', () => {
+      expect(getOperationName('conversations.create')).toBe('chat');
+      expect(getOperationName('some.path.conversations.method')).toBe('chat');
     });
 
     it('should return the last part of path for unknown methods', () => {
@@ -41,7 +41,7 @@ describe('openai-utils', () => {
   describe('getSpanOperation', () => {
     it('should prefix operation with gen_ai', () => {
       expect(getSpanOperation('chat.completions.create')).toBe('gen_ai.chat');
-      expect(getSpanOperation('responses.create')).toBe('gen_ai.responses');
+      expect(getSpanOperation('responses.create')).toBe('gen_ai.chat');
       expect(getSpanOperation('some.custom.operation')).toBe('gen_ai.operation');
     });
   });