Skip to content

Commit 1b354ed

Browse files
committed
resolve
1 parent a17a3cf commit 1b354ed

39 files changed

Lines changed: 1994 additions & 1237 deletions

File tree

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44

55
- "You miss 100 percent of the shots you don't take. — Wayne Gretzky" — Michael Scott
66

7+
Work in this release was contributed by @sebws. Thank you for your contribution!
8+
79
- **feat(tanstackstart-react): Auto-instrument global middleware in `sentryTanstackStart` Vite plugin ([#18844](https://github.com/getsentry/sentry-javascript/pull/18844))**
810

911
The `sentryTanstackStart` Vite plugin now automatically instruments `requestMiddleware` and `functionMiddleware` arrays in `createStart()`. This captures performance data without requiring manual wrapping.

dev-packages/browser-integration-tests/suites/profiling/manualMode/test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ sentryTest('sends profile_chunk envelopes in manual mode', async ({ page, getLoc
4848
const envelopeItemHeader = profileChunkEnvelopeItem[0];
4949
const envelopeItemPayload1 = profileChunkEnvelopeItem[1];
5050

51-
expect(envelopeItemHeader).toHaveProperty('type', 'profile_chunk');
51+
expect(envelopeItemHeader).toEqual({ type: 'profile_chunk', platform: 'javascript' });
5252
expect(envelopeItemPayload1.profile).toBeDefined();
5353

5454
const profilerId1 = envelopeItemPayload1.profiler_id;
@@ -71,7 +71,7 @@ sentryTest('sends profile_chunk envelopes in manual mode', async ({ page, getLoc
7171
const envelopeItemHeader2 = profileChunkEnvelopeItem2[0];
7272
const envelopeItemPayload2 = profileChunkEnvelopeItem2[1];
7373

74-
expect(envelopeItemHeader2).toHaveProperty('type', 'profile_chunk');
74+
expect(envelopeItemHeader2).toEqual({ type: 'profile_chunk', platform: 'javascript' });
7575
expect(envelopeItemPayload2.profile).toBeDefined();
7676

7777
expect(envelopeItemPayload2.profiler_id).toBe(profilerId1); // same profiler id for the whole session

dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_multiple-chunks/test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ sentryTest(
5151
const envelopeItemHeader = profileChunkEnvelopeItem[0];
5252
const envelopeItemPayload1 = profileChunkEnvelopeItem[1];
5353

54-
expect(envelopeItemHeader).toHaveProperty('type', 'profile_chunk');
54+
expect(envelopeItemHeader).toEqual({ type: 'profile_chunk', platform: 'javascript' });
5555
expect(envelopeItemPayload1.profile).toBeDefined();
5656

5757
validateProfilePayloadMetadata(envelopeItemPayload1);
@@ -77,7 +77,7 @@ sentryTest(
7777
const envelopeItemHeader2 = profileChunkEnvelopeItem2[0];
7878
const envelopeItemPayload2 = profileChunkEnvelopeItem2[1];
7979

80-
expect(envelopeItemHeader2).toHaveProperty('type', 'profile_chunk');
80+
expect(envelopeItemHeader2).toEqual({ type: 'profile_chunk', platform: 'javascript' });
8181
expect(envelopeItemPayload2.profile).toBeDefined();
8282

8383
validateProfilePayloadMetadata(envelopeItemPayload2);

dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_overlapping-spans/test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ sentryTest(
5252
const envelopeItemHeader = profileChunkEnvelopeItem[0];
5353
const envelopeItemPayload = profileChunkEnvelopeItem[1];
5454

55-
expect(envelopeItemHeader).toHaveProperty('type', 'profile_chunk');
55+
expect(envelopeItemHeader).toEqual({ type: 'profile_chunk', platform: 'javascript' });
5656
expect(envelopeItemPayload.profile).toBeDefined();
5757

5858
validateProfilePayloadMetadata(envelopeItemPayload);

dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts

Lines changed: 21 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,15 @@
1+
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
12
import { expect, it } from 'vitest';
3+
import {
4+
GEN_AI_OPERATION_NAME_ATTRIBUTE,
5+
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
6+
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
7+
GEN_AI_RESPONSE_ID_ATTRIBUTE,
8+
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
9+
GEN_AI_SYSTEM_ATTRIBUTE,
10+
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
11+
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
12+
} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
213
import { createRunner } from '../../../runner';
314

415
// These tests are not exhaustive because the instrumentation is
@@ -17,16 +28,16 @@ it('traces a basic message creation request', async ({ signal }) => {
1728
expect.arrayContaining([
1829
expect.objectContaining({
1930
data: expect.objectContaining({
20-
'gen_ai.operation.name': 'messages',
21-
'sentry.op': 'gen_ai.messages',
22-
'sentry.origin': 'auto.ai.anthropic',
23-
'gen_ai.system': 'anthropic',
24-
'gen_ai.request.model': 'claude-3-haiku-20240307',
25-
'gen_ai.request.temperature': 0.7,
26-
'gen_ai.response.model': 'claude-3-haiku-20240307',
27-
'gen_ai.response.id': 'msg_mock123',
28-
'gen_ai.usage.input_tokens': 10,
29-
'gen_ai.usage.output_tokens': 15,
31+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'messages',
32+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.messages',
33+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
34+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
35+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
36+
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
37+
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
38+
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
39+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
40+
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
3041
}),
3142
description: 'messages claude-3-haiku-20240307',
3243
op: 'gen_ai.messages',

dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts

Lines changed: 39 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,16 @@
1+
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
12
import { expect, it } from 'vitest';
3+
import {
4+
GEN_AI_OPERATION_NAME_ATTRIBUTE,
5+
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
6+
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
7+
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
8+
GEN_AI_REQUEST_TOP_P_ATTRIBUTE,
9+
GEN_AI_SYSTEM_ATTRIBUTE,
10+
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
11+
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
12+
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
13+
} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
214
import { createRunner } from '../../../runner';
315

416
// These tests are not exhaustive because the instrumentation is
@@ -18,14 +30,14 @@ it('traces Google GenAI chat creation and message sending', async () => {
1830
// First span - chats.create
1931
expect.objectContaining({
2032
data: expect.objectContaining({
21-
'gen_ai.operation.name': 'chat',
22-
'sentry.op': 'gen_ai.chat',
23-
'sentry.origin': 'auto.ai.google_genai',
24-
'gen_ai.system': 'google_genai',
25-
'gen_ai.request.model': 'gemini-1.5-pro',
26-
'gen_ai.request.temperature': 0.8,
27-
'gen_ai.request.top_p': 0.9,
28-
'gen_ai.request.max_tokens': 150,
33+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
34+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
35+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
36+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
37+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
38+
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
39+
[GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
40+
[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
2941
}),
3042
description: 'chat gemini-1.5-pro create',
3143
op: 'gen_ai.chat',
@@ -34,14 +46,14 @@ it('traces Google GenAI chat creation and message sending', async () => {
3446
// Second span - chat.sendMessage
3547
expect.objectContaining({
3648
data: expect.objectContaining({
37-
'gen_ai.operation.name': 'chat',
38-
'sentry.op': 'gen_ai.chat',
39-
'sentry.origin': 'auto.ai.google_genai',
40-
'gen_ai.system': 'google_genai',
41-
'gen_ai.request.model': 'gemini-1.5-pro',
42-
'gen_ai.usage.input_tokens': 8,
43-
'gen_ai.usage.output_tokens': 12,
44-
'gen_ai.usage.total_tokens': 20,
49+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
50+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
51+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
52+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
53+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
54+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
55+
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
56+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
4557
}),
4658
description: 'chat gemini-1.5-pro',
4759
op: 'gen_ai.chat',
@@ -50,17 +62,17 @@ it('traces Google GenAI chat creation and message sending', async () => {
5062
// Third span - models.generateContent
5163
expect.objectContaining({
5264
data: expect.objectContaining({
53-
'gen_ai.operation.name': 'models',
54-
'sentry.op': 'gen_ai.models',
55-
'sentry.origin': 'auto.ai.google_genai',
56-
'gen_ai.system': 'google_genai',
57-
'gen_ai.request.model': 'gemini-1.5-flash',
58-
'gen_ai.request.temperature': 0.7,
59-
'gen_ai.request.top_p': 0.9,
60-
'gen_ai.request.max_tokens': 100,
61-
'gen_ai.usage.input_tokens': 8,
62-
'gen_ai.usage.output_tokens': 12,
63-
'gen_ai.usage.total_tokens': 20,
65+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
66+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
67+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
68+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
69+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
70+
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
71+
[GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
72+
[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
73+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
74+
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
75+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
6476
}),
6577
description: 'models gemini-1.5-flash',
6678
op: 'gen_ai.models',

dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts

Lines changed: 27 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,16 @@
1+
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
12
import { expect, it } from 'vitest';
3+
import {
4+
GEN_AI_OPERATION_NAME_ATTRIBUTE,
5+
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
6+
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
7+
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
8+
GEN_AI_SYSTEM_ATTRIBUTE,
9+
GEN_AI_TOOL_NAME_ATTRIBUTE,
10+
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
11+
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
12+
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
13+
} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
214
import { createRunner } from '../../../runner';
315

416
// These tests are not exhaustive because the instrumentation is
@@ -18,16 +30,16 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
1830
// Chat model span
1931
expect.objectContaining({
2032
data: expect.objectContaining({
21-
'gen_ai.operation.name': 'chat',
22-
'sentry.op': 'gen_ai.chat',
23-
'sentry.origin': 'auto.ai.langchain',
24-
'gen_ai.system': 'anthropic',
25-
'gen_ai.request.model': 'claude-3-5-sonnet-20241022',
26-
'gen_ai.request.temperature': 0.7,
27-
'gen_ai.request.max_tokens': 100,
28-
'gen_ai.usage.input_tokens': 10,
29-
'gen_ai.usage.output_tokens': 15,
30-
'gen_ai.usage.total_tokens': 25,
33+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
34+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
35+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
36+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
37+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
38+
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
39+
[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
40+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
41+
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
42+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
3143
}),
3244
description: 'chat claude-3-5-sonnet-20241022',
3345
op: 'gen_ai.chat',
@@ -36,8 +48,8 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
3648
// Chain span
3749
expect.objectContaining({
3850
data: expect.objectContaining({
39-
'sentry.origin': 'auto.ai.langchain',
40-
'sentry.op': 'gen_ai.invoke_agent',
51+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
52+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
4153
'langchain.chain.name': 'my_test_chain',
4254
}),
4355
description: 'chain my_test_chain',
@@ -47,9 +59,9 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
4759
// Tool span
4860
expect.objectContaining({
4961
data: expect.objectContaining({
50-
'sentry.origin': 'auto.ai.langchain',
51-
'sentry.op': 'gen_ai.execute_tool',
52-
'gen_ai.tool.name': 'search_tool',
62+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
63+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
64+
[GEN_AI_TOOL_NAME_ATTRIBUTE]: 'search_tool',
5365
}),
5466
description: 'execute_tool search_tool',
5567
op: 'gen_ai.execute_tool',

dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts

Lines changed: 28 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,16 @@
1+
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
12
import { expect, it } from 'vitest';
3+
import {
4+
GEN_AI_AGENT_NAME_ATTRIBUTE,
5+
GEN_AI_OPERATION_NAME_ATTRIBUTE,
6+
GEN_AI_PIPELINE_NAME_ATTRIBUTE,
7+
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
8+
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
9+
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
10+
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
11+
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
12+
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
13+
} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
214
import { createRunner } from '../../../runner';
315

416
// These tests are not exhaustive because the instrumentation is
@@ -18,10 +30,10 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
1830
const createAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.create_agent');
1931
expect(createAgentSpan).toMatchObject({
2032
data: {
21-
'gen_ai.operation.name': 'create_agent',
22-
'sentry.op': 'gen_ai.create_agent',
23-
'sentry.origin': 'auto.ai.langgraph',
24-
'gen_ai.agent.name': 'weather_assistant',
33+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
34+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
35+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
36+
[GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
2537
},
2638
description: 'create_agent weather_assistant',
2739
op: 'gen_ai.create_agent',
@@ -32,25 +44,25 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
3244
const invokeAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.invoke_agent');
3345
expect(invokeAgentSpan).toMatchObject({
3446
data: expect.objectContaining({
35-
'gen_ai.operation.name': 'invoke_agent',
36-
'sentry.op': 'gen_ai.invoke_agent',
37-
'sentry.origin': 'auto.ai.langgraph',
38-
'gen_ai.agent.name': 'weather_assistant',
39-
'gen_ai.pipeline.name': 'weather_assistant',
40-
'gen_ai.input.messages': '[{"role":"user","content":"What is the weather in SF?"}]',
41-
'gen_ai.response.model': 'mock-model',
42-
'gen_ai.usage.input_tokens': 20,
43-
'gen_ai.usage.output_tokens': 10,
44-
'gen_ai.usage.total_tokens': 30,
47+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
48+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
49+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
50+
[GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
51+
[GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
52+
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in SF?"}]',
53+
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model',
54+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
55+
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
56+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
4557
}),
4658
description: 'invoke_agent weather_assistant',
4759
op: 'gen_ai.invoke_agent',
4860
origin: 'auto.ai.langgraph',
4961
});
5062

5163
// Verify tools are captured
52-
if (invokeAgentSpan.data['gen_ai.request.available_tools']) {
53-
expect(invokeAgentSpan.data['gen_ai.request.available_tools']).toMatch(/get_weather/);
64+
if (invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]) {
65+
expect(invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toMatch(/get_weather/);
5466
}
5567
})
5668
.start(signal);

dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts

Lines changed: 25 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,17 @@
1+
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
12
import { expect, it } from 'vitest';
3+
import {
4+
GEN_AI_OPERATION_NAME_ATTRIBUTE,
5+
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
6+
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
7+
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
8+
GEN_AI_RESPONSE_ID_ATTRIBUTE,
9+
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
10+
GEN_AI_SYSTEM_ATTRIBUTE,
11+
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
12+
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
13+
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
14+
} from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
215
import { createRunner } from '../../../runner';
316

417
// These tests are not exhaustive because the instrumentation is
@@ -17,18 +30,18 @@ it('traces a basic chat completion request', async ({ signal }) => {
1730
expect.arrayContaining([
1831
expect.objectContaining({
1932
data: expect.objectContaining({
20-
'gen_ai.operation.name': 'chat',
21-
'sentry.op': 'gen_ai.chat',
22-
'sentry.origin': 'auto.ai.openai',
23-
'gen_ai.system': 'openai',
24-
'gen_ai.request.model': 'gpt-3.5-turbo',
25-
'gen_ai.request.temperature': 0.7,
26-
'gen_ai.response.model': 'gpt-3.5-turbo',
27-
'gen_ai.response.id': 'chatcmpl-mock123',
28-
'gen_ai.usage.input_tokens': 10,
29-
'gen_ai.usage.output_tokens': 15,
30-
'gen_ai.usage.total_tokens': 25,
31-
'gen_ai.response.finish_reasons': '["stop"]',
33+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
34+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
35+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
36+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
37+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
38+
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
39+
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
40+
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
41+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
42+
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
43+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
44+
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
3245
}),
3346
description: 'chat gpt-3.5-turbo',
3447
op: 'gen_ai.chat',

dev-packages/e2e-tests/Makefile

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
.PHONY: run list
2+
3+
run:
4+
@if ! command -v fzf &> /dev/null; then \
5+
echo "Error: fzf is required. Install with: brew install fzf"; \
6+
exit 1; \
7+
fi
8+
@ls test-applications | fzf --height=10 --layout=reverse --border=rounded --margin=1.5% --color=dark --prompt="yarn test:run " | xargs -r yarn test:run
9+
10+
list:
11+
@ls test-applications

0 commit comments

Comments
 (0)