14 changes: 7 additions & 7 deletions .size-limit.js
@@ -96,21 +96,21 @@ module.exports = [
path: 'packages/browser/build/npm/esm/prod/index.js',
import: createImport('init', 'feedbackIntegration'),
gzip: true,
limit: '42 KB',
limit: '43 KB',
},
{
name: '@sentry/browser (incl. sendFeedback)',
path: 'packages/browser/build/npm/esm/prod/index.js',
import: createImport('init', 'sendFeedback'),
gzip: true,
limit: '30 KB',
limit: '31 KB',
},
{
name: '@sentry/browser (incl. FeedbackAsync)',
path: 'packages/browser/build/npm/esm/prod/index.js',
import: createImport('init', 'feedbackAsyncIntegration'),
gzip: true,
limit: '35 KB',
limit: '36 KB',
},
{
name: '@sentry/browser (incl. Metrics)',
@@ -140,7 +140,7 @@ module.exports = [
import: createImport('init', 'ErrorBoundary'),
ignore: ['react/jsx-runtime'],
gzip: true,
limit: '27 KB',
limit: '28 KB',
},
{
name: '@sentry/react (incl. Tracing)',
@@ -208,7 +208,7 @@ module.exports = [
name: 'CDN Bundle (incl. Tracing, Replay, Feedback, Logs, Metrics)',
path: createCDNPath('bundle.tracing.replay.feedback.logs.metrics.min.js'),
gzip: true,
limit: '86 KB',
limit: '87 KB',
},
// browser CDN bundles (non-gzipped)
{
@@ -223,7 +223,7 @@
path: createCDNPath('bundle.tracing.min.js'),
gzip: false,
brotli: false,
limit: '127 KB',
limit: '128 KB',
},
{
name: 'CDN Bundle (incl. Tracing, Logs, Metrics) - uncompressed',
@@ -278,7 +278,7 @@ module.exports = [
import: createImport('init'),
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: true,
limit: '52 KB',
limit: '53 KB',
},
// Node SDK (ESM)
{
17 changes: 17 additions & 0 deletions CHANGELOG.md
@@ -6,6 +6,23 @@

Work in this release was contributed by @sebws and @harshit078. Thank you for your contributions!

- **feat(core): Introduce a new `Sentry.setConversationId()` API to track multi-turn AI conversations across API calls ([#18909](https://github.com/getsentry/sentry-javascript/pull/18909))**

You can now set a conversation ID that is automatically applied to spans created within the current scope. This allows you to link traces from the same conversation together.

```javascript
import * as Sentry from '@sentry/node';

// Set conversation ID for all subsequent spans
Sentry.setConversationId('conv_abc123');

// All AI spans will now include the gen_ai.conversation.id attribute
await openai.chat.completions.create({...});
```

This is particularly useful for tracking multiple AI API calls that are part of the same conversation, allowing you to analyze entire conversation flows in Sentry.
The conversation ID is stored on the isolation scope and automatically applied to spans via the new `conversationIdIntegration`.
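
For per-request isolation, the conversation ID can also be set on a forked scope, as the integration tests added in this PR do. A minimal sketch (the `openai` client is assumed to be already configured):

```javascript
import * as Sentry from '@sentry/node';

// Fork a scope per request so concurrent conversations
// don't leak their IDs into each other's spans.
await Sentry.withScope(async scope => {
  scope.setConversationId('conv_user1_session_abc');

  await Sentry.startSpan({ op: 'http.server', name: 'GET /chat' }, async () => {
    // Both calls below get the gen_ai.conversation.id attribute
    await openai.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Hello from conversation 1' }],
    });

    await openai.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Follow-up in conversation 1' }],
    });
  });
});
```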

- **feat(tanstackstart-react): Auto-instrument global middleware in `sentryTanstackStart` Vite plugin ([#18884](https://github.com/getsentry/sentry-javascript/pull/18844))**

The `sentryTanstackStart` Vite plugin now automatically instruments `requestMiddleware` and `functionMiddleware` arrays in `createStart()`. This captures performance data without requiring manual wrapping.
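
A minimal setup sketch (the plugin's import path is an assumption; adjust it to your project):

```javascript
// vite.config.js: illustrative only, plugin options omitted
import { defineConfig } from 'vite';
import { sentryTanstackStart } from '@sentry/tanstackstart-react';

export default defineConfig({
  plugins: [
    // requestMiddleware and functionMiddleware arrays passed to createStart()
    // are instrumented automatically; no manual wrapping is needed.
    sentryTanstackStart(),
  ],
});
```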
Expand Up @@ -24,6 +24,7 @@ sentryTest('logs debug messages correctly', async ({ getLocalTestUrl, page }) =>
? [
'Sentry Logger [log]: Integration installed: InboundFilters',
'Sentry Logger [log]: Integration installed: FunctionToString',
'Sentry Logger [log]: Integration installed: ConversationId',
'Sentry Logger [log]: Integration installed: BrowserApiErrors',
'Sentry Logger [log]: Integration installed: Breadcrumbs',
'Sentry Logger [log]: Global Handler attached: onerror',
@@ -0,0 +1,79 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

function startMockServer() {
const app = express();
app.use(express.json());

// Chat completions endpoint
app.post('/openai/chat/completions', (req, res) => {
const { model } = req.body;

res.send({
id: 'chatcmpl-mock123',
object: 'chat.completion',
created: 1677652288,
model: model,
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'Mock response from OpenAI',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 10,
completion_tokens: 15,
total_tokens: 25,
},
});
});

return new Promise(resolve => {
const server = app.listen(0, () => {
resolve(server);
});
});
}

async function run() {
const server = await startMockServer();

// Test: Multiple chat completions in the same conversation with manual conversation ID
await Sentry.startSpan({ op: 'function', name: 'chat-with-manual-conversation-id' }, async () => {
const client = new OpenAI({
baseURL: `http://localhost:${server.address().port}/openai`,
apiKey: 'mock-api-key',
});

// Set conversation ID manually using Sentry API
Sentry.setConversationId('user_chat_session_abc123');

// First message in the conversation
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'What is the capital of France?' }],
});

// Second message in the same conversation
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Tell me more about it' }],
});

// Third message in the same conversation
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'What is its population?' }],
});
});

server.close();
await Sentry.flush(2000);
}

run();
@@ -0,0 +1,74 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

function startMockServer() {
const app = express();
app.use(express.json());

// Chat completions endpoint
app.post('/openai/chat/completions', (req, res) => {
const { model } = req.body;

res.send({
id: 'chatcmpl-mock123',
object: 'chat.completion',
created: 1677652288,
model: model,
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'Mock response from OpenAI',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 10,
completion_tokens: 15,
total_tokens: 25,
},
});
});

return new Promise(resolve => {
const server = app.listen(0, () => {
resolve(server);
});
});
}

async function run() {
const server = await startMockServer();
const client = new OpenAI({
baseURL: `http://localhost:${server.address().port}/openai`,
apiKey: 'mock-api-key',
});

// First request/conversation scope
await Sentry.withScope(async scope => {
// Set conversation ID for this request scope BEFORE starting the span
scope.setConversationId('conv_user1_session_abc');

await Sentry.startSpan({ op: 'http.server', name: 'GET /chat/conversation-1' }, async () => {
// First message in conversation 1
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Hello from conversation 1' }],
});

// Second message in conversation 1
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Follow-up in conversation 1' }],
});
});
});

server.close();
await Sentry.flush(2000);
}

run();
@@ -0,0 +1,74 @@
import * as Sentry from '@sentry/node';
import express from 'express';
import OpenAI from 'openai';

function startMockServer() {
const app = express();
app.use(express.json());

// Chat completions endpoint
app.post('/openai/chat/completions', (req, res) => {
const { model } = req.body;

res.send({
id: 'chatcmpl-mock123',
object: 'chat.completion',
created: 1677652288,
model: model,
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'Mock response from OpenAI',
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 10,
completion_tokens: 15,
total_tokens: 25,
},
});
});

return new Promise(resolve => {
const server = app.listen(0, () => {
resolve(server);
});
});
}

async function run() {
const server = await startMockServer();
const client = new OpenAI({
baseURL: `http://localhost:${server.address().port}/openai`,
apiKey: 'mock-api-key',
});

// Second request/conversation scope (completely separate)
await Sentry.withScope(async scope => {
// Set different conversation ID for this request scope BEFORE starting the span
scope.setConversationId('conv_user2_session_xyz');

await Sentry.startSpan({ op: 'http.server', name: 'GET /chat/conversation-2' }, async () => {
// First message in conversation 2
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Hello from conversation 2' }],
});

// Second message in conversation 2
await client.chat.completions.create({
model: 'gpt-4',
messages: [{ role: 'user', content: 'Follow-up in conversation 2' }],
});
});
});

server.close();
await Sentry.flush(2000);
}

run();