Skip to content

Commit a2b9371

Browse files
codydeclaude
authored and committed
fix(core): Set cache token attributes on spans
The setTokenUsageAttributes function was accepting cache token parameters but only using them to calculate total tokens. Now it also sets them as individual span attributes (gen_ai.usage.input_tokens.cache_write and gen_ai.usage.input_tokens.cached). Also updates .size-limit.js to use develop branch value. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent 2343c17 commit a2b9371

2 files changed

Lines changed: 20 additions & 8 deletions

File tree

.size-limit.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,7 @@ module.exports = [
287287
import: createImport('init'),
288288
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
289289
gzip: true,
290-
limit: '163 KB',
290+
limit: '166 KB',
291291
},
292292
{
293293
name: '@sentry/node - without tracing',

packages/core/src/tracing/ai/utils.ts

Lines changed: 19 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44
import type { Span } from '../../types-hoist/span';
55
import {
66
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
7+
GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE,
8+
GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE,
79
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
810
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
911
} from './gen-ai-attributes';
@@ -47,15 +49,15 @@ export function buildMethodPath(currentPath: string, prop: string): string {
4749
* @param span - The span to add attributes to
4850
* @param promptTokens - The number of prompt tokens
4951
* @param completionTokens - The number of completion tokens
50-
* @param cachedInputTokens - The number of cached input tokens
51-
* @param cachedOutputTokens - The number of cached output tokens
52+
* @param cacheWriteTokens - The number of cache creation/write input tokens
53+
* @param cacheReadTokens - The number of cache read input tokens
5254
*/
5355
export function setTokenUsageAttributes(
5456
span: Span,
5557
promptTokens?: number,
5658
completionTokens?: number,
57-
cachedInputTokens?: number,
58-
cachedOutputTokens?: number,
59+
cacheWriteTokens?: number,
60+
cacheReadTokens?: number,
5961
): void {
6062
if (promptTokens !== undefined) {
6163
span.setAttributes({
@@ -67,18 +69,28 @@ export function setTokenUsageAttributes(
6769
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: completionTokens,
6870
});
6971
}
72+
if (cacheWriteTokens !== undefined) {
73+
span.setAttributes({
74+
[GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE]: cacheWriteTokens,
75+
});
76+
}
77+
if (cacheReadTokens !== undefined) {
78+
span.setAttributes({
79+
[GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE]: cacheReadTokens,
80+
});
81+
}
7082
if (
7183
promptTokens !== undefined ||
7284
completionTokens !== undefined ||
73-
cachedInputTokens !== undefined ||
74-
cachedOutputTokens !== undefined
85+
cacheWriteTokens !== undefined ||
86+
cacheReadTokens !== undefined
7587
) {
7688
/**
7789
* Total input tokens in a request is the summation of `input_tokens`,
7890
* `cache_creation_input_tokens`, and `cache_read_input_tokens`.
7991
*/
8092
const totalTokens =
81-
(promptTokens ?? 0) + (completionTokens ?? 0) + (cachedInputTokens ?? 0) + (cachedOutputTokens ?? 0);
93+
(promptTokens ?? 0) + (completionTokens ?? 0) + (cacheWriteTokens ?? 0) + (cacheReadTokens ?? 0);
8294

8395
span.setAttributes({
8496
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: totalTokens,

0 commit comments

Comments (0)