diff --git a/.changeset/kind-donuts-dream.md b/.changeset/kind-donuts-dream.md
new file mode 100644
index 00000000..1ebb06b9
--- /dev/null
+++ b/.changeset/kind-donuts-dream.md
@@ -0,0 +1,12 @@
+---
+'@tanstack/ai-openrouter': minor
+'@tanstack/ai-anthropic': minor
+'@tanstack/ai-gemini': minor
+'@tanstack/ai-ollama': minor
+'@tanstack/ai-openai': minor
+'@tanstack/ai-grok': minor
+'@tanstack/ai': minor
+'@tanstack/ai-devtools-core': patch
+---
+
+Enhanced usage reporting for every provider
diff --git a/docs/protocol/chunk-definitions.md b/docs/protocol/chunk-definitions.md
index e0bfb585..12d57758 100644
--- a/docs/protocol/chunk-definitions.md
+++ b/docs/protocol/chunk-definitions.md
@@ -266,15 +266,47 @@ Emitted when the stream completes successfully.
 interface DoneStreamChunk extends BaseStreamChunk {
   type: 'done';
   finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null;
-  usage?: {
-    promptTokens: number;
-    completionTokens: number;
-    totalTokens: number;
+  usage?: TokenUsage;
+}
+
+interface TokenUsage {
+  // Core token counts (always present when usage is available)
+  promptTokens: number;
+  completionTokens: number;
+  totalTokens: number;
+
+  // Detailed prompt token breakdown
+  promptTokensDetails?: {
+    cachedTokens?: number; // Tokens from prompt cache hits
+    cacheWriteTokens?: number; // Tokens written to cache
+    cacheCreationTokens?: number; // Anthropic cache creation tokens
+    cacheReadTokens?: number; // Anthropic cache read tokens
+    audioTokens?: number; // Audio input tokens
+    videoTokens?: number; // Video input tokens
+    imageTokens?: number; // Image input tokens
+    textTokens?: number; // Text input tokens
+  };
+
+  // Detailed completion token breakdown
+  completionTokensDetails?: {
+    reasoningTokens?: number; // Reasoning/thinking tokens (o1, Claude)
+    audioTokens?: number; // Audio output tokens
+    videoTokens?: number; // Video output tokens
+    imageTokens?: number; // Image output tokens
+    textTokens?: number; // Text output tokens
+    acceptedPredictionTokens?: number; // Accepted prediction tokens
+    rejectedPredictionTokens?: number; // Rejected prediction tokens
   };
+
+  // Provider-specific details
+  providerUsageDetails?: Record<string, unknown>;
+
+  // Duration (for some billing models)
+  durationSeconds?: number;
 }
 ```
 
-**Example:**
+**Example (basic usage):**
 ```json
 {
   "type": "done",
@@ -290,6 +322,64 @@ interface DoneStreamChunk extends BaseStreamChunk {
 }
 ```
 
+**Example (with cached tokens - OpenAI):**
+```json
+{
+  "type": "done",
+  "id": "chatcmpl-abc123",
+  "model": "gpt-4o",
+  "timestamp": 1701234567892,
+  "finishReason": "stop",
+  "usage": {
+    "promptTokens": 150,
+    "completionTokens": 75,
+    "totalTokens": 225,
+    "promptTokensDetails": {
+      "cachedTokens": 100
+    }
+  }
+}
+```
+
+**Example (with reasoning tokens - o1):**
+```json
+{
+  "type": "done",
+  "id": "chatcmpl-abc123",
+  "model": "o1-preview",
+  "timestamp": 1701234567892,
+  "finishReason": "stop",
+  "usage": {
+    "promptTokens": 150,
+    "completionTokens": 500,
+    "totalTokens": 650,
+    "completionTokensDetails": {
+      "reasoningTokens": 425
+    }
+  }
+}
+```
+
+**Example (Anthropic with cache):**
+```json
+{
+  "type": "done",
+  "id": "msg_abc123",
+  "model": "claude-3-5-sonnet",
+  "timestamp": 1701234567892,
+  "finishReason": "stop",
+  "usage": {
+    "promptTokens": 150,
+    "completionTokens": 75,
+    "totalTokens": 225,
+    "promptTokensDetails": {
+      "cacheCreationTokens": 50,
+      "cacheReadTokens": 100
+    }
+  }
+}
+```
+
 **Finish Reasons:**
 - `stop` - Natural completion
 - `length` - Reached max tokens
@@ -302,6
+392,13 @@ interface DoneStreamChunk extends BaseStreamChunk { - Clean up streaming state - Display token usage (if available) +**Token Usage Notes:** +- `promptTokensDetails.cachedTokens` - OpenAI prompt caching +- `promptTokensDetails.cacheCreationTokens` / `cacheReadTokens` - Anthropic caching +- `completionTokensDetails.reasoningTokens` - Internal reasoning tokens (o1, Claude thinking) +- `providerUsageDetails` - Provider-specific fields not in the standard schema +- For Gemini, modality-specific token counts (audio, video, image, text) are extracted from the response + --- ### ErrorStreamChunk diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index 6fd2eba7..0f850c62 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -129,8 +129,10 @@ export const Route = createFileRoute('/api/tanchat')({ openai: () => createChatOptions({ adapter: openaiText((model || 'gpt-4o') as 'gpt-4o'), - temperature: 2, - modelOptions: {}, + modelOptions: { + prompt_cache_key: 'user-session-12345', + prompt_cache_retention: '24h', + }, }), } diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index 2bb9f55f..768a4432 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -40,7 +40,7 @@ "test:types": "tsc" }, "dependencies": { - "@anthropic-ai/sdk": "^0.71.0" + "@anthropic-ai/sdk": "^0.71.2" }, "peerDependencies": { "@tanstack/ai": "workspace:^", diff --git a/packages/typescript/ai-anthropic/src/adapters/summarize.ts b/packages/typescript/ai-anthropic/src/adapters/summarize.ts index 02e08506..6238351d 100644 --- a/packages/typescript/ai-anthropic/src/adapters/summarize.ts +++ b/packages/typescript/ai-anthropic/src/adapters/summarize.ts @@ -1,4 +1,5 @@ import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' +import { buildAnthropicUsage } from '../usage' import { createAnthropicClient, generateId, @@ -52,7 +53,7 @@ export class AnthropicSummarizeAdapter< async summarize(options: SummarizationOptions): Promise { const systemPrompt = this.buildSummarizationPrompt(options) - const response = await this.client.messages.create({ + const response = await this.client.beta.messages.create({ model: options.model, messages: [{ role: 'user', content: options.text }], system: systemPrompt, @@ -69,11 +70,7 @@ export class AnthropicSummarizeAdapter< id: response.id, model: response.model, summary: content, - usage: { - promptTokens: response.usage.input_tokens, - completionTokens: response.usage.output_tokens, - totalTokens: response.usage.input_tokens + response.usage.output_tokens, - }, + usage: buildAnthropicUsage(response.usage), } } @@ -84,10 +81,8 @@ export class AnthropicSummarizeAdapter< const id = generateId(this.name) const model = options.model let accumulatedContent = '' - let inputTokens = 0 - let outputTokens = 0 - const stream = await this.client.messages.create({ + const stream = await this.client.beta.messages.create({ model: options.model, messages: [{ role: 'user', content: options.text }], system: systemPrompt, @@ -98,7 +93,6 @@ export class AnthropicSummarizeAdapter< for await (const event of stream) { if (event.type === 'message_start') { - inputTokens = event.message.usage.input_tokens } else if (event.type === 'content_block_delta') { if (event.delta.type === 'text_delta') { const delta = event.delta.text @@ -114,7 +108,6 @@ export class 
AnthropicSummarizeAdapter< } } } else if (event.type === 'message_delta') { - outputTokens = event.usage.output_tokens yield { type: 'done', id, @@ -125,11 +118,7 @@ export class AnthropicSummarizeAdapter< | 'length' | 'content_filter' | null, - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, + usage: buildAnthropicUsage(event.usage), } } } diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index 744911e9..8e7009db 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -1,6 +1,7 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' import { convertToolsToProviderFormat } from '../tools/tool-converter' import { validateTextProviderOptions } from '../text/text-provider-options' +import { buildAnthropicUsage } from '../usage' import { createAnthropicClient, generateId, @@ -175,7 +176,7 @@ export class AnthropicTextAdapter< try { // Make non-streaming request with tool_choice forced to our structured output tool - const response = await this.client.messages.create( + const response = await this.client.beta.messages.create( { ...requestParams, stream: false, @@ -222,6 +223,7 @@ export class AnthropicTextAdapter< return { data: parsed, rawText, + usage: buildAnthropicUsage(response.usage), } } catch (error: unknown) { const err = error as Error @@ -560,13 +562,7 @@ export class AnthropicTextAdapter< model: model, timestamp, finishReason: 'tool_calls', - usage: { - promptTokens: event.usage.input_tokens || 0, - completionTokens: event.usage.output_tokens || 0, - totalTokens: - (event.usage.input_tokens || 0) + - (event.usage.output_tokens || 0), - }, + usage: buildAnthropicUsage(event.usage), } break } @@ -591,13 +587,7 @@ export class AnthropicTextAdapter< model: model, timestamp, finishReason: 'stop', - usage: { - promptTokens: event.usage.input_tokens || 0, - completionTokens: event.usage.output_tokens || 0, - totalTokens: - (event.usage.input_tokens || 0) + - (event.usage.output_tokens || 0), - }, + usage: buildAnthropicUsage(event.usage), } } } diff --git a/packages/typescript/ai-anthropic/src/index.ts b/packages/typescript/ai-anthropic/src/index.ts index 4bca2e4b..aa5ca4c4 100644 --- a/packages/typescript/ai-anthropic/src/index.ts +++ b/packages/typescript/ai-anthropic/src/index.ts @@ -44,3 +44,6 @@ export { convertToolsToProviderFormat } from './tools/tool-converter' // Export tool types export type { AnthropicTool, CustomTool } from './tools' + +// Export provider usage types +export type { AnthropicProviderUsageDetails } from './usage' diff --git a/packages/typescript/ai-anthropic/src/usage.ts b/packages/typescript/ai-anthropic/src/usage.ts new file mode 100644 index 00000000..b46171eb --- /dev/null +++ b/packages/typescript/ai-anthropic/src/usage.ts @@ -0,0 +1,66 @@ +import { buildBaseUsage } from '@tanstack/ai' +import type { TokenUsage } from '@tanstack/ai' +import type Anthropic_SDK from '@anthropic-ai/sdk' + +/** + * Anthropic-specific provider usage details. + * These fields are unique to Anthropic and placed in providerUsageDetails. + */ +export interface AnthropicProviderUsageDetails { + /** + * Server-side tool usage metrics. + * Available when using Anthropic's built-in tools like web search. 
+ */ + serverToolUse?: { + /** Number of web search requests made during the response */ + webSearchRequests?: number + /** Number of web fetch requests made during the response */ + webFetchRequests?: number + } + /** Index signature for Record compatibility */ + [key: string]: unknown +} + +/** + * Build normalized TokenUsage from Anthropic's usage object. + * Handles cache tokens and server tool use metrics. + */ +export function buildAnthropicUsage( + usage: + | Anthropic_SDK.Beta.BetaUsage + | Anthropic_SDK.Beta.BetaMessageDeltaUsage, +): TokenUsage { + const inputTokens = usage.input_tokens ?? 0 + const outputTokens = usage.output_tokens + + const result = buildBaseUsage({ + promptTokens: inputTokens, + completionTokens: outputTokens, + totalTokens: inputTokens + outputTokens, + }) + + // Add prompt token details for cache tokens + const cacheCreation = usage.cache_creation_input_tokens + const cacheRead = usage.cache_read_input_tokens + + result.promptTokensDetails = { + ...(cacheCreation ? { cacheWriteTokens: cacheCreation } : {}), + ...(cacheRead ? { cachedTokens: cacheRead } : {}), + } + + // Add provider-specific usage details for server tool use + const serverToolUse = usage.server_tool_use + + result.providerUsageDetails = { + serverToolUse: { + ...(serverToolUse?.web_search_requests + ? { webSearchRequests: serverToolUse.web_search_requests } + : {}), + ...(serverToolUse?.web_fetch_requests + ? { webFetchRequests: serverToolUse.web_fetch_requests } + : {}), + }, + } satisfies AnthropicProviderUsageDetails + + return result +} diff --git a/packages/typescript/ai-anthropic/tests/model-meta.test.ts b/packages/typescript/ai-anthropic/tests/model-meta.test.ts index 842e8c04..99c0817e 100644 --- a/packages/typescript/ai-anthropic/tests/model-meta.test.ts +++ b/packages/typescript/ai-anthropic/tests/model-meta.test.ts @@ -1,26 +1,15 @@ -import { describe, it, expectTypeOf } from 'vitest' -import type { - AnthropicChatModelProviderOptionsByName, - AnthropicModelInputModalitiesByName, -} from '../src/model-meta' +import { describe, expectTypeOf, it } from 'vitest' +import type { AnthropicChatModelProviderOptionsByName } from '../src/model-meta' import type { AnthropicContainerOptions, AnthropicContextManagementOptions, AnthropicMCPOptions, + AnthropicSamplingOptions, AnthropicServiceTierOptions, AnthropicStopSequencesOptions, AnthropicThinkingOptions, AnthropicToolChoiceOptions, - AnthropicSamplingOptions, } from '../src/text/text-provider-options' -import type { - AudioPart, - ConstrainedModelMessage, - DocumentPart, - ImagePart, - TextPart, - VideoPart, -} from '@tanstack/ai' /** * Type assertion tests for Anthropic model provider options. @@ -468,161 +457,3 @@ describe('Anthropic Model Provider Options Type Assertions', () => { }) }) }) - -/** - * Anthropic Model Input Modality Type Assertions - * - * These tests verify that ConstrainedModelMessage correctly restricts - * content parts based on each Anthropic model's supported input modalities. 
- * - * All Claude models support: text, image, document - * No Claude models support: audio, video - */ -describe('Anthropic Model Input Modality Type Assertions', () => { - // Helper type for creating a user message with specific content - type MessageWithContent = { role: 'user'; content: Array } - - describe('Claude Opus 4.5 (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-opus-4-5'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude Sonnet 4.5 (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-sonnet-4-5'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude Haiku 4.5 (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-haiku-4-5'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude Opus 4.1 (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-opus-4-1'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude Sonnet 4 (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-sonnet-4'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude 3.7 Sonnet (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-3-7-sonnet'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude Opus 4 (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-opus-4'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or 
VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude 3.5 Haiku (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-3-5-haiku'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('Claude 3 Haiku (text + image + document)', () => { - type Modalities = AnthropicModelInputModalitiesByName['claude-3-haiku'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, and DocumentPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart or VideoPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) -}) diff --git a/packages/typescript/ai-anthropic/tests/usage-extraction.test.ts b/packages/typescript/ai-anthropic/tests/usage-extraction.test.ts new file mode 100644 index 00000000..abb7659d --- /dev/null +++ b/packages/typescript/ai-anthropic/tests/usage-extraction.test.ts @@ -0,0 +1,289 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { chat } from '@tanstack/ai' +import { AnthropicTextAdapter } from '../src/adapters/text' +import type { StreamChunk } from '@tanstack/ai' + +const mocks = vi.hoisted(() => { + const betaMessagesCreate = vi.fn() + + const client = { + beta: { + messages: { + create: betaMessagesCreate, + }, + }, + } + + return { betaMessagesCreate, client } +}) + +vi.mock('@anthropic-ai/sdk', () => { + const { client } = mocks + + class MockAnthropic { + beta = client.beta + + constructor(_: { apiKey: string }) {} + } + + return { default: MockAnthropic } +}) + +const createAdapter = () => + new AnthropicTextAdapter({ apiKey: 'test-key' }, 'claude-opus-4') + +function createMockStream( + chunks: Array>, +): AsyncIterable> { + return { + // eslint-disable-next-line @typescript-eslint/require-await + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + }, + } +} + +describe('Anthropic usage extraction', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('extracts basic token usage from message_delta', async () => { + const mockStream = createMockStream([ + { + type: 'message_start', + message: { + id: 'msg_123', + type: 'message', + role: 'assistant', + content: [], + model: 'claude-3-7-sonnet-20250219', + usage: { + input_tokens: 100, + output_tokens: 0, + }, + }, + }, + { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + }, + { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello world' }, + }, + { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { + input_tokens: 100, + output_tokens: 50, + }, + }, + { + type: 'message_stop', + }, + ]) + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toMatchObject({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + }) + 
+ it('extracts cache token details', async () => { + const mockStream = createMockStream([ + { + type: 'message_start', + message: { + id: 'msg_123', + type: 'message', + role: 'assistant', + content: [], + model: 'claude-3-7-sonnet-20250219', + usage: { + input_tokens: 100, + output_tokens: 0, + cache_creation_input_tokens: 50, + cache_read_input_tokens: 25, + }, + }, + }, + { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + }, + { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello world' }, + }, + { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { + input_tokens: 100, + output_tokens: 50, + cache_creation_input_tokens: 50, + cache_read_input_tokens: 25, + }, + }, + { + type: 'message_stop', + }, + ]) + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toEqual({ + cacheWriteTokens: 50, + cachedTokens: 25, + }) + }) + + it('extracts server tool use metrics', async () => { + const mockStream = createMockStream([ + { + type: 'message_start', + message: { + id: 'msg_123', + type: 'message', + role: 'assistant', + content: [], + model: 'claude-3-7-sonnet-20250219', + usage: { + input_tokens: 100, + output_tokens: 0, + }, + }, + }, + { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + }, + { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello world' }, + }, + { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { + input_tokens: 100, + output_tokens: 50, + server_tool_use: { + web_search_requests: 3, + web_fetch_requests: 2, + }, + }, + }, + { + type: 'message_stop', + }, + ]) + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.providerUsageDetails).toMatchObject({ + serverToolUse: { + webSearchRequests: 3, + webFetchRequests: 2, + }, + }) + }) + + it('handles response with no cache tokens', async () => { + const mockStream = createMockStream([ + { + type: 'message_start', + message: { + id: 'msg_123', + type: 'message', + role: 'assistant', + content: [], + model: 'claude-3-7-sonnet-20250219', + usage: { + input_tokens: 100, + output_tokens: 0, + }, + }, + }, + { + type: 'content_block_start', + index: 0, + content_block: { type: 'text', text: '' }, + }, + { + type: 'content_block_delta', + index: 0, + delta: { type: 'text_delta', text: 'Hello world' }, + }, + { + type: 'message_delta', + delta: { stop_reason: 'end_turn' }, + usage: { + input_tokens: 100, + output_tokens: 50, + }, + }, + { + type: 'message_stop', + }, + ]) + + mocks.betaMessagesCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + // promptTokensDetails 
should be empty object when no cache tokens + expect(doneChunk?.usage?.promptTokensDetails).toEqual({}) + }) +}) diff --git a/packages/typescript/ai-devtools/src/components/conversation/MessageCard.tsx b/packages/typescript/ai-devtools/src/components/conversation/MessageCard.tsx index 55019538..840e3032 100644 --- a/packages/typescript/ai-devtools/src/components/conversation/MessageCard.tsx +++ b/packages/typescript/ai-devtools/src/components/conversation/MessageCard.tsx @@ -116,6 +116,28 @@ export const MessageCard: Component = (props) => { {msg().usage?.promptTokens.toLocaleString()} in {msg().usage?.completionTokens.toLocaleString()} out + + + + 💾{' '} + {msg().usage?.promptTokensDetails?.cachedTokens?.toLocaleString()} + + + + + + 🧠{' '} + {msg().usage?.completionTokensDetails?.reasoningTokens?.toLocaleString()} + + + + + + ⏱️ {msg().usage?.durationSeconds?.toFixed(1)}s + + 0}> diff --git a/packages/typescript/ai-devtools/src/components/conversation/MessageGroup.tsx b/packages/typescript/ai-devtools/src/components/conversation/MessageGroup.tsx index 3ca89b90..fb7aefc0 100644 --- a/packages/typescript/ai-devtools/src/components/conversation/MessageGroup.tsx +++ b/packages/typescript/ai-devtools/src/components/conversation/MessageGroup.tsx @@ -3,13 +3,7 @@ import { useStyles } from '../../styles/use-styles' import { ChunkItem } from './ChunkItem' import { ChunkBadges } from './ChunkBadges' import type { Component } from 'solid-js' -import type { Chunk } from '../../store/ai-store' - -interface TokenUsage { - promptTokens: number - completionTokens: number - totalTokens: number -} +import type { Chunk, TokenUsage } from '../../store/ai-store' interface MessageGroupProps { messageId: string @@ -62,6 +56,28 @@ export const MessageGroup: Component = (props) => { {props.usage?.completionTokens.toLocaleString()} out + + + + 💾{' '} + {props.usage?.promptTokensDetails?.cachedTokens?.toLocaleString()} + + + + + + 🧠{' '} + {props.usage?.completionTokensDetails?.reasoningTokens?.toLocaleString()} + + + + + + ⏱️ {props.usage?.durationSeconds?.toFixed(1)}s + + diff --git a/packages/typescript/ai-devtools/src/store/ai-context.tsx b/packages/typescript/ai-devtools/src/store/ai-context.tsx index bc43d7f3..7dc2f755 100644 --- a/packages/typescript/ai-devtools/src/store/ai-context.tsx +++ b/packages/typescript/ai-devtools/src/store/ai-context.tsx @@ -1,6 +1,7 @@ import { batch, createContext, onCleanup, onMount, useContext } from 'solid-js' import { createStore, produce } from 'solid-js/store' import { aiEventClient } from '@tanstack/ai/event-client' +import type { TokenUsage } from '@tanstack/ai' import type { ParentComponent } from 'solid-js' interface MessagePart { @@ -26,11 +27,8 @@ export interface ToolCall { duration?: number } -interface TokenUsage { - promptTokens: number - completionTokens: number - totalTokens: number -} +// Re-export TokenUsage from @tanstack/ai for external consumers +export type { TokenUsage } export interface Message { id: string diff --git a/packages/typescript/ai-devtools/src/store/ai-store.ts b/packages/typescript/ai-devtools/src/store/ai-store.ts index 600b9ffb..6ba591ab 100644 --- a/packages/typescript/ai-devtools/src/store/ai-store.ts +++ b/packages/typescript/ai-devtools/src/store/ai-store.ts @@ -1,2 +1,8 @@ // Re-export types from ai-context for backward compatibility -export type { ToolCall, Message, Chunk, Conversation } from './ai-context' +export type { + ToolCall, + Message, + Chunk, + Conversation, + TokenUsage, +} from './ai-context' diff --git 
a/packages/typescript/ai-gemini/src/adapters/summarize.ts b/packages/typescript/ai-gemini/src/adapters/summarize.ts index 1c717e63..97a7332f 100644 --- a/packages/typescript/ai-gemini/src/adapters/summarize.ts +++ b/packages/typescript/ai-gemini/src/adapters/summarize.ts @@ -1,4 +1,6 @@ import { FinishReason } from '@google/genai' +import { buildBaseUsage } from '@tanstack/ai' +import { buildGeminiUsage } from '../usage' import { createGeminiClient, generateId, @@ -105,18 +107,12 @@ export class GeminiSummarizeAdapter< }) const summary = response.text ?? '' - const inputTokens = response.usageMetadata?.promptTokenCount ?? 0 - const outputTokens = response.usageMetadata?.candidatesTokenCount ?? 0 return { id: generateId('sum'), model, summary, - usage: { - promptTokens: inputTokens, - completionTokens: outputTokens, - totalTokens: inputTokens + outputTokens, - }, + usage: buildGeminiUsage(response.usageMetadata), } } @@ -194,11 +190,11 @@ export class GeminiSummarizeAdapter< : finishReason === FinishReason.MAX_TOKENS ? 'length' : 'content_filter', - usage: { + usage: buildBaseUsage({ promptTokens: inputTokens, completionTokens: outputTokens, totalTokens: inputTokens + outputTokens, - }, + }), } } } diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index 302409f8..85fb003a 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -1,6 +1,7 @@ import { FinishReason } from '@google/genai' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { convertToolsToProviderFormat } from '../tools/tool-converter' +import { buildGeminiUsage } from '../usage' import { createGeminiClient, generateId, @@ -168,6 +169,9 @@ export class GeminiTextAdapter< return { data: parsed, rawText, + usage: result.usageMetadata + ? buildGeminiUsage(result.usageMetadata) + : undefined, } } catch (error) { throw new Error( @@ -365,11 +369,7 @@ export class GeminiTextAdapter< timestamp, finishReason: toolCallMap.size > 0 ? 'tool_calls' : 'stop', usage: chunk.usageMetadata - ? { - promptTokens: chunk.usageMetadata.promptTokenCount ?? 0, - completionTokens: chunk.usageMetadata.candidatesTokenCount ?? 0, - totalTokens: chunk.usageMetadata.totalTokenCount ?? 0, - } + ? buildGeminiUsage(chunk.usageMetadata) : undefined, } } diff --git a/packages/typescript/ai-gemini/src/index.ts b/packages/typescript/ai-gemini/src/index.ts index c60ce075..31069258 100644 --- a/packages/typescript/ai-gemini/src/index.ts +++ b/packages/typescript/ai-gemini/src/index.ts @@ -85,3 +85,6 @@ export type { GeminiDocumentMimeType, GeminiMessageMetadataByModality, } from './message-types' + +// Export provider usage types +export type { GeminiProviderUsageDetails } from './usage' diff --git a/packages/typescript/ai-gemini/src/usage.ts b/packages/typescript/ai-gemini/src/usage.ts new file mode 100644 index 00000000..8773d4a1 --- /dev/null +++ b/packages/typescript/ai-gemini/src/usage.ts @@ -0,0 +1,198 @@ +import { buildBaseUsage } from '@tanstack/ai' +import type { TokenUsage } from '@tanstack/ai' +import type { + GenerateContentResponseUsageMetadata, + ModalityTokenCount, +} from '@google/genai' + +/** + * Flattened modality token counts for normalized usage reporting. + * Maps Gemini's ModalityTokenCount array to individual fields. 
+ */ +export interface FlattenedModalityTokens { + /** Text tokens */ + textTokens?: number + /** Image tokens */ + imageTokens?: number + /** Audio tokens */ + audioTokens?: number + /** Video tokens */ + videoTokens?: number +} + +/** + * Flattens Gemini's ModalityTokenCount array into individual token fields. + * Extracts TEXT, IMAGE, AUDIO, VIDEO modality counts into a normalized structure. + */ +export function flattenModalityTokenCounts( + modalities?: Array, +): FlattenedModalityTokens { + if (!modalities || modalities.length === 0) { + return {} + } + + const result: FlattenedModalityTokens = {} + + for (const item of modalities) { + if (!item.modality || item.tokenCount === undefined) { + continue + } + + const modality = item.modality.toUpperCase() + const count = item.tokenCount + + switch (modality) { + case 'TEXT': + result.textTokens = (result.textTokens ?? 0) + count + break + case 'IMAGE': + result.imageTokens = (result.imageTokens ?? 0) + count + break + case 'AUDIO': + result.audioTokens = (result.audioTokens ?? 0) + count + break + case 'VIDEO': + result.videoTokens = (result.videoTokens ?? 0) + count + break + } + } + + return result +} + +/** + * Checks if a FlattenedModalityTokens object has any values set. + */ +export function hasModalityTokens(tokens: FlattenedModalityTokens): boolean { + return ( + tokens.textTokens !== undefined || + tokens.imageTokens !== undefined || + tokens.audioTokens !== undefined || + tokens.videoTokens !== undefined + ) +} + +/** + * Gemini-specific provider usage details. + * These fields are unique to Gemini and placed in providerUsageDetails. + */ +export interface GeminiProviderUsageDetails { + /** + * The traffic type for this request. + * Can indicate whether request was handled by different service tiers. + */ + trafficType?: string + /** + * Number of tokens in the results from tool executions, + * which are provided back to the model as input. + */ + toolUsePromptTokenCount?: number + /** + * Detailed breakdown by modality of the token counts from + * the results of tool executions. + */ + toolUsePromptTokensDetails?: Array<{ + modality: string + tokenCount: number + }> + /** + * Detailed breakdown of cache tokens by modality. + * More granular than the normalized cachedTokens field. + */ + cacheTokensDetails?: Array<{ + modality: string + tokenCount: number + }> + /** Index signature for Record compatibility */ + [key: string]: unknown +} + +/** + * Build normalized TokenUsage from Gemini's usageMetadata. + * Handles modality breakdowns and thinking tokens. + */ +export function buildGeminiUsage( + usageMetadata: GenerateContentResponseUsageMetadata | undefined, +): TokenUsage { + const promptTokens = usageMetadata?.promptTokenCount ?? 0 + const completionTokens = usageMetadata?.candidatesTokenCount ?? 0 + + const result = buildBaseUsage({ + promptTokens: promptTokens, + completionTokens: completionTokens, + totalTokens: + usageMetadata?.totalTokenCount ?? promptTokens + completionTokens, + }) + + // Add prompt token details + // Flatten modality breakdown for prompt + const promptModalities = flattenModalityTokenCounts( + usageMetadata?.promptTokensDetails, + ) + const cachedTokens = usageMetadata?.cachedContentTokenCount + + const promptTokensDetails = { + ...(hasModalityTokens(promptModalities) ? promptModalities : {}), + ...(cachedTokens !== undefined && cachedTokens > 0 ? 
{ cachedTokens } : {}), + } + + // Add completion token details + // Flatten modality breakdown for candidates (output) + const completionModalities = flattenModalityTokenCounts( + usageMetadata?.candidatesTokensDetails, + ) + const thoughtsTokens = usageMetadata?.thoughtsTokenCount + + const completionTokensDetails = { + ...(hasModalityTokens(completionModalities) ? completionModalities : {}), + // Map thoughtsTokenCount to reasoningTokens for consistency with OpenAI + ...(thoughtsTokens !== undefined && thoughtsTokens > 0 + ? { reasoningTokens: thoughtsTokens } + : {}), + } + + // Add provider-specific details + const providerDetails: GeminiProviderUsageDetails = { + ...(usageMetadata?.trafficType + ? { trafficType: usageMetadata.trafficType } + : {}), + ...(usageMetadata?.toolUsePromptTokenCount !== undefined && + usageMetadata.toolUsePromptTokenCount > 0 + ? { toolUsePromptTokenCount: usageMetadata.toolUsePromptTokenCount } + : {}), + ...(usageMetadata?.toolUsePromptTokensDetails && + usageMetadata.toolUsePromptTokensDetails.length > 0 + ? { + toolUsePromptTokensDetails: + usageMetadata.toolUsePromptTokensDetails.map((item) => ({ + modality: item.modality || 'UNKNOWN', + tokenCount: item.tokenCount ?? 0, + })), + } + : {}), + ...(usageMetadata?.cacheTokensDetails && + usageMetadata.cacheTokensDetails.length > 0 + ? { + cacheTokensDetails: usageMetadata.cacheTokensDetails.map((item) => ({ + modality: item.modality || 'UNKNOWN', + tokenCount: item.tokenCount ?? 0, + })), + } + : {}), + } + + // Add prompt token details if available + if (Object.keys(promptTokensDetails).length > 0) { + result.promptTokensDetails = promptTokensDetails + } + // Add provider details if available + if (Object.keys(providerDetails).length > 0) { + result.providerUsageDetails = providerDetails + } + // Add completion token details if available + if (Object.keys(completionTokensDetails).length > 0) { + result.completionTokensDetails = completionTokensDetails + } + + return result +} diff --git a/packages/typescript/ai-gemini/tests/flatten-modality-tokens.test.ts b/packages/typescript/ai-gemini/tests/flatten-modality-tokens.test.ts new file mode 100644 index 00000000..8655ee60 --- /dev/null +++ b/packages/typescript/ai-gemini/tests/flatten-modality-tokens.test.ts @@ -0,0 +1,132 @@ +import { MediaModality } from '@google/genai' +import { describe, expect, it } from 'vitest' +import { flattenModalityTokenCounts, hasModalityTokens } from '../src/usage' + +describe('flattenModalityTokenCounts', () => { + it('returns empty object for undefined input', () => { + expect(flattenModalityTokenCounts(undefined)).toEqual({}) + }) + + it('returns empty object for empty array', () => { + expect(flattenModalityTokenCounts([])).toEqual({}) + }) + + it('extracts TEXT modality tokens', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.TEXT, tokenCount: 100 }, + ]) + expect(result).toEqual({ textTokens: 100 }) + }) + + it('extracts IMAGE modality tokens', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.IMAGE, tokenCount: 50 }, + ]) + expect(result).toEqual({ imageTokens: 50 }) + }) + + it('extracts AUDIO modality tokens', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.AUDIO, tokenCount: 200 }, + ]) + expect(result).toEqual({ audioTokens: 200 }) + }) + + it('extracts VIDEO modality tokens', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.VIDEO, tokenCount: 150 }, + ]) + expect(result).toEqual({ 
videoTokens: 150 }) + }) + + it('handles multiple modalities', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.TEXT, tokenCount: 100 }, + { modality: MediaModality.IMAGE, tokenCount: 50 }, + { modality: MediaModality.AUDIO, tokenCount: 25 }, + ]) + expect(result).toEqual({ + textTokens: 100, + imageTokens: 50, + audioTokens: 25, + }) + }) + + it('handles case-insensitive modality names', () => { + const result = flattenModalityTokenCounts([ + { modality: 'text' as MediaModality, tokenCount: 100 }, + { modality: 'Image' as MediaModality, tokenCount: 50 }, + ]) + expect(result).toEqual({ + textTokens: 100, + imageTokens: 50, + }) + }) + + it('aggregates duplicate modality entries', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.TEXT, tokenCount: 100 }, + { modality: MediaModality.TEXT, tokenCount: 50 }, + ]) + expect(result).toEqual({ textTokens: 150 }) + }) + + it('ignores unknown modalities', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.TEXT, tokenCount: 100 }, + { modality: 'UNKNOWN' as MediaModality, tokenCount: 999 }, + ]) + expect(result).toEqual({ textTokens: 100 }) + }) + + it('skips entries with undefined modality', () => { + const result = flattenModalityTokenCounts([ + { modality: undefined, tokenCount: 100 }, + { modality: MediaModality.TEXT, tokenCount: 50 }, + ]) + expect(result).toEqual({ textTokens: 50 }) + }) + + it('skips entries with undefined tokenCount', () => { + const result = flattenModalityTokenCounts([ + { modality: MediaModality.TEXT, tokenCount: undefined }, + { modality: MediaModality.IMAGE, tokenCount: 50 }, + ]) + expect(result).toEqual({ imageTokens: 50 }) + }) +}) + +describe('hasModalityTokens', () => { + it('returns false for empty object', () => { + expect(hasModalityTokens({})).toBe(false) + }) + + it('returns true when textTokens is defined', () => { + expect(hasModalityTokens({ textTokens: 100 })).toBe(true) + }) + + it('returns true when imageTokens is defined', () => { + expect(hasModalityTokens({ imageTokens: 50 })).toBe(true) + }) + + it('returns true when audioTokens is defined', () => { + expect(hasModalityTokens({ audioTokens: 25 })).toBe(true) + }) + + it('returns true when videoTokens is defined', () => { + expect(hasModalityTokens({ videoTokens: 75 })).toBe(true) + }) + + it('returns true when multiple tokens are defined', () => { + expect( + hasModalityTokens({ + textTokens: 100, + imageTokens: 50, + }), + ).toBe(true) + }) + + it('returns true when token count is zero', () => { + expect(hasModalityTokens({ textTokens: 0 })).toBe(true) + }) +}) diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts index 1f000171..b45f8d7d 100644 --- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts @@ -1,16 +1,16 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest' +import { beforeEach, describe, expect, it, vi } from 'vitest' import { chat, summarize } from '@tanstack/ai' -import type { Tool, StreamChunk } from '@tanstack/ai' -import { - Type, - type HarmBlockThreshold, - type HarmCategory, - type SafetySetting, -} from '@google/genai' +import { Type } from '@google/genai' import { GeminiTextAdapter } from '../src/adapters/text' import { GeminiSummarizeAdapter } from '../src/adapters/summarize' +import type { + HarmBlockThreshold, + HarmCategory, + SafetySetting, + Schema, +} from 
'@google/genai' +import type { StreamChunk, Tool } from '@tanstack/ai' import type { GeminiTextProviderOptions } from '../src/adapters/text' -import type { Schema } from '@google/genai' const mocks = vi.hoisted(() => { return { @@ -61,6 +61,7 @@ const weatherTool: Tool = { } const createStream = (chunks: Array>) => { + // eslint-disable-next-line @typescript-eslint/require-await return (async function* () { for (const chunk of chunks) { yield chunk @@ -112,7 +113,7 @@ describe('GeminiAdapter through AI', () => { } expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) - const [payload] = mocks.generateContentStreamSpy.mock.calls[0] + const [payload] = mocks.generateContentStreamSpy.mock.calls[0] as any expect(payload.model).toBe('gemini-2.5-pro') expect(payload.config).toMatchObject({ temperature: 0.4, @@ -148,7 +149,7 @@ describe('GeminiAdapter through AI', () => { mocks.generateContentStreamSpy.mockResolvedValue(createStream(streamChunks)) - const safetySettings: SafetySetting[] = [ + const safetySettings: Array = [ { category: 'HARM_CATEGORY_HATE_SPEECH' as HarmCategory, threshold: 'BLOCK_LOW_AND_ABOVE' as HarmBlockThreshold, @@ -171,35 +172,35 @@ describe('GeminiAdapter through AI', () => { const providerOptions: GeminiTextProviderOptions = { safetySettings, - generationConfig: { - stopSequences: ['', '###'], - responseMimeType: 'application/json', - responseSchema, - responseJsonSchema, - responseModalities: ['TEXT'], - candidateCount: 2, - topK: 6, - seed: 7, - presencePenalty: 0.2, - frequencyPenalty: 0.4, - responseLogprobs: true, - logprobs: 3, - enableEnhancedCivicAnswers: true, - speechConfig: { - voiceConfig: { - prebuiltVoiceConfig: { - voiceName: 'Studio', - }, + + stopSequences: ['', '###'], + responseMimeType: 'application/json', + responseSchema, + responseJsonSchema, + responseModalities: ['TEXT'], + candidateCount: 2, + topK: 6, + seed: 7, + presencePenalty: 0.2, + frequencyPenalty: 0.4, + responseLogprobs: true, + logprobs: 3, + enableEnhancedCivicAnswers: true, + speechConfig: { + voiceConfig: { + prebuiltVoiceConfig: { + voiceName: 'Studio', }, }, - thinkingConfig: { - includeThoughts: true, - thinkingBudget: 128, - }, - imageConfig: { - aspectRatio: '1:1', - }, }, + thinkingConfig: { + includeThoughts: true, + thinkingBudget: 128, + }, + imageConfig: { + aspectRatio: '1:1', + }, + cachedContent: 'cachedContents/weather-context', } as const @@ -219,7 +220,7 @@ describe('GeminiAdapter through AI', () => { } expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) - const [payload] = mocks.generateContentStreamSpy.mock.calls[0] + const [payload] = mocks.generateContentStreamSpy.mock.calls[0] as any const config = payload.config expect(config.temperature).toBe(0.61) @@ -227,28 +228,28 @@ describe('GeminiAdapter through AI', () => { expect(config.maxOutputTokens).toBe(512) expect(config.cachedContent).toBe(providerOptions.cachedContent) expect(config.safetySettings).toEqual(providerOptions.safetySettings) - expect(config.stopSequences).toEqual(providerOptions?.stopSequences) - expect(config.responseMimeType).toBe(providerOptions?.responseMimeType) - expect(config.responseSchema).toEqual(providerOptions?.responseSchema) + expect(config.stopSequences).toEqual(providerOptions.stopSequences) + expect(config.responseMimeType).toBe(providerOptions.responseMimeType) + expect(config.responseSchema).toEqual(providerOptions.responseSchema) expect(config.responseJsonSchema).toEqual( - providerOptions?.responseJsonSchema, + providerOptions.responseJsonSchema, ) 
expect(config.responseModalities).toEqual( - providerOptions?.responseModalities, + providerOptions.responseModalities, ) - expect(config.candidateCount).toBe(providerOptions?.candidateCount) - expect(config.topK).toBe(providerOptions?.topK) - expect(config.seed).toBe(providerOptions?.seed) - expect(config.presencePenalty).toBe(providerOptions?.presencePenalty) - expect(config.frequencyPenalty).toBe(providerOptions?.frequencyPenalty) - expect(config.responseLogprobs).toBe(providerOptions?.responseLogprobs) - expect(config.logprobs).toBe(providerOptions?.logprobs) + expect(config.candidateCount).toBe(providerOptions.candidateCount) + expect(config.topK).toBe(providerOptions.topK) + expect(config.seed).toBe(providerOptions.seed) + expect(config.presencePenalty).toBe(providerOptions.presencePenalty) + expect(config.frequencyPenalty).toBe(providerOptions.frequencyPenalty) + expect(config.responseLogprobs).toBe(providerOptions.responseLogprobs) + expect(config.logprobs).toBe(providerOptions.logprobs) expect(config.enableEnhancedCivicAnswers).toBe( - providerOptions?.enableEnhancedCivicAnswers, + providerOptions.enableEnhancedCivicAnswers, ) - expect(config.speechConfig).toEqual(providerOptions?.speechConfig) - expect(config.thinkingConfig).toEqual(providerOptions?.thinkingConfig) - expect(config.imageConfig).toEqual(providerOptions?.imageConfig) + expect(config.speechConfig).toEqual(providerOptions.speechConfig) + expect(config.thinkingConfig).toEqual(providerOptions.thinkingConfig) + expect(config.imageConfig).toEqual(providerOptions.imageConfig) }) it('streams chat chunks using mapped provider config', async () => { @@ -282,7 +283,7 @@ describe('GeminiAdapter through AI', () => { mocks.generateContentStreamSpy.mockResolvedValue(createStream(streamChunks)) const adapter = createTextAdapter() - const received: StreamChunk[] = [] + const received: Array = [] for await (const chunk of chat({ adapter, messages: [{ role: 'user', content: 'Tell me a joke' }], @@ -295,7 +296,7 @@ describe('GeminiAdapter through AI', () => { } expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1) - const [streamPayload] = mocks.generateContentStreamSpy.mock.calls[0] + const [streamPayload] = mocks.generateContentStreamSpy.mock.calls[0] as any expect(streamPayload.config?.topK).toBe(3) expect(received[0]).toMatchObject({ type: 'content', @@ -337,7 +338,7 @@ describe('GeminiAdapter through AI', () => { }) expect(mocks.generateContentSpy).toHaveBeenCalledTimes(1) - const [payload] = mocks.generateContentSpy.mock.calls[0] + const [payload] = mocks.generateContentSpy.mock.calls[0] as any expect(payload.model).toBe('gemini-2.0-flash') expect(payload.config.systemInstruction).toContain('summarizes text') expect(payload.config.systemInstruction).toContain('123 tokens') diff --git a/packages/typescript/ai-gemini/tests/image-adapter.test.ts b/packages/typescript/ai-gemini/tests/image-adapter.test.ts index b990fdc7..f01f8ccf 100644 --- a/packages/typescript/ai-gemini/tests/image-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/image-adapter.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi } from 'vitest' +import { describe, expect, it, vi } from 'vitest' import { GeminiImageAdapter, createGeminiImage } from '../src/adapters/image' import { sizeToAspectRatio, @@ -129,7 +129,10 @@ describe('Gemini Image Adapter', () => { const mockGenerateImages = vi.fn().mockResolvedValueOnce(mockResponse) - const adapter = createGeminiImage('test-api-key') + const adapter = createGeminiImage( + 
'gemini-2.5-flash-image', + 'test-api-key', + ) // Replace the internal Gemini SDK client with our mock ;( adapter as unknown as { @@ -159,7 +162,7 @@ describe('Gemini Image Adapter', () => { expect(result.model).toBe('imagen-3.0-generate-002') expect(result.images).toHaveLength(1) - expect(result.images[0].b64Json).toBe('base64encodedimage') + expect(result.images[0]?.b64Json).toBe('base64encodedimage') }) it('generates a unique ID for each response', async () => { @@ -169,7 +172,10 @@ describe('Gemini Image Adapter', () => { const mockGenerateImages = vi.fn().mockResolvedValue(mockResponse) - const adapter = createGeminiImage('test-api-key') + const adapter = createGeminiImage( + 'gemini-2.5-flash-image', + 'test-api-key', + ) ;( adapter as unknown as { client: { models: { generateImages: unknown } } diff --git a/packages/typescript/ai-gemini/tests/model-meta.test.ts b/packages/typescript/ai-gemini/tests/model-meta.test.ts index 05a59ba4..a989bd44 100644 --- a/packages/typescript/ai-gemini/tests/model-meta.test.ts +++ b/packages/typescript/ai-gemini/tests/model-meta.test.ts @@ -1,24 +1,12 @@ -import { describe, it, expectTypeOf } from 'vitest' +import { describe, expectTypeOf, it } from 'vitest' +import type { GeminiChatModelProviderOptionsByName } from '../src/model-meta' import type { - GeminiChatModelProviderOptionsByName, - GeminiModelInputModalitiesByName, -} from '../src/model-meta' -import type { - GeminiThinkingOptions, + GeminiCachedContentOptions, + GeminiSafetyOptions, GeminiStructuredOutputOptions, + GeminiThinkingOptions, GeminiToolConfigOptions, - GeminiSafetyOptions, - GeminiGenerationConfigOptions, - GeminiCachedContentOptions, } from '../src/text/text-provider-options' -import type { - AudioPart, - ConstrainedModelMessage, - DocumentPart, - ImagePart, - TextPart, - VideoPart, -} from '@tanstack/ai' /** * Type assertion tests for Gemini model provider options. 
@@ -33,7 +21,6 @@ import type { // Base options that ALL chat models should have type BaseOptions = GeminiToolConfigOptions & GeminiSafetyOptions & - GeminiGenerationConfigOptions & GeminiCachedContentOptions describe('Gemini Model Provider Options Type Assertions', () => { @@ -52,7 +39,6 @@ describe('Gemini Model Provider Options Type Assertions', () => { expectTypeOf().toExtend() // Verify specific properties exist - expectTypeOf().toHaveProperty('generationConfig') expectTypeOf().toHaveProperty('safetySettings') expectTypeOf().toHaveProperty('toolConfig') expectTypeOf().toHaveProperty('cachedContent') @@ -74,7 +60,6 @@ describe('Gemini Model Provider Options Type Assertions', () => { expectTypeOf().toExtend() // Verify specific properties exist - expectTypeOf().toHaveProperty('generationConfig') expectTypeOf().toHaveProperty('safetySettings') expectTypeOf().toHaveProperty('toolConfig') expectTypeOf().toHaveProperty('cachedContent') @@ -159,7 +144,6 @@ describe('Gemini Model Provider Options Type Assertions', () => { type Options = GeminiChatModelProviderOptionsByName[Model] // Should NOT have thinking options - verify it's not assignable - // GeminiThinkingOptions has generationConfig.thinkingConfig which should not exist expectTypeOf().not.toExtend() // Should have structured output options @@ -206,12 +190,11 @@ describe('Gemini Model Provider Options Type Assertions', () => { }) describe('Detailed property type assertions', () => { - it('thinking models should allow thinkingConfig in generationConfig', () => { + it('thinking models should have thinkingConfig option', () => { type Options = GeminiChatModelProviderOptionsByName['gemini-2.5-pro'] - // The generationConfig should include thinkingConfig from GeminiGenerationConfigOptions - // which intersects with GeminiThinkingOptions - expectTypeOf().toHaveProperty('generationConfig') + // Thinking models should have the thinkingConfig option + expectTypeOf().toHaveProperty('thinkingConfig') }) it('structured output options should have responseMimeType and responseSchema', () => { @@ -378,158 +361,3 @@ describe('Gemini Model Provider Options Type Assertions', () => { }) }) }) - -/** - * Gemini Model Input Modality Type Assertions - * - * These tests verify that ConstrainedModelMessage correctly restricts - * content parts based on each Gemini model's supported input modalities. 
- * - * Models with full multimodal (text + image + audio + video + document): - * - gemini-3-pro-preview - * - gemini-2.5-pro - * - gemini-2.5-flash-lite (and preview) - * - * Models with limited multimodal (text + image + audio + video, NO document): - * - gemini-2.5-flash (and preview) - * - gemini-2.0-flash (and lite) - */ -describe('Gemini Model Input Modality Type Assertions', () => { - // Helper type for creating a user message with specific content - type MessageWithContent = { role: 'user'; content: Array } - - // ===== Full Multimodal Models (text + image + audio + video + document) ===== - - describe('gemini-3-pro-preview (full multimodal)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-3-pro-preview'] - type Message = ConstrainedModelMessage - - it('should allow all content part types', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - }) - - describe('gemini-3-flash-preview (full multimodal)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-3-flash-preview'] - type Message = ConstrainedModelMessage - - it('should allow all content part types', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - }) - - describe('gemini-2.5-pro (full multimodal)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-2.5-pro'] - type Message = ConstrainedModelMessage - - it('should allow all content part types', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - }) - - describe('gemini-2.5-flash-lite (full multimodal)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-2.5-flash-lite'] - type Message = ConstrainedModelMessage - - it('should allow all content part types', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - }) - - describe('gemini-2.5-flash-lite-preview-09-2025 (full multimodal)', () => { - type Modalities = - GeminiModelInputModalitiesByName['gemini-2.5-flash-lite-preview-09-2025'] - type Message = ConstrainedModelMessage - - it('should allow all content part types', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - }) - - // ===== Limited Multimodal Models (text + image + audio + video, NO document) ===== - - describe('gemini-2.5-flash (no document)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-2.5-flash'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, AudioPart, and VideoPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow DocumentPart', () => { - expectTypeOf>().not.toExtend() - }) - }) - - describe('gemini-2.5-flash-preview-09-2025 (no document)', () => { - type Modalities = - GeminiModelInputModalitiesByName['gemini-2.5-flash-preview-09-2025'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, AudioPart, and VideoPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT 
allow DocumentPart', () => { - expectTypeOf>().not.toExtend() - }) - }) - - describe('gemini-2.0-flash (no document)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-2.0-flash'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, AudioPart, and VideoPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow DocumentPart', () => { - expectTypeOf>().not.toExtend() - }) - }) - - describe('gemini-2.0-flash-lite (no document)', () => { - type Modalities = GeminiModelInputModalitiesByName['gemini-2.0-flash-lite'] - type Message = ConstrainedModelMessage - - it('should allow TextPart, ImagePart, AudioPart, and VideoPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow DocumentPart', () => { - expectTypeOf>().not.toExtend() - }) - }) -}) diff --git a/packages/typescript/ai-gemini/tests/usage-extraction.test.ts b/packages/typescript/ai-gemini/tests/usage-extraction.test.ts new file mode 100644 index 00000000..7ee46db5 --- /dev/null +++ b/packages/typescript/ai-gemini/tests/usage-extraction.test.ts @@ -0,0 +1,353 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { chat } from '@tanstack/ai' +import { GeminiTextAdapter } from '../src/adapters/text' +import type { StreamChunk } from '@tanstack/ai' + +const mocks = vi.hoisted(() => { + return { + constructorSpy: vi.fn<(options: { apiKey: string }) => void>(), + generateContentStreamSpy: vi.fn(), + } +}) + +vi.mock('@google/genai', async () => { + const { constructorSpy, generateContentStreamSpy } = mocks + + const actual = await vi.importActual('@google/genai') + class MockGoogleGenAI { + public models = { + generateContentStream: generateContentStreamSpy, + } + + constructor(options: { apiKey: string }) { + constructorSpy(options) + } + } + + return { + GoogleGenAI: MockGoogleGenAI, + Type: actual.Type, + FinishReason: actual.FinishReason, + } +}) + +const createAdapter = () => + new GeminiTextAdapter({ apiKey: 'test-key' }, 'gemini-2.5-pro') + +function createMockStream( + chunks: Array>, +): AsyncIterable> { + return { + // eslint-disable-next-line @typescript-eslint/require-await + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + }, + } +} + +describe('Gemini usage extraction', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('extracts basic token usage from usageMetadata', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 100, + candidatesTokenCount: 50, + totalTokenCount: 150, + }, + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toMatchObject({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + }) + + it('extracts cached content token count', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { 
+ promptTokenCount: 100, + candidatesTokenCount: 50, + totalTokenCount: 150, + cachedContentTokenCount: 25, + }, + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toMatchObject({ + cachedTokens: 25, + }) + }) + + it('extracts thoughts/reasoning tokens', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 100, + candidatesTokenCount: 50, + totalTokenCount: 150, + thoughtsTokenCount: 30, + }, + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.completionTokensDetails).toMatchObject({ + reasoningTokens: 30, + }) + }) + + it('extracts modality token breakdown for prompt', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 150, + candidatesTokenCount: 50, + totalTokenCount: 200, + promptTokensDetails: [ + { modality: 'TEXT', tokenCount: 100 }, + { modality: 'IMAGE', tokenCount: 50 }, + ], + }, + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toMatchObject({ + textTokens: 100, + imageTokens: 50, + }) + }) + + it('extracts modality token breakdown for completion', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 100, + candidatesTokenCount: 80, + totalTokenCount: 180, + candidatesTokensDetails: [ + { modality: 'TEXT', tokenCount: 50 }, + { modality: 'AUDIO', tokenCount: 30 }, + ], + }, + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.completionTokensDetails).toMatchObject({ + textTokens: 50, + audioTokens: 30, + }) + }) + + it('extracts provider-specific traffic type', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 100, + candidatesTokenCount: 50, + totalTokenCount: 150, + trafficType: 'ON_DEMAND', + }, + }, + ]) + + 
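+    // trafficType has no field in the normalized TokenUsage schema, so it should pass through via providerUsageDetails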
mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.providerUsageDetails).toMatchObject({ + trafficType: 'ON_DEMAND', + }) + }) + + it('extracts tool use prompt token count', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + usageMetadata: { + promptTokenCount: 100, + candidatesTokenCount: 50, + totalTokenCount: 150, + toolUsePromptTokenCount: 20, + }, + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.providerUsageDetails).toMatchObject({ + toolUsePromptTokenCount: 20, + }) + }) + + it('handles response with no usage metadata', async () => { + const mockStream = createMockStream([ + { + candidates: [ + { + content: { + parts: [{ text: 'Hello world' }], + }, + finishReason: 'STOP', + }, + ], + // No usageMetadata + }, + ]) + + mocks.generateContentStreamSpy.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + // When no usageMetadata is provided, usage is undefined + expect(doneChunk?.usage).toBeUndefined() + }) +}) diff --git a/packages/typescript/ai-gemini/tsconfig.json b/packages/typescript/ai-gemini/tsconfig.json index ea11c109..9028fa3b 100644 --- a/packages/typescript/ai-gemini/tsconfig.json +++ b/packages/typescript/ai-gemini/tsconfig.json @@ -1,9 +1,8 @@ { "extends": "../../../tsconfig.json", "compilerOptions": { - "outDir": "dist", - "rootDir": "src" + "outDir": "dist" }, - "include": ["src/**/*.ts", "src/**/*.tsx"], + "include": ["src/**/*.ts", "src/**/*.tsx", "tests/**/*.ts"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index bef2ffaf..54d47ce1 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -1,6 +1,7 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' +import { buildGrokUsage } from '../usage' import { createGrokClient, generateId, @@ -145,6 +146,7 @@ export class GrokTextAdapter< return { data: transformed, rawText, + usage: buildGrokUsage(response.usage), } } catch (error: unknown) { const err = error as Error @@ -257,13 +259,7 @@ export class GrokTextAdapter< id: responseId, model: chunk.model || options.model, timestamp, - usage: chunk.usage - ? 
{ - promptTokens: chunk.usage.prompt_tokens || 0, - completionTokens: chunk.usage.completion_tokens || 0, - totalTokens: chunk.usage.total_tokens || 0, - } - : undefined, + usage: buildGrokUsage(chunk.usage), finishReason: choice.finish_reason === 'tool_calls' || toolCallsInProgress.size > 0 diff --git a/packages/typescript/ai-grok/src/usage.ts b/packages/typescript/ai-grok/src/usage.ts new file mode 100644 index 00000000..c9101209 --- /dev/null +++ b/packages/typescript/ai-grok/src/usage.ts @@ -0,0 +1,55 @@ +import { buildBaseUsage } from '@tanstack/ai' +import type { TokenUsage } from '@tanstack/ai' +import type OpenAI_SDK from 'openai' + +/** + * Build normalized TokenUsage from Grok's Chat Completions usage + * Uses same format as OpenAI Chat Completions (not Responses API) + */ +export function buildGrokUsage( + usage: OpenAI_SDK.Chat.Completions.ChatCompletion['usage'] | undefined | null, +): TokenUsage | undefined { + if (!usage) return undefined + + const result = buildBaseUsage({ + promptTokens: usage.prompt_tokens || 0, + completionTokens: usage.completion_tokens || 0, + totalTokens: usage.total_tokens || 0, + }) + + // Check for completion tokens details (reasoning tokens, etc.) + // Grok (via OpenAI-compatible API) may provide these for reasoning models + const completionDetails = usage.completion_tokens_details + + const completionTokensDetails = { + ...(completionDetails?.reasoning_tokens && + completionDetails.reasoning_tokens > 0 + ? { reasoningTokens: completionDetails.reasoning_tokens } + : {}), + ...(completionDetails?.audio_tokens && completionDetails.audio_tokens > 0 + ? { audioTokens: completionDetails.audio_tokens } + : {}), + } + + // Check for prompt tokens details (cached tokens, etc.) + const promptDetails = usage.prompt_tokens_details + + const promptTokensDetails = { + ...(promptDetails?.cached_tokens && promptDetails.cached_tokens > 0 + ? { cachedTokens: promptDetails.cached_tokens } + : {}), + ...(promptDetails?.audio_tokens && promptDetails.audio_tokens > 0 + ? 
{ audioTokens: promptDetails.audio_tokens } + : {}), + } + + // Add details only if non-empty + if (Object.keys(completionTokensDetails).length > 0) { + result.completionTokensDetails = completionTokensDetails + } + if (Object.keys(promptTokensDetails).length > 0) { + result.promptTokensDetails = promptTokensDetails + } + + return result +} diff --git a/packages/typescript/ai-grok/tests/grok-adapter.test.ts b/packages/typescript/ai-grok/tests/grok-adapter.test.ts index 09373f50..ed22104f 100644 --- a/packages/typescript/ai-grok/tests/grok-adapter.test.ts +++ b/packages/typescript/ai-grok/tests/grok-adapter.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, afterEach } from 'vitest' +import { afterEach, describe, expect, it, vi } from 'vitest' import { createGrokText, grokText } from '../src/adapters/text' import { createGrokImage, grokImage } from '../src/adapters/image' import { createGrokSummarize, grokSummarize } from '../src/adapters/summarize' @@ -21,11 +21,11 @@ describe('Grok adapters', () => { it('creates a text adapter from environment variable', () => { vi.stubEnv('XAI_API_KEY', 'env-api-key') - const adapter = grokText('grok-4-0709') + const adapter = grokText('grok-4') expect(adapter).toBeDefined() expect(adapter.kind).toBe('text') - expect(adapter.model).toBe('grok-4-0709') + expect(adapter.model).toBe('grok-4') }) it('throws if XAI_API_KEY is not set when using grokText', () => { @@ -84,7 +84,7 @@ describe('Grok adapters', () => { it('creates a summarize adapter from environment variable', () => { vi.stubEnv('XAI_API_KEY', 'env-api-key') - const adapter = grokSummarize('grok-4-0709') + const adapter = grokSummarize('grok-4') expect(adapter).toBeDefined() expect(adapter.kind).toBe('summarize') diff --git a/packages/typescript/ai-grok/tests/usage-extraction.test.ts b/packages/typescript/ai-grok/tests/usage-extraction.test.ts new file mode 100644 index 00000000..739e3601 --- /dev/null +++ b/packages/typescript/ai-grok/tests/usage-extraction.test.ts @@ -0,0 +1,322 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { chat } from '@tanstack/ai' +import { GrokTextAdapter } from '../src/adapters/text' +import type { StreamChunk } from '@tanstack/ai' + +const mocks = vi.hoisted(() => { + const chatCompletionsCreate = vi.fn() + return { chatCompletionsCreate } +}) + +vi.mock('openai', () => { + const { chatCompletionsCreate } = mocks + + class MockOpenAI { + chat = { + completions: { + create: chatCompletionsCreate, + }, + } + + constructor(_: { apiKey: string; baseURL?: string }) {} + } + + return { default: MockOpenAI } +}) + +const createAdapter = () => + new GrokTextAdapter({ apiKey: 'test-key' }, 'grok-3') + +function createMockStream( + chunks: Array>, +): AsyncIterable> { + return { + // eslint-disable-next-line @typescript-eslint/require-await + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + }, + } +} + +describe('Grok usage extraction', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('extracts basic token usage from chat completions', async () => { + const mockStream = createMockStream([ + { + id: 'chatcmpl-123', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + }, + }, + ]) + + mocks.chatCompletionsCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await 
(const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toMatchObject({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + }) + + it('extracts prompt tokens details with cached tokens', async () => { + const mockStream = createMockStream([ + { + id: 'chatcmpl-123', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + prompt_tokens_details: { + cached_tokens: 25, + }, + }, + }, + ]) + + mocks.chatCompletionsCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toEqual({ + cachedTokens: 25, + }) + }) + + it('extracts completion tokens details with reasoning tokens', async () => { + const mockStream = createMockStream([ + { + id: 'chatcmpl-123', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + completion_tokens_details: { + reasoning_tokens: 30, + }, + }, + }, + ]) + + mocks.chatCompletionsCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.completionTokensDetails).toEqual({ + reasoningTokens: 30, + }) + }) + + it('extracts audio tokens in prompt details', async () => { + const mockStream = createMockStream([ + { + id: 'chatcmpl-123', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + prompt_tokens_details: { + audio_tokens: 15, + }, + }, + }, + ]) + + mocks.chatCompletionsCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toEqual({ + audioTokens: 15, + }) + }) + + it('handles response with no usage data', async () => { + const mockStream = createMockStream([ + { + id: 'chatcmpl-123', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + // No usage field + }, + ]) + + mocks.chatCompletionsCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 
'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toBeUndefined() + }) + + it('omits empty details when all values are zero', async () => { + const mockStream = createMockStream([ + { + id: 'chatcmpl-123', + choices: [ + { + delta: { content: 'Hello world' }, + finish_reason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + choices: [ + { + delta: {}, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + prompt_tokens_details: { + cached_tokens: 0, + audio_tokens: 0, + }, + completion_tokens_details: { + reasoning_tokens: 0, + }, + }, + }, + ]) + + mocks.chatCompletionsCreate.mockResolvedValueOnce(mockStream) + + const chunks: Array = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toBeUndefined() + expect(doneChunk?.usage?.completionTokensDetails).toBeUndefined() + }) +}) diff --git a/packages/typescript/ai-grok/tsconfig.json b/packages/typescript/ai-grok/tsconfig.json index ea11c109..2d3b235e 100644 --- a/packages/typescript/ai-grok/tsconfig.json +++ b/packages/typescript/ai-grok/tsconfig.json @@ -1,9 +1,8 @@ { "extends": "../../../tsconfig.json", "compilerOptions": { - "outDir": "dist", - "rootDir": "src" + "outDir": "dist" }, - "include": ["src/**/*.ts", "src/**/*.tsx"], + "include": ["src/**/*.ts", "src/**/*.tsx", "./tests/**/*.ts"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts index 6fecd965..8ab7f4b2 100644 --- a/packages/typescript/ai-ollama/src/adapters/text.ts +++ b/packages/typescript/ai-ollama/src/adapters/text.ts @@ -1,5 +1,5 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' - +import { buildOllamaUsage } from '../usage' import { createOllamaClient, generateId, getOllamaHostFromEnv } from '../utils' import type { @@ -172,6 +172,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< return { data: parsed, rawText, + usage: buildOllamaUsage(response), } } catch (error: unknown) { const err = error as Error @@ -228,6 +229,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< model: chunk.model, timestamp, finishReason: 'tool_calls', + usage: buildOllamaUsage(chunk), } continue } @@ -237,6 +239,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< model: chunk.model, timestamp, finishReason: hasEmittedToolCalls ? 
'tool_calls' : 'stop', + usage: buildOllamaUsage(chunk), } continue } diff --git a/packages/typescript/ai-ollama/src/index.ts b/packages/typescript/ai-ollama/src/index.ts index 58431482..50256820 100644 --- a/packages/typescript/ai-ollama/src/index.ts +++ b/packages/typescript/ai-ollama/src/index.ts @@ -40,3 +40,6 @@ export type { OllamaChatModelOptionsByName, OllamaModelInputModalitiesByName, } from './model-meta' + +// Export provider usage types +export type { OllamaProviderUsageDetails } from './usage' diff --git a/packages/typescript/ai-ollama/src/usage.ts b/packages/typescript/ai-ollama/src/usage.ts new file mode 100644 index 00000000..58bf296d --- /dev/null +++ b/packages/typescript/ai-ollama/src/usage.ts @@ -0,0 +1,69 @@ +import { buildBaseUsage } from '@tanstack/ai' +import type { TokenUsage } from '@tanstack/ai' +import type { ChatResponse } from 'ollama' + +/** + * Ollama-specific provider usage details. + * These fields are unique to Ollama and placed in providerUsageDetails. + */ +export interface OllamaProviderUsageDetails { + /** Time spent loading the model in nanoseconds */ + loadDuration?: number + /** Time spent evaluating the prompt in nanoseconds */ + promptEvalDuration?: number + /** Time spent generating the response in nanoseconds */ + evalDuration?: number + /** Total duration of the request in nanoseconds */ + totalDuration?: number + /** Number of prompt evaluation steps */ + promptEvalCount?: number + /** Number of evaluation steps for generation */ + evalCount?: number + /** Index signature for Record compatibility */ + [key: string]: unknown +} + +/** + * Build normalized TokenUsage from Ollama's ChatResponse. + * Handles duration metrics as provider-specific details. + */ +export function buildOllamaUsage( + response: ChatResponse, +): TokenUsage | undefined { + // Ollama provides prompt_eval_count and eval_count + const promptTokens = response.prompt_eval_count + const completionTokens = response.eval_count + + // If no token counts are available, return undefined + if (promptTokens === 0 && completionTokens === 0) { + return undefined + } + + const result = buildBaseUsage({ + promptTokens, + completionTokens, + totalTokens: promptTokens + completionTokens, + }) + + // Add provider-specific duration details + const providerDetails = { + ...(response.load_duration > 0 + ? { loadDuration: response.load_duration } + : {}), + ...(response.prompt_eval_duration > 0 + ? { promptEvalDuration: response.prompt_eval_duration } + : {}), + ...(response.eval_duration > 0 + ? { evalDuration: response.eval_duration } + : {}), + ...(response.total_duration > 0 + ? 
{ totalDuration: response.total_duration } + : {}), + } satisfies OllamaProviderUsageDetails + + if (Object.keys(providerDetails).length > 0) { + result.providerUsageDetails = providerDetails + } + + return result +} diff --git a/packages/typescript/ai-ollama/tests/usage-extraction.test.ts b/packages/typescript/ai-ollama/tests/usage-extraction.test.ts new file mode 100644 index 00000000..e134da6b --- /dev/null +++ b/packages/typescript/ai-ollama/tests/usage-extraction.test.ts @@ -0,0 +1,123 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { buildOllamaUsage } from '../src/usage' +import type { ChatResponse } from 'ollama' + +describe('Ollama usage extraction', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('extracts basic token usage from response', () => { + const response = { + model: 'llama3.2', + message: { role: 'assistant', content: 'Hello world' }, + done: true, + prompt_eval_count: 100, + eval_count: 50, + total_duration: 1000000000, + load_duration: 100000000, + prompt_eval_duration: 200000000, + eval_duration: 700000000, + } as unknown as ChatResponse + + const usage = buildOllamaUsage(response) + + expect(usage).toBeDefined() + expect(usage).toMatchObject({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + }) + + it('extracts provider-specific duration details', () => { + const response = { + model: 'llama3.2', + message: { role: 'assistant', content: 'Hello world' }, + done: true, + prompt_eval_count: 100, + eval_count: 50, + total_duration: 1000000000, + load_duration: 100000000, + prompt_eval_duration: 200000000, + eval_duration: 700000000, + } as unknown as ChatResponse + + const usage = buildOllamaUsage(response) + + expect(usage).toBeDefined() + expect(usage?.providerUsageDetails).toMatchObject({ + loadDuration: 100000000, + promptEvalDuration: 200000000, + evalDuration: 700000000, + totalDuration: 1000000000, + }) + }) + + it('handles response with zero token counts', () => { + const response = { + model: 'llama3.2', + message: { role: 'assistant', content: '' }, + done: true, + prompt_eval_count: 0, + eval_count: 0, + total_duration: 0, + load_duration: 0, + prompt_eval_duration: 0, + eval_duration: 0, + } as unknown as ChatResponse + + const usage = buildOllamaUsage(response) + + // When both token counts are 0, usage should be undefined + expect(usage).toBeUndefined() + }) + + it('omits provider details when durations are zero', () => { + const response = { + model: 'llama3.2', + message: { role: 'assistant', content: 'Hello world' }, + done: true, + prompt_eval_count: 100, + eval_count: 50, + total_duration: 0, + load_duration: 0, + prompt_eval_duration: 0, + eval_duration: 0, + } as unknown as ChatResponse + + const usage = buildOllamaUsage(response) + + expect(usage).toBeDefined() + expect(usage).toMatchObject({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + // providerUsageDetails should be undefined when all durations are 0 + expect(usage?.providerUsageDetails).toBeUndefined() + }) + + it('extracts partial duration details', () => { + const response = { + model: 'llama3.2', + message: { role: 'assistant', content: 'Hello world' }, + done: true, + prompt_eval_count: 100, + eval_count: 50, + total_duration: 1000000000, + load_duration: 0, + prompt_eval_duration: 200000000, + eval_duration: 0, + } as unknown as ChatResponse + + const usage = buildOllamaUsage(response) + + expect(usage).toBeDefined() + // Should only have non-zero duration fields + 
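+    // load_duration and eval_duration are 0 in this response, so only promptEvalDuration and totalDuration should be reported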
expect(usage?.providerUsageDetails).toEqual({ + promptEvalDuration: 200000000, + totalDuration: 1000000000, + }) + }) +}) diff --git a/packages/typescript/ai-ollama/tsconfig.json b/packages/typescript/ai-ollama/tsconfig.json index ea11c109..2d3b235e 100644 --- a/packages/typescript/ai-ollama/tsconfig.json +++ b/packages/typescript/ai-ollama/tsconfig.json @@ -1,9 +1,8 @@ { "extends": "../../../tsconfig.json", "compilerOptions": { - "outDir": "dist", - "rootDir": "src" + "outDir": "dist" }, - "include": ["src/**/*.ts", "src/**/*.tsx"], + "include": ["src/**/*.ts", "src/**/*.tsx", "./tests/**/*.ts"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index 8ca7cfd5..0fa9cd76 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -1,6 +1,7 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' +import { buildOpenAIUsage } from '../usage' import { createOpenAIClient, generateId, @@ -199,6 +200,7 @@ export class OpenAITextAdapter< return { data: transformed, rawText, + usage: buildOpenAIUsage(response.usage), } } catch (error: unknown) { const err = error as Error @@ -475,11 +477,7 @@ export class OpenAITextAdapter< id: responseId || genId(), model: model || options.model, timestamp, - usage: { - promptTokens: chunk.response.usage?.input_tokens || 0, - completionTokens: chunk.response.usage?.output_tokens || 0, - totalTokens: chunk.response.usage?.total_tokens || 0, - }, + usage: buildOpenAIUsage(chunk.response.usage), finishReason: hasFunctionCalls ? 'tool_calls' : 'stop', } } diff --git a/packages/typescript/ai-openai/src/adapters/transcription.ts b/packages/typescript/ai-openai/src/adapters/transcription.ts index c59b3eca..bdb8062a 100644 --- a/packages/typescript/ai-openai/src/adapters/transcription.ts +++ b/packages/typescript/ai-openai/src/adapters/transcription.ts @@ -7,6 +7,7 @@ import { import type { OPENAI_TRANSCRIPTION_MODELS } from '../model-meta' import type { OpenAITranscriptionProviderOptions } from '../audio/transcription-provider-options' import type { + TokenUsage, TranscriptionOptions, TranscriptionResult, TranscriptionSegment, @@ -14,6 +15,54 @@ import type { import type OpenAI_SDK from 'openai' import type { OpenAIClientConfig } from '../utils' +/** + * Build TokenUsage from transcription response. + * Whisper-1 uses duration-based billing, GPT-4o models use token-based billing. + */ +function buildTranscriptionUsage( + model: string, + duration?: number, + response?: OpenAI_SDK.Audio.TranscriptionCreateResponse, +): TokenUsage | undefined { + // GPT-4o transcription models return usage with tokens + if (model.startsWith('gpt-4o')) { + // Check if response has usage field (GPT-4o models may include this) + const usage = response?.usage as + | { + prompt_tokens?: number + completion_tokens?: number + total_tokens?: number + } + | undefined + + if (usage) { + return { + promptTokens: usage.prompt_tokens ?? 0, + completionTokens: usage.completion_tokens ?? 0, + totalTokens: usage.total_tokens ?? 
0, + promptTokensDetails: { + audioTokens: usage.prompt_tokens, + }, + completionTokensDetails: { + textTokens: usage.completion_tokens, + }, + } + } + } + + // Whisper-1 uses duration-based billing + if (duration !== undefined && duration > 0) { + return { + promptTokens: 0, + completionTokens: 0, + totalTokens: 0, + durationSeconds: duration, + } + } + + return undefined +} + /** * Configuration for OpenAI Transcription adapter */ @@ -98,6 +147,7 @@ export class OpenAITranscriptionAdapter< start: w.start, end: w.end, })), + usage: buildTranscriptionUsage(model, response.duration, response), } } else { const response = await this.client.audio.transcriptions.create(request) @@ -107,6 +157,7 @@ export class OpenAITranscriptionAdapter< model, text: typeof response === 'string' ? response : response.text, language, + usage: buildTranscriptionUsage(model, undefined, response), } } } diff --git a/packages/typescript/ai-openai/src/usage.ts b/packages/typescript/ai-openai/src/usage.ts new file mode 100644 index 00000000..78a8c9ce --- /dev/null +++ b/packages/typescript/ai-openai/src/usage.ts @@ -0,0 +1,42 @@ +import { buildBaseUsage } from '@tanstack/ai' +import type { TokenUsage } from '@tanstack/ai' +import type OpenAI_SDK from 'openai' + +/** + * Build normalized TokenUsage from OpenAI's ResponseUsage + */ +export function buildOpenAIUsage( + usage: OpenAI_SDK.Responses.ResponseUsage | undefined, +): TokenUsage | undefined { + if (!usage) return undefined + + const result = buildBaseUsage({ + promptTokens: usage.input_tokens || 0, + completionTokens: usage.output_tokens || 0, + totalTokens: usage.total_tokens || 0, + }) + + // Add prompt token details if available + // Note: Despite TypeScript types saying these are required, they can be undefined at runtime + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + const cachedTokens = usage.input_tokens_details?.cached_tokens + if (cachedTokens && cachedTokens > 0) { + result.promptTokensDetails = { + ...result.promptTokensDetails, + cachedTokens, + } + } + + // Add completion token details if available + // Note: Despite TypeScript types saying these are required, they can be undefined at runtime + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + const reasoningTokens = usage.output_tokens_details?.reasoning_tokens + if (reasoningTokens && reasoningTokens > 0) { + result.completionTokensDetails = { + ...result.completionTokensDetails, + reasoningTokens, + } + } + + return result +} diff --git a/packages/typescript/ai-openai/tests/image-adapter.test.ts b/packages/typescript/ai-openai/tests/image-adapter.test.ts index 49d3353c..ddd5926e 100644 --- a/packages/typescript/ai-openai/tests/image-adapter.test.ts +++ b/packages/typescript/ai-openai/tests/image-adapter.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest' +import { describe, expect, it, vi } from 'vitest' import { OpenAIImageAdapter, createOpenaiImage } from '../src/adapters/image' import { validateImageSize, @@ -174,8 +174,8 @@ describe('OpenAI Image Adapter', () => { expect(result.model).toBe('gpt-image-1') expect(result.images).toHaveLength(1) - expect(result.images[0].b64Json).toBe('base64encodedimage') - expect(result.images[0].revisedPrompt).toBe('A beautiful cat') + expect(result.images[0]?.b64Json).toBe('base64encodedimage') + expect(result.images[0]?.revisedPrompt).toBe('A beautiful cat') expect(result.usage).toEqual({ inputTokens: 10, outputTokens: 100, diff --git 
a/packages/typescript/ai-openai/tests/model-meta.test.ts b/packages/typescript/ai-openai/tests/model-meta.test.ts index 648deaa8..c736200a 100644 --- a/packages/typescript/ai-openai/tests/model-meta.test.ts +++ b/packages/typescript/ai-openai/tests/model-meta.test.ts @@ -1,36 +1,14 @@ -import { describe, it, expectTypeOf } from 'vitest' -import type { - OpenAIChatModelProviderOptionsByName, - OpenAIModelInputModalitiesByName, -} from '../src/model-meta' +import { describe, expectTypeOf, it } from 'vitest' +import type { OpenAIChatModelProviderOptionsByName } from '../src/model-meta' import type { OpenAIBaseOptions, + OpenAIMetadataOptions, OpenAIReasoningOptions, OpenAIReasoningOptionsWithConcise, + OpenAIStreamingOptions, OpenAIStructuredOutputOptions, OpenAIToolsOptions, - OpenAIStreamingOptions, - OpenAIMetadataOptions, } from '../src/text/text-provider-options' -import type { OpenAIMessageMetadataByModality } from '../src/message-types' -import type { - AudioPart, - ConstrainedModelMessage, - DocumentPart, - ImagePart, - Modality, - TextPart, - VideoPart, -} from '@tanstack/ai' - -/** - * Helper type to construct InputModalitiesTypes from modalities array and metadata. - * This is used to properly type ConstrainedModelMessage in tests. - */ -type MakeInputModalitiesTypes> = { - inputModalities: TModalities - messageMetadataByModality: OpenAIMessageMetadataByModality -} /** * Type assertion tests for OpenAI model provider options. @@ -341,8 +319,8 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { }) describe('Models WITH minimal features (Basic Models)', () => { - it('chatgpt-4.0 should only have streaming and base options', () => { - type Options = OpenAIChatModelProviderOptionsByName['chatgpt-4.0'] + it('chatgpt-4o-latest should only have streaming and base options', () => { + type Options = OpenAIChatModelProviderOptionsByName['chatgpt-4o-latest'] expectTypeOf().not.toExtend() expectTypeOf().not.toExtend() @@ -394,7 +372,7 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { describe('Chat-only models WITH reasoning AND structured output but WITHOUT tools', () => { it('gpt-5.1-chat should have reasoning and structured output but NOT tools', () => { - type Options = OpenAIChatModelProviderOptionsByName['gpt-5.1-chat'] + type Options = OpenAIChatModelProviderOptionsByName['gpt-5.1-chat-latest'] expectTypeOf().toExtend() expectTypeOf().toExtend() @@ -404,7 +382,7 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { }) it('gpt-5-chat should have reasoning and structured output but NOT tools', () => { - type Options = OpenAIChatModelProviderOptionsByName['gpt-5-chat'] + type Options = OpenAIChatModelProviderOptionsByName['gpt-5-chat-latest'] expectTypeOf().toExtend() expectTypeOf().toExtend() @@ -502,15 +480,15 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { expectTypeOf<'gpt-3.5-turbo'>().toExtend() // Basic models - expectTypeOf<'chatgpt-4.0'>().toExtend() + expectTypeOf<'chatgpt-4o-latest'>().toExtend() expectTypeOf<'gpt-audio'>().toExtend() expectTypeOf<'gpt-audio-mini'>().toExtend() expectTypeOf<'gpt-4o-audio'>().toExtend() expectTypeOf<'gpt-4o-mini-audio'>().toExtend() // Chat-only models - expectTypeOf<'gpt-5.1-chat'>().toExtend() - expectTypeOf<'gpt-5-chat'>().toExtend() + expectTypeOf<'gpt-5.1-chat-latest'>().toExtend() + expectTypeOf<'gpt-5-chat-latest'>().toExtend() // Codex/Preview models expectTypeOf<'gpt-5.1-codex-mini'>().toExtend() @@ -551,7 +529,7 @@ describe('OpenAI Chat Model 
Provider Options Type Assertions', () => { OpenAIChatModelProviderOptionsByName['gpt-3.5-turbo'] >().toHaveProperty('metadata') expectTypeOf< - OpenAIChatModelProviderOptionsByName['chatgpt-4.0'] + OpenAIChatModelProviderOptionsByName['chatgpt-4o-latest'] >().toHaveProperty('metadata') }) @@ -584,7 +562,7 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { OpenAIChatModelProviderOptionsByName['gpt-3.5-turbo'] >().toHaveProperty('store') expectTypeOf< - OpenAIChatModelProviderOptionsByName['chatgpt-4.0'] + OpenAIChatModelProviderOptionsByName['chatgpt-4o-latest'] >().toHaveProperty('store') }) @@ -617,7 +595,7 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { OpenAIChatModelProviderOptionsByName['gpt-3.5-turbo'] >().toHaveProperty('service_tier') expectTypeOf< - OpenAIChatModelProviderOptionsByName['chatgpt-4.0'] + OpenAIChatModelProviderOptionsByName['chatgpt-4o-latest'] >().toHaveProperty('service_tier') }) @@ -665,10 +643,10 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { OpenAIChatModelProviderOptionsByName['gpt-4o'] >().toHaveProperty('text') expectTypeOf< - OpenAIChatModelProviderOptionsByName['gpt-5.1-chat'] + OpenAIChatModelProviderOptionsByName['gpt-5.1-chat-latest'] >().toHaveProperty('text') expectTypeOf< - OpenAIChatModelProviderOptionsByName['gpt-5-chat'] + OpenAIChatModelProviderOptionsByName['gpt-5-chat-latest'] >().toHaveProperty('text') }) @@ -692,7 +670,7 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { OpenAIChatModelProviderOptionsByName['gpt-4'] >().toHaveProperty('stream_options') expectTypeOf< - OpenAIChatModelProviderOptionsByName['chatgpt-4.0'] + OpenAIChatModelProviderOptionsByName['chatgpt-4o-latest'] >().toHaveProperty('stream_options') }) }) @@ -806,13 +784,13 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { OpenAIChatModelProviderOptionsByName['gpt-3.5-turbo'] >().toExtend() expectTypeOf< - OpenAIChatModelProviderOptionsByName['chatgpt-4.0'] + OpenAIChatModelProviderOptionsByName['chatgpt-4o-latest'] >().toExtend() expectTypeOf< - OpenAIChatModelProviderOptionsByName['gpt-5.1-chat'] + OpenAIChatModelProviderOptionsByName['gpt-5.1-chat-latest'] >().toExtend() expectTypeOf< - OpenAIChatModelProviderOptionsByName['gpt-5-chat'] + OpenAIChatModelProviderOptionsByName['gpt-5-chat-latest'] >().toExtend() expectTypeOf< OpenAIChatModelProviderOptionsByName['computer-use-preview'] @@ -820,466 +798,3 @@ describe('OpenAI Chat Model Provider Options Type Assertions', () => { }) }) }) - -// Helper types for message with specific content -type MessageWithContent = { role: 'user'; content: Array } - -/** - * OpenAI Model Input Modality Type Assertions - * - * These tests verify that ConstrainedModelMessage correctly restricts - * content parts based on each model's supported input modalities. 
- */ -describe('OpenAI Model Input Modality Type Assertions', () => { - // ===== Models with text + image input ===== - - describe('gpt-5.1 (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5.1'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-5.1-codex (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5.1-codex'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-5 (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-5-mini (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5-mini'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-5-nano (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5-nano'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-5-pro (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5-pro'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-5-codex (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-5-codex'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-4.1 (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-4.1'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - 
expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-4.1-mini (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-4.1-mini'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-4.1-nano (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-4.1-nano'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('codex-mini-latest (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['codex-mini-latest'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('computer-use-preview (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['computer-use-preview'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o3 (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o3'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o3-pro (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o3-pro'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o3-deep-research (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o3-deep-research'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o4-mini-deep-research (text + image)', () => { - type Modalities = 
OpenAIModelInputModalitiesByName['o4-mini-deep-research'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o4-mini (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o4-mini'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o1 (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o1'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('o1-pro (text + image)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o1-pro'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and ImagePart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - // ===== Models with text + audio input ===== - - describe('gpt-audio (text + audio)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-audio'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and AudioPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow ImagePart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - describe('gpt-audio-mini (text + audio)', () => { - type Modalities = OpenAIModelInputModalitiesByName['gpt-audio-mini'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart and AudioPart', () => { - expectTypeOf>().toExtend() - expectTypeOf>().toExtend() - }) - - it('should NOT allow ImagePart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - // ===== Models with text only input ===== - - describe('o3-mini (text only)', () => { - type Modalities = OpenAIModelInputModalitiesByName['o3-mini'] - type Message = ConstrainedModelMessage> - - it('should allow TextPart', () => { - expectTypeOf>().toExtend() - }) - - it('should NOT allow ImagePart, AudioPart, VideoPart, or DocumentPart', () => { - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - expectTypeOf>().not.toExtend() - }) - }) - - // ===== String and null content tests ===== - - describe('String and null content should always be allowed', () => { - it('text+image models should allow string content', () => { - type GPT51Message = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['gpt-5.1'] - > - type O3Message = ConstrainedModelMessage< - 
OpenAIModelInputModalitiesByName['o3'] - > - - expectTypeOf<{ role: 'user'; content: string }>().toExtend() - expectTypeOf<{ role: 'user'; content: string }>().toExtend() - }) - - it('text-only models should allow string content', () => { - type O3MiniMessage = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['o3-mini'] - > - - expectTypeOf<{ - role: 'user' - content: string - }>().toExtend() - }) - - it('text+audio models should allow string content', () => { - type GPTAudioMessage = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['gpt-audio'] - > - - expectTypeOf<{ - role: 'user' - content: string - }>().toExtend() - }) - - it('all models should allow null content', () => { - type GPT51Message = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['gpt-5.1'] - > - type O3MiniMessage = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['o3-mini'] - > - type GPTAudioMessage = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['gpt-audio'] - > - - expectTypeOf<{ - role: 'assistant' - content: null - }>().toExtend() - expectTypeOf<{ - role: 'assistant' - content: null - }>().toExtend() - expectTypeOf<{ - role: 'assistant' - content: null - }>().toExtend() - }) - }) - - // ===== Mixed content part validation ===== - - describe('Mixed content part validation', () => { - it('should NOT allow mixing valid and invalid content parts', () => { - type GPT51Message = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['gpt-5.1'] - > - - // TextPart + VideoPart should NOT be allowed (GPT-5.1 doesn't support video) - expectTypeOf< - MessageWithContent - >().not.toExtend() - - // ImagePart + AudioPart should NOT be allowed (GPT-5.1 doesn't support audio) - expectTypeOf< - MessageWithContent - >().not.toExtend() - }) - - it('should allow mixing valid content parts', () => { - type GPT51Message = ConstrainedModelMessage< - OpenAIModelInputModalitiesByName['gpt-5.1'] - > - - // TextPart + ImagePart should be allowed - expectTypeOf< - MessageWithContent - >().toExtend() - }) - }) -}) diff --git a/packages/typescript/ai-openai/tests/openai-adapter.test.ts b/packages/typescript/ai-openai/tests/openai-adapter.test.ts index 552793a2..4cd78367 100644 --- a/packages/typescript/ai-openai/tests/openai-adapter.test.ts +++ b/packages/typescript/ai-openai/tests/openai-adapter.test.ts @@ -1,6 +1,7 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest' -import { chat, type Tool, type StreamChunk } from '@tanstack/ai' +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { chat } from '@tanstack/ai' import { OpenAITextAdapter } from '../src/adapters/text' +import type { StreamChunk, Tool } from '@tanstack/ai' import type { OpenAITextProviderOptions } from '../src/adapters/text' const createAdapter = ( @@ -18,6 +19,7 @@ function createMockChatCompletionsStream( chunks: Array>, ): AsyncIterable> { return { + // eslint-disable-next-line @typescript-eslint/require-await async *[Symbol.asyncIterator]() { for (const chunk of chunks) { yield chunk @@ -79,11 +81,11 @@ describe('OpenAI adapter option mapping', () => { tool_choice: 'required', } - const chunks: StreamChunk[] = [] + const chunks: Array = [] for await (const chunk of chat({ adapter, messages: [ - { role: 'system', content: 'Stay concise' }, + { role: 'user', content: 'Stay concise' }, { role: 'user', content: 'How is the weather?' 
}, { role: 'assistant', @@ -109,7 +111,7 @@ describe('OpenAI adapter option mapping', () => { } expect(responsesCreate).toHaveBeenCalledTimes(1) - const [payload] = responsesCreate.mock.calls[0] + const [payload] = responsesCreate.mock.calls[0] as any // Responses API uses different field names and structure expect(payload).toMatchObject({ diff --git a/packages/typescript/ai-openai/tests/usage-extraction.test.ts b/packages/typescript/ai-openai/tests/usage-extraction.test.ts new file mode 100644 index 00000000..9808afe7 --- /dev/null +++ b/packages/typescript/ai-openai/tests/usage-extraction.test.ts @@ -0,0 +1,286 @@ +import { describe, expect, it, vi } from 'vitest' +import { chat } from '@tanstack/ai' +import { OpenAITextAdapter } from '../src/adapters/text' +import type { StreamChunk } from '@tanstack/ai' + +const createAdapter = () => + new OpenAITextAdapter({ apiKey: 'test-key' }, 'gpt-4o-mini') + +function createMockResponsesStream( + chunks: Array>, +): AsyncIterable> { + return { + // eslint-disable-next-line @typescript-eslint/require-await + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield chunk + } + }, + } +} + +describe('OpenAI usage extraction', () => { + it('extracts basic token usage from response.done', async () => { + const mockStream = createMockResponsesStream([ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'in_progress', + created_at: 1234567890, + }, + }, + { + type: 'response.output_text.done', + text: 'Hello world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'completed', + output: [], + usage: { + input_tokens: 100, + output_tokens: 50, + total_tokens: 150, + }, + }, + }, + ]) + + const responsesCreate = vi.fn().mockResolvedValueOnce(mockStream) + const adapter = createAdapter() + ;(adapter as any).client = { + responses: { create: responsesCreate }, + } + + const chunks: Array = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toEqual({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + }) + + it('extracts prompt tokens details with cached tokens', async () => { + const mockStream = createMockResponsesStream([ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'in_progress', + created_at: 1234567890, + }, + }, + { + type: 'response.output_text.done', + text: 'Hello world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'completed', + output: [], + usage: { + input_tokens: 100, + output_tokens: 50, + total_tokens: 150, + input_tokens_details: { + cached_tokens: 25, + }, + }, + }, + }, + ]) + + const responsesCreate = vi.fn().mockResolvedValueOnce(mockStream) + const adapter = createAdapter() + ;(adapter as any).client = { + responses: { create: responsesCreate }, + } + + const chunks: Array = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toEqual({ + cachedTokens: 25, + }) + }) + + it('extracts completion tokens details with reasoning tokens', async () => { + const mockStream = 
createMockResponsesStream([ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'in_progress', + created_at: 1234567890, + }, + }, + { + type: 'response.output_text.done', + text: 'Hello world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'completed', + output: [], + usage: { + input_tokens: 100, + output_tokens: 50, + total_tokens: 150, + output_tokens_details: { + reasoning_tokens: 30, + }, + }, + }, + }, + ]) + + const responsesCreate = vi.fn().mockResolvedValueOnce(mockStream) + const adapter = createAdapter() + ;(adapter as any).client = { + responses: { create: responsesCreate }, + } + + const chunks: Array = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.completionTokensDetails).toEqual({ + reasoningTokens: 30, + }) + }) + + it('handles response with no usage data', async () => { + const mockStream = createMockResponsesStream([ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'in_progress', + created_at: 1234567890, + }, + }, + { + type: 'response.output_text.done', + text: 'Hello world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'completed', + output: [], + // No usage field + }, + }, + ]) + + const responsesCreate = vi.fn().mockResolvedValueOnce(mockStream) + const adapter = createAdapter() + ;(adapter as any).client = { + responses: { create: responsesCreate }, + } + + const chunks: Array = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toBeUndefined() + }) + + it('omits empty prompt details when all values are zero', async () => { + const mockStream = createMockResponsesStream([ + { + type: 'response.created', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'in_progress', + created_at: 1234567890, + }, + }, + { + type: 'response.output_text.done', + text: 'Hello world', + }, + { + type: 'response.completed', + response: { + id: 'resp-123', + model: 'gpt-4o-mini', + status: 'completed', + output: [], + usage: { + input_tokens: 100, + output_tokens: 50, + total_tokens: 150, + input_tokens_details: { + cached_tokens: 0, + audio_tokens: 0, + }, + }, + }, + }, + ]) + + const responsesCreate = vi.fn().mockResolvedValueOnce(mockStream) + const adapter = createAdapter() + ;(adapter as any).client = { + responses: { create: responsesCreate }, + } + + const chunks: Array = [] + for await (const chunk of chat({ + adapter, + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toBeUndefined() + }) +}) diff --git a/packages/typescript/ai-openai/tsconfig.json b/packages/typescript/ai-openai/tsconfig.json index ea11c109..9028fa3b 100644 --- a/packages/typescript/ai-openai/tsconfig.json +++ b/packages/typescript/ai-openai/tsconfig.json @@ -1,9 +1,8 @@ { "extends": "../../../tsconfig.json", "compilerOptions": { - "outDir": "dist", - "rootDir": "src" + "outDir": "dist" }, - "include": 
["src/**/*.ts", "src/**/*.tsx"], + "include": ["src/**/*.ts", "src/**/*.tsx", "tests/**/*.ts"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } diff --git a/packages/typescript/ai-openrouter/src/adapters/summarize.ts b/packages/typescript/ai-openrouter/src/adapters/summarize.ts index a046d8a2..1a1fc1f9 100644 --- a/packages/typescript/ai-openrouter/src/adapters/summarize.ts +++ b/packages/typescript/ai-openrouter/src/adapters/summarize.ts @@ -1,5 +1,6 @@ import { BaseSummarizeAdapter } from '@tanstack/ai/adapters' import { getOpenRouterApiKeyFromEnv } from '../utils' +import { buildOpenRouterUsage } from '../usage' import { OpenRouterTextAdapter } from './text' import type { StreamChunk, @@ -76,7 +77,7 @@ export class OpenRouterSummarizeAdapter< model = chunk.model } if (chunk.type === 'done' && chunk.usage) { - usage = chunk.usage + usage = buildOpenRouterUsage(chunk.usage)! } if (chunk.type === 'error') { throw new Error(`Error during summarization: ${chunk.error.message}`) diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index b0a8147f..7f95b95f 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -2,6 +2,7 @@ import { OpenRouter } from '@openrouter/sdk' import { RequestAbortedError } from '@openrouter/sdk/models/errors' import { BaseTextAdapter } from '@tanstack/ai/adapters' import { convertToolsToProviderFormat } from '../tools' +import { buildOpenRouterUsage } from '../usage' import { getOpenRouterApiKeyFromEnv, generateId as utilGenerateId, @@ -189,6 +190,7 @@ export class OpenRouterTextAdapter< return { data: parsed, rawText: toolCall.function.arguments || '', + usage: buildOpenRouterUsage(result.usage), } } @@ -205,6 +207,7 @@ export class OpenRouterTextAdapter< return { data: parsed, rawText: content, + usage: buildOpenRouterUsage(result.usage), } } catch (error: unknown) { if (error instanceof RequestAbortedError) { @@ -345,11 +348,7 @@ export class OpenRouterTextAdapter< : lastFinishReason === 'length' ? 'length' : 'stop', - usage: { - promptTokens: usage.promptTokens || 0, - completionTokens: usage.completionTokens || 0, - totalTokens: usage.totalTokens || 0, - }, + usage: buildOpenRouterUsage(usage), } } } diff --git a/packages/typescript/ai-openrouter/src/index.ts b/packages/typescript/ai-openrouter/src/index.ts index c66f89ff..c2f4889d 100644 --- a/packages/typescript/ai-openrouter/src/index.ts +++ b/packages/typescript/ai-openrouter/src/index.ts @@ -78,3 +78,6 @@ export { convertToolsToProviderFormat } from './tools/tool-converter' export { createWebSearchTool } from './tools/web-search-tool' export type { OpenRouterTool, FunctionTool, WebSearchTool } from './tools' + +// Export provider usage types +export type { OpenRouterProviderUsageDetails } from './usage' diff --git a/packages/typescript/ai-openrouter/src/usage.ts b/packages/typescript/ai-openrouter/src/usage.ts new file mode 100644 index 00000000..504cb07d --- /dev/null +++ b/packages/typescript/ai-openrouter/src/usage.ts @@ -0,0 +1,64 @@ +import { buildBaseUsage } from '@tanstack/ai' +import type { TokenUsage } from '@tanstack/ai' +import type { ChatGenerationTokenUsage } from '@openrouter/sdk/models' + +/** + * OpenRouter-specific provider usage details. + * These fields are unique to OpenRouter and placed in providerUsageDetails. 
+ */ +export interface OpenRouterProviderUsageDetails { + /** Accepted prediction tokens (speculative decoding) */ + acceptedPredictionTokens?: number + /** Rejected prediction tokens (speculative decoding) */ + rejectedPredictionTokens?: number + /** Index signature for Record compatibility */ + [key: string]: unknown +} + +/** + * Build normalized TokenUsage from OpenRouter's ChatGenerationTokenUsage + * OpenRouter already has the detail fields structured correctly + */ +export function buildOpenRouterUsage( + usage: ChatGenerationTokenUsage | undefined, +): TokenUsage | undefined { + if (!usage) return undefined + + const result = buildBaseUsage({ + promptTokens: usage.promptTokens || 0, + completionTokens: usage.completionTokens || 0, + totalTokens: usage.totalTokens || 0, + }) + + // Add prompt token details (passthrough from SDK) + if (usage.promptTokensDetails) { + result.promptTokensDetails = usage.promptTokensDetails ?? undefined + } + + // Map completion tokens details (passthrough from SDK) + if (usage.completionTokensDetails) { + const details = usage.completionTokensDetails + result.completionTokensDetails = { + ...(details.reasoningTokens + ? { reasoningTokens: details.reasoningTokens } + : {}), + ...(details.audioTokens ? { audioTokens: details.audioTokens } : {}), + } + + // Add OpenRouter-specific prediction tokens to providerUsageDetails + const providerDetails = { + ...(details.acceptedPredictionTokens + ? { acceptedPredictionTokens: details.acceptedPredictionTokens } + : {}), + ...(details.rejectedPredictionTokens + ? { rejectedPredictionTokens: details.rejectedPredictionTokens } + : {}), + } satisfies OpenRouterProviderUsageDetails + + if (Object.keys(providerDetails).length > 0) { + result.providerUsageDetails = providerDetails + } + } + + return result +} diff --git a/packages/typescript/ai-openrouter/tests/usage-extraction.test.ts b/packages/typescript/ai-openrouter/tests/usage-extraction.test.ts new file mode 100644 index 00000000..f6086c64 --- /dev/null +++ b/packages/typescript/ai-openrouter/tests/usage-extraction.test.ts @@ -0,0 +1,307 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { chat } from '@tanstack/ai' +import { createOpenRouterText } from '../src/adapters/text' +import type { Mock } from 'vitest' +import type { StreamChunk } from '@tanstack/ai' + +let mockSend: Mock + +vi.mock('@openrouter/sdk', () => { + return { + OpenRouter: class { + chat = { + send: (...args: Array<unknown>) => mockSend(...args), + } + }, + } +}) + +const createAdapter = () => + createOpenRouterText('openai/gpt-4o-mini', 'test-key') + +function createAsyncIterable<T>(chunks: Array<T>): AsyncIterable<T> { + return { + [Symbol.asyncIterator]() { + let index = 0 + return { + // eslint-disable-next-line @typescript-eslint/require-await + async next() { + if (index < chunks.length) { + return { value: chunks[index++]!, done: false } + } + return { value: undefined as T, done: true } + }, + } + }, + } +} + +describe('OpenRouter usage extraction', () => { + beforeEach(() => { + vi.clearAllMocks() + mockSend = vi.fn() + }) + + it('extracts basic token usage from stream', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: { content: 'Hello world' }, + finishReason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }, + }, + ] + + 
mockSend.mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve({}) + }) + + const chunks: Array<StreamChunk> = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage).toMatchObject({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + }) + }) + + it('extracts prompt tokens details', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: { content: 'Hello world' }, + finishReason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + promptTokensDetails: { + cachedTokens: 25, + }, + }, + }, + ] + + mockSend.mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve({}) + }) + + const chunks: Array<StreamChunk> = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.promptTokensDetails).toEqual({ + cachedTokens: 25, + }) + }) + + it('extracts completion tokens details with reasoning tokens', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: { content: 'Hello world' }, + finishReason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + completionTokensDetails: { + reasoningTokens: 30, + }, + }, + }, + ] + + mockSend.mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve({}) + }) + + const chunks: Array<StreamChunk> = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + expect(doneChunk?.usage?.completionTokensDetails).toEqual({ + reasoningTokens: 30, + }) + }) + + it('extracts completion tokens details with prediction tokens', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: { content: 'Hello world' }, + finishReason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 100, + completionTokens: 50, + totalTokens: 150, + completionTokensDetails: { + acceptedPredictionTokens: 20, + rejectedPredictionTokens: 5, + }, + }, + }, + ] + + mockSend.mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve({}) + }) + + const chunks: Array<StreamChunk> = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const doneChunk = 
chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeDefined() + // Prediction tokens are OpenRouter-specific, so they go in providerUsageDetails + expect(doneChunk?.usage?.providerUsageDetails).toEqual({ + acceptedPredictionTokens: 20, + rejectedPredictionTokens: 5, + }) + }) + + it('handles response with no usage data - no done chunk emitted', async () => { + const streamChunks = [ + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: { content: 'Hello world' }, + finishReason: null, + }, + ], + }, + { + id: 'chatcmpl-123', + model: 'openai/gpt-4o-mini', + choices: [ + { + delta: {}, + finishReason: 'stop', + }, + ], + // No usage field + }, + ] + + mockSend.mockImplementation((params) => { + if (params.stream) { + return Promise.resolve(createAsyncIterable(streamChunks)) + } + return Promise.resolve({}) + }) + + const chunks: Array<StreamChunk> = [] + for await (const chunk of chat({ + adapter: createAdapter(), + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + // When usage is not provided, the adapter doesn't emit a done chunk + const doneChunk = chunks.find((c) => c.type === 'done') + expect(doneChunk).toBeUndefined() + }) +}) diff --git a/packages/typescript/ai/src/activities/chat/adapter.ts b/packages/typescript/ai/src/activities/chat/adapter.ts index ac7e96e3..5ac26bd5 100644 --- a/packages/typescript/ai/src/activities/chat/adapter.ts +++ b/packages/typescript/ai/src/activities/chat/adapter.ts @@ -4,6 +4,7 @@ import type { Modality, StreamChunk, TextOptions, + TokenUsage, } from '../../types' /** @@ -35,6 +36,8 @@ export interface StructuredOutputResult<T> { data: T /** The raw text response from the model before parsing */ rawText: string + /** Token usage information (if provided by the adapter) */ + usage?: TokenUsage } /** diff --git a/packages/typescript/ai/src/event-client.ts b/packages/typescript/ai/src/event-client.ts index b76d6705..87a7e3f2 100644 --- a/packages/typescript/ai/src/event-client.ts +++ b/packages/typescript/ai/src/event-client.ts @@ -1,5 +1,5 @@ import { EventClient } from '@tanstack/devtools-event-client' -import type { MessagePart, ToolCall } from './types' +import type { MessagePart, TokenUsage, ToolCall } from './types' /** * Tool call states - track the lifecycle of a tool call @@ -21,12 +21,6 @@ export type ToolResultState = | 'complete' // Result is complete | 'error' // Error occurred -export interface TokenUsage { - promptTokens: number - completionTokens: number - totalTokens: number -} - export interface ImageUsage { inputTokens?: number outputTokens?: number diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts index d65008cd..7745653b 100644 --- a/packages/typescript/ai/src/index.ts +++ b/packages/typescript/ai/src/index.ts @@ -73,6 +73,9 @@ export { // All types export * from './types' +// Usage utilities +export { buildBaseUsage, type BaseUsageInput } from './utilities/usage' + // Event client + event types export * from './event-client' diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index 7c49d995..86b35a84 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -692,14 +692,68 @@ export interface ToolResultStreamChunk extends BaseStreamChunk { content: string } +/** + * Detailed breakdown of prompt/input token usage. + * Fields are populated based on provider support. 
+ */ +export interface PromptTokensDetails { + /** Tokens read from cache */ + cachedTokens?: number + /** Tokens written to cache */ + cacheWriteTokens?: number + + /** Audio input tokens */ + audioTokens?: number + /** Video input tokens */ + videoTokens?: number + /** Image input tokens */ + imageTokens?: number + /** Text input tokens */ + textTokens?: number +} + +/** + * Detailed breakdown of completion/output token usage. + * Fields are populated based on provider support. + */ +export interface CompletionTokensDetails { + /** Reasoning/thinking tokens */ + reasoningTokens?: number + /** Audio output tokens */ + audioTokens?: number + /** Video output tokens */ + videoTokens?: number + /** Image output tokens */ + imageTokens?: number + /** Text output tokens */ + textTokens?: number +} + +/** + * Token usage information with optional detailed breakdowns. + * Core fields are always present, detail fields are provider-dependent. + */ +export interface TokenUsage { + /** Total input/prompt tokens */ + promptTokens: number + /** Total output/completion tokens */ + completionTokens: number + /** Total tokens (prompt + completion) */ + totalTokens: number + /** Detailed breakdown of prompt tokens by category */ + promptTokensDetails?: PromptTokensDetails + /** Detailed breakdown of completion tokens by category */ + completionTokensDetails?: CompletionTokensDetails + /** Duration in seconds for duration-based billing (e.g., Whisper-1 transcription) */ + durationSeconds?: number + /** Provider-specific usage details not covered by standard fields */ + providerUsageDetails?: Record<string, unknown> +} + export interface DoneStreamChunk extends BaseStreamChunk { type: 'done' finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null - usage?: { - promptTokens: number - completionTokens: number - totalTokens: number - } + usage?: TokenUsage } export interface ErrorStreamChunk extends BaseStreamChunk { @@ -755,11 +809,7 @@ export interface TextCompletionChunk { content: string role?: 'assistant' finishReason?: 'stop' | 'length' | 'content_filter' | null - usage?: { - promptTokens: number - completionTokens: number - totalTokens: number - } + usage?: TokenUsage } export interface SummarizationOptions { @@ -774,11 +824,7 @@ export interface SummarizationResult { id: string model: string summary: string - usage: { - promptTokens: number - completionTokens: number - totalTokens: number - } + usage: TokenUsage } // ============================================================================ @@ -1015,6 +1061,8 @@ export interface TranscriptionResult { segments?: Array /** Word-level timestamps, if available */ words?: Array + /** Token usage information (if provided by the adapter) */ + usage?: TokenUsage } /** diff --git a/packages/typescript/ai/src/utilities/usage.ts b/packages/typescript/ai/src/utilities/usage.ts new file mode 100644 index 00000000..f606e34f --- /dev/null +++ b/packages/typescript/ai/src/utilities/usage.ts @@ -0,0 +1,39 @@ +import type { TokenUsage } from '../types' + +/** + * Input parameters for building base TokenUsage. + * Provider functions should extract these from their SDK's response. + */ +export interface BaseUsageInput { + /** Total input/prompt tokens */ + promptTokens: number + /** Total output/completion tokens */ + completionTokens: number + /** Total tokens (prompt + completion) */ + totalTokens: number +} + +/** + * Builds the base TokenUsage object with core fields. + * Provider-specific functions should use this and then add their own details. 
+ * + * @param input - The base token counts + * @returns A TokenUsage object with promptTokens, completionTokens, totalTokens + * + * @example + * ```typescript + * const base = buildBaseUsage({ + * promptTokens: 100, + * completionTokens: 50, + * totalTokens: 150 + * }); + * // Returns: { promptTokens: 100, completionTokens: 50, totalTokens: 150 } + * ``` + */ +export function buildBaseUsage(input: BaseUsageInput): TokenUsage { + return { + promptTokens: input.promptTokens, + completionTokens: input.completionTokens, + totalTokens: input.totalTokens, + } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bac95717..507bebe8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -625,7 +625,7 @@ importers: packages/typescript/ai-anthropic: dependencies: '@anthropic-ai/sdk': - specifier: ^0.71.0 + specifier: ^0.71.2 version: 0.71.2(zod@4.2.1) devDependencies: '@tanstack/ai': @@ -2891,6 +2891,11 @@ packages: cpu: [arm] os: [android] + '@rollup/rollup-android-arm-eabi@4.56.0': + resolution: {integrity: sha512-LNKIPA5k8PF1+jAFomGe3qN3bbIgJe/IlpDBwuVjrDKrJhVWywgnJvflMt/zkbVNLFtF1+94SljYQS6e99klnw==} + cpu: [arm] + os: [android] + '@rollup/rollup-android-arm64@4.53.3': resolution: {integrity: sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==} cpu: [arm64] @@ -2901,6 +2906,11 @@ packages: cpu: [arm64] os: [android] + '@rollup/rollup-android-arm64@4.56.0': + resolution: {integrity: sha512-lfbVUbelYqXlYiU/HApNMJzT1E87UPGvzveGg2h0ktUNlOCxKlWuJ9jtfvs1sKHdwU4fzY7Pl8sAl49/XaEk6Q==} + cpu: [arm64] + os: [android] + '@rollup/rollup-darwin-arm64@4.53.3': resolution: {integrity: sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==} cpu: [arm64] @@ -2911,6 +2921,11 @@ packages: cpu: [arm64] os: [darwin] + '@rollup/rollup-darwin-arm64@4.56.0': + resolution: {integrity: sha512-EgxD1ocWfhoD6xSOeEEwyE7tDvwTgZc8Bss7wCWe+uc7wO8G34HHCUH+Q6cHqJubxIAnQzAsyUsClt0yFLu06w==} + cpu: [arm64] + os: [darwin] + '@rollup/rollup-darwin-x64@4.53.3': resolution: {integrity: sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==} cpu: [x64] @@ -2921,6 +2936,11 @@ packages: cpu: [x64] os: [darwin] + '@rollup/rollup-darwin-x64@4.56.0': + resolution: {integrity: sha512-1vXe1vcMOssb/hOF8iv52A7feWW2xnu+c8BV4t1F//m9QVLTfNVpEdja5ia762j/UEJe2Z1jAmEqZAK42tVW3g==} + cpu: [x64] + os: [darwin] + '@rollup/rollup-freebsd-arm64@4.53.3': resolution: {integrity: sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==} cpu: [arm64] @@ -2931,6 +2951,11 @@ packages: cpu: [arm64] os: [freebsd] + '@rollup/rollup-freebsd-arm64@4.56.0': + resolution: {integrity: sha512-bof7fbIlvqsyv/DtaXSck4VYQ9lPtoWNFCB/JY4snlFuJREXfZnm+Ej6yaCHfQvofJDXLDMTVxWscVSuQvVWUQ==} + cpu: [arm64] + os: [freebsd] + '@rollup/rollup-freebsd-x64@4.53.3': resolution: {integrity: sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==} cpu: [x64] @@ -2941,6 +2966,11 @@ packages: cpu: [x64] os: [freebsd] + '@rollup/rollup-freebsd-x64@4.56.0': + resolution: {integrity: sha512-KNa6lYHloW+7lTEkYGa37fpvPq+NKG/EHKM8+G/g9WDU7ls4sMqbVRV78J6LdNuVaeeK5WB9/9VAFbKxcbXKYg==} + cpu: [x64] + os: [freebsd] + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': resolution: {integrity: sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==} cpu: [arm] @@ -2951,6 +2981,11 @@ packages: cpu: [arm] os: [linux] + '@rollup/rollup-linux-arm-gnueabihf@4.56.0': + 
resolution: {integrity: sha512-E8jKK87uOvLrrLN28jnAAAChNq5LeCd2mGgZF+fGF5D507WlG/Noct3lP/QzQ6MrqJ5BCKNwI9ipADB6jyiq2A==} + cpu: [arm] + os: [linux] + '@rollup/rollup-linux-arm-musleabihf@4.53.3': resolution: {integrity: sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==} cpu: [arm] @@ -2961,6 +2996,11 @@ packages: cpu: [arm] os: [linux] + '@rollup/rollup-linux-arm-musleabihf@4.56.0': + resolution: {integrity: sha512-jQosa5FMYF5Z6prEpTCCmzCXz6eKr/tCBssSmQGEeozA9tkRUty/5Vx06ibaOP9RCrW1Pvb8yp3gvZhHwTDsJw==} + cpu: [arm] + os: [linux] + '@rollup/rollup-linux-arm64-gnu@4.53.3': resolution: {integrity: sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==} cpu: [arm64] @@ -2971,6 +3011,11 @@ packages: cpu: [arm64] os: [linux] + '@rollup/rollup-linux-arm64-gnu@4.56.0': + resolution: {integrity: sha512-uQVoKkrC1KGEV6udrdVahASIsaF8h7iLG0U0W+Xn14ucFwi6uS539PsAr24IEF9/FoDtzMeeJXJIBo5RkbNWvQ==} + cpu: [arm64] + os: [linux] + '@rollup/rollup-linux-arm64-musl@4.53.3': resolution: {integrity: sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==} cpu: [arm64] @@ -2981,6 +3026,11 @@ packages: cpu: [arm64] os: [linux] + '@rollup/rollup-linux-arm64-musl@4.56.0': + resolution: {integrity: sha512-vLZ1yJKLxhQLFKTs42RwTwa6zkGln+bnXc8ueFGMYmBTLfNu58sl5/eXyxRa2RarTkJbXl8TKPgfS6V5ijNqEA==} + cpu: [arm64] + os: [linux] + '@rollup/rollup-linux-loong64-gnu@4.53.3': resolution: {integrity: sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==} cpu: [loong64] @@ -2991,11 +3041,21 @@ packages: cpu: [loong64] os: [linux] + '@rollup/rollup-linux-loong64-gnu@4.56.0': + resolution: {integrity: sha512-FWfHOCub564kSE3xJQLLIC/hbKqHSVxy8vY75/YHHzWvbJL7aYJkdgwD/xGfUlL5UV2SB7otapLrcCj2xnF1dg==} + cpu: [loong64] + os: [linux] + '@rollup/rollup-linux-loong64-musl@4.55.1': resolution: {integrity: sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==} cpu: [loong64] os: [linux] + '@rollup/rollup-linux-loong64-musl@4.56.0': + resolution: {integrity: sha512-z1EkujxIh7nbrKL1lmIpqFTc/sr0u8Uk0zK/qIEFldbt6EDKWFk/pxFq3gYj4Bjn3aa9eEhYRlL3H8ZbPT1xvA==} + cpu: [loong64] + os: [linux] + '@rollup/rollup-linux-ppc64-gnu@4.53.3': resolution: {integrity: sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==} cpu: [ppc64] @@ -3006,11 +3066,21 @@ packages: cpu: [ppc64] os: [linux] + '@rollup/rollup-linux-ppc64-gnu@4.56.0': + resolution: {integrity: sha512-iNFTluqgdoQC7AIE8Q34R3AuPrJGJirj5wMUErxj22deOcY7XwZRaqYmB6ZKFHoVGqRcRd0mqO+845jAibKCkw==} + cpu: [ppc64] + os: [linux] + '@rollup/rollup-linux-ppc64-musl@4.55.1': resolution: {integrity: sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==} cpu: [ppc64] os: [linux] + '@rollup/rollup-linux-ppc64-musl@4.56.0': + resolution: {integrity: sha512-MtMeFVlD2LIKjp2sE2xM2slq3Zxf9zwVuw0jemsxvh1QOpHSsSzfNOTH9uYW9i1MXFxUSMmLpeVeUzoNOKBaWg==} + cpu: [ppc64] + os: [linux] + '@rollup/rollup-linux-riscv64-gnu@4.53.3': resolution: {integrity: sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==} cpu: [riscv64] @@ -3021,6 +3091,11 @@ packages: cpu: [riscv64] os: [linux] + '@rollup/rollup-linux-riscv64-gnu@4.56.0': + resolution: {integrity: sha512-in+v6wiHdzzVhYKXIk5U74dEZHdKN9KH0Q4ANHOTvyXPG41bajYRsy7a8TPKbYPl34hU7PP7hMVHRvv/5aCSew==} + cpu: [riscv64] + os: [linux] + 
'@rollup/rollup-linux-riscv64-musl@4.53.3': resolution: {integrity: sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==} cpu: [riscv64] @@ -3031,6 +3106,11 @@ packages: cpu: [riscv64] os: [linux] + '@rollup/rollup-linux-riscv64-musl@4.56.0': + resolution: {integrity: sha512-yni2raKHB8m9NQpI9fPVwN754mn6dHQSbDTwxdr9SE0ks38DTjLMMBjrwvB5+mXrX+C0npX0CVeCUcvvvD8CNQ==} + cpu: [riscv64] + os: [linux] + '@rollup/rollup-linux-s390x-gnu@4.53.3': resolution: {integrity: sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==} cpu: [s390x] @@ -3041,6 +3121,11 @@ packages: cpu: [s390x] os: [linux] + '@rollup/rollup-linux-s390x-gnu@4.56.0': + resolution: {integrity: sha512-zhLLJx9nQPu7wezbxt2ut+CI4YlXi68ndEve16tPc/iwoylWS9B3FxpLS2PkmfYgDQtosah07Mj9E0khc3Y+vQ==} + cpu: [s390x] + os: [linux] + '@rollup/rollup-linux-x64-gnu@4.53.3': resolution: {integrity: sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==} cpu: [x64] @@ -3051,6 +3136,11 @@ packages: cpu: [x64] os: [linux] + '@rollup/rollup-linux-x64-gnu@4.56.0': + resolution: {integrity: sha512-MVC6UDp16ZSH7x4rtuJPAEoE1RwS8N4oK9DLHy3FTEdFoUTCFVzMfJl/BVJ330C+hx8FfprA5Wqx4FhZXkj2Kw==} + cpu: [x64] + os: [linux] + '@rollup/rollup-linux-x64-musl@4.53.3': resolution: {integrity: sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==} cpu: [x64] @@ -3061,11 +3151,21 @@ packages: cpu: [x64] os: [linux] + '@rollup/rollup-linux-x64-musl@4.56.0': + resolution: {integrity: sha512-ZhGH1eA4Qv0lxaV00azCIS1ChedK0V32952Md3FtnxSqZTBTd6tgil4nZT5cU8B+SIw3PFYkvyR4FKo2oyZIHA==} + cpu: [x64] + os: [linux] + '@rollup/rollup-openbsd-x64@4.55.1': resolution: {integrity: sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==} cpu: [x64] os: [openbsd] + '@rollup/rollup-openbsd-x64@4.56.0': + resolution: {integrity: sha512-O16XcmyDeFI9879pEcmtWvD/2nyxR9mF7Gs44lf1vGGx8Vg2DRNx11aVXBEqOQhWb92WN4z7fW/q4+2NYzCbBA==} + cpu: [x64] + os: [openbsd] + '@rollup/rollup-openharmony-arm64@4.53.3': resolution: {integrity: sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==} cpu: [arm64] @@ -3076,6 +3176,11 @@ packages: cpu: [arm64] os: [openharmony] + '@rollup/rollup-openharmony-arm64@4.56.0': + resolution: {integrity: sha512-LhN/Reh+7F3RCgQIRbgw8ZMwUwyqJM+8pXNT6IIJAqm2IdKkzpCh/V9EdgOMBKuebIrzswqy4ATlrDgiOwbRcQ==} + cpu: [arm64] + os: [openharmony] + '@rollup/rollup-win32-arm64-msvc@4.53.3': resolution: {integrity: sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==} cpu: [arm64] @@ -3086,6 +3191,11 @@ packages: cpu: [arm64] os: [win32] + '@rollup/rollup-win32-arm64-msvc@4.56.0': + resolution: {integrity: sha512-kbFsOObXp3LBULg1d3JIUQMa9Kv4UitDmpS+k0tinPBz3watcUiV2/LUDMMucA6pZO3WGE27P7DsfaN54l9ing==} + cpu: [arm64] + os: [win32] + '@rollup/rollup-win32-ia32-msvc@4.53.3': resolution: {integrity: sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==} cpu: [ia32] @@ -3096,6 +3206,11 @@ packages: cpu: [ia32] os: [win32] + '@rollup/rollup-win32-ia32-msvc@4.56.0': + resolution: {integrity: sha512-vSSgny54D6P4vf2izbtFm/TcWYedw7f8eBrOiGGecyHyQB9q4Kqentjaj8hToe+995nob/Wv48pDqL5a62EWtg==} + cpu: [ia32] + os: [win32] + '@rollup/rollup-win32-x64-gnu@4.53.3': resolution: {integrity: 
sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==} cpu: [x64] @@ -3106,6 +3221,11 @@ packages: cpu: [x64] os: [win32] + '@rollup/rollup-win32-x64-gnu@4.56.0': + resolution: {integrity: sha512-FeCnkPCTHQJFbiGG49KjV5YGW/8b9rrXAM2Mz2kiIoktq2qsJxRD5giEMEOD2lPdgs72upzefaUvS+nc8E3UzQ==} + cpu: [x64] + os: [win32] + '@rollup/rollup-win32-x64-msvc@4.53.3': resolution: {integrity: sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==} cpu: [x64] @@ -3116,6 +3236,11 @@ packages: cpu: [x64] os: [win32] + '@rollup/rollup-win32-x64-msvc@4.56.0': + resolution: {integrity: sha512-H8AE9Ur/t0+1VXujj90w0HrSOuv0Nq9r1vSZF2t5km20NTfosQsGGUXDaKdQZzwuLts7IyL1fYT4hM95TI9c4g==} + cpu: [x64] + os: [win32] + '@rushstack/node-core-library@5.7.0': resolution: {integrity: sha512-Ff9Cz/YlWu9ce4dmqNBZpA45AEya04XaBFIjV7xTVeEf+y/kTjEasmozqFELXlNG4ROdevss75JrrZ5WgufDkQ==} peerDependencies: @@ -7230,6 +7355,11 @@ packages: engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true + rollup@4.56.0: + resolution: {integrity: sha512-9FwVqlgUHzbXtDg9RCMgodF3Ua4Na6Gau+Sdt9vyCN4RhHfVKX2DCHy3BjMLTDd47ITDhYAnTwGulWTblJSDLg==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + rou3@0.7.12: resolution: {integrity: sha512-iFE4hLDuloSWcD7mjdCDhx2bKcIsYbtOTpfH5MHHLSKMOUyjqQXTeZVa289uuwEGEKFoE/BAPbhaU4B774nceg==} @@ -9832,141 +9962,216 @@ snapshots: '@rollup/rollup-android-arm-eabi@4.55.1': optional: true + '@rollup/rollup-android-arm-eabi@4.56.0': + optional: true + '@rollup/rollup-android-arm64@4.53.3': optional: true '@rollup/rollup-android-arm64@4.55.1': optional: true + '@rollup/rollup-android-arm64@4.56.0': + optional: true + '@rollup/rollup-darwin-arm64@4.53.3': optional: true '@rollup/rollup-darwin-arm64@4.55.1': optional: true + '@rollup/rollup-darwin-arm64@4.56.0': + optional: true + '@rollup/rollup-darwin-x64@4.53.3': optional: true '@rollup/rollup-darwin-x64@4.55.1': optional: true + '@rollup/rollup-darwin-x64@4.56.0': + optional: true + '@rollup/rollup-freebsd-arm64@4.53.3': optional: true '@rollup/rollup-freebsd-arm64@4.55.1': optional: true + '@rollup/rollup-freebsd-arm64@4.56.0': + optional: true + '@rollup/rollup-freebsd-x64@4.53.3': optional: true '@rollup/rollup-freebsd-x64@4.55.1': optional: true + '@rollup/rollup-freebsd-x64@4.56.0': + optional: true + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': optional: true '@rollup/rollup-linux-arm-gnueabihf@4.55.1': optional: true + '@rollup/rollup-linux-arm-gnueabihf@4.56.0': + optional: true + '@rollup/rollup-linux-arm-musleabihf@4.53.3': optional: true '@rollup/rollup-linux-arm-musleabihf@4.55.1': optional: true + '@rollup/rollup-linux-arm-musleabihf@4.56.0': + optional: true + '@rollup/rollup-linux-arm64-gnu@4.53.3': optional: true '@rollup/rollup-linux-arm64-gnu@4.55.1': optional: true + '@rollup/rollup-linux-arm64-gnu@4.56.0': + optional: true + '@rollup/rollup-linux-arm64-musl@4.53.3': optional: true '@rollup/rollup-linux-arm64-musl@4.55.1': optional: true + '@rollup/rollup-linux-arm64-musl@4.56.0': + optional: true + '@rollup/rollup-linux-loong64-gnu@4.53.3': optional: true '@rollup/rollup-linux-loong64-gnu@4.55.1': optional: true + '@rollup/rollup-linux-loong64-gnu@4.56.0': + optional: true + '@rollup/rollup-linux-loong64-musl@4.55.1': optional: true + '@rollup/rollup-linux-loong64-musl@4.56.0': + optional: true + '@rollup/rollup-linux-ppc64-gnu@4.53.3': optional: true '@rollup/rollup-linux-ppc64-gnu@4.55.1': optional: true + '@rollup/rollup-linux-ppc64-gnu@4.56.0': + 
optional: true + '@rollup/rollup-linux-ppc64-musl@4.55.1': optional: true + '@rollup/rollup-linux-ppc64-musl@4.56.0': + optional: true + '@rollup/rollup-linux-riscv64-gnu@4.53.3': optional: true '@rollup/rollup-linux-riscv64-gnu@4.55.1': optional: true + '@rollup/rollup-linux-riscv64-gnu@4.56.0': + optional: true + '@rollup/rollup-linux-riscv64-musl@4.53.3': optional: true '@rollup/rollup-linux-riscv64-musl@4.55.1': optional: true + '@rollup/rollup-linux-riscv64-musl@4.56.0': + optional: true + '@rollup/rollup-linux-s390x-gnu@4.53.3': optional: true '@rollup/rollup-linux-s390x-gnu@4.55.1': optional: true + '@rollup/rollup-linux-s390x-gnu@4.56.0': + optional: true + '@rollup/rollup-linux-x64-gnu@4.53.3': optional: true '@rollup/rollup-linux-x64-gnu@4.55.1': optional: true + '@rollup/rollup-linux-x64-gnu@4.56.0': + optional: true + '@rollup/rollup-linux-x64-musl@4.53.3': optional: true '@rollup/rollup-linux-x64-musl@4.55.1': optional: true + '@rollup/rollup-linux-x64-musl@4.56.0': + optional: true + '@rollup/rollup-openbsd-x64@4.55.1': optional: true + '@rollup/rollup-openbsd-x64@4.56.0': + optional: true + '@rollup/rollup-openharmony-arm64@4.53.3': optional: true '@rollup/rollup-openharmony-arm64@4.55.1': optional: true + '@rollup/rollup-openharmony-arm64@4.56.0': + optional: true + '@rollup/rollup-win32-arm64-msvc@4.53.3': optional: true '@rollup/rollup-win32-arm64-msvc@4.55.1': optional: true + '@rollup/rollup-win32-arm64-msvc@4.56.0': + optional: true + '@rollup/rollup-win32-ia32-msvc@4.53.3': optional: true '@rollup/rollup-win32-ia32-msvc@4.55.1': optional: true + '@rollup/rollup-win32-ia32-msvc@4.56.0': + optional: true + '@rollup/rollup-win32-x64-gnu@4.53.3': optional: true '@rollup/rollup-win32-x64-gnu@4.55.1': optional: true + '@rollup/rollup-win32-x64-gnu@4.56.0': + optional: true + '@rollup/rollup-win32-x64-msvc@4.53.3': optional: true '@rollup/rollup-win32-x64-msvc@4.55.1': optional: true + '@rollup/rollup-win32-x64-msvc@4.56.0': + optional: true + '@rushstack/node-core-library@5.7.0(@types/node@24.10.3)': dependencies: ajv: 8.13.0 @@ -15507,6 +15712,37 @@ snapshots: '@rollup/rollup-win32-x64-msvc': 4.55.1 fsevents: 2.3.3 + rollup@4.56.0: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.56.0 + '@rollup/rollup-android-arm64': 4.56.0 + '@rollup/rollup-darwin-arm64': 4.56.0 + '@rollup/rollup-darwin-x64': 4.56.0 + '@rollup/rollup-freebsd-arm64': 4.56.0 + '@rollup/rollup-freebsd-x64': 4.56.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.56.0 + '@rollup/rollup-linux-arm-musleabihf': 4.56.0 + '@rollup/rollup-linux-arm64-gnu': 4.56.0 + '@rollup/rollup-linux-arm64-musl': 4.56.0 + '@rollup/rollup-linux-loong64-gnu': 4.56.0 + '@rollup/rollup-linux-loong64-musl': 4.56.0 + '@rollup/rollup-linux-ppc64-gnu': 4.56.0 + '@rollup/rollup-linux-ppc64-musl': 4.56.0 + '@rollup/rollup-linux-riscv64-gnu': 4.56.0 + '@rollup/rollup-linux-riscv64-musl': 4.56.0 + '@rollup/rollup-linux-s390x-gnu': 4.56.0 + '@rollup/rollup-linux-x64-gnu': 4.56.0 + '@rollup/rollup-linux-x64-musl': 4.56.0 + '@rollup/rollup-openbsd-x64': 4.56.0 + '@rollup/rollup-openharmony-arm64': 4.56.0 + '@rollup/rollup-win32-arm64-msvc': 4.56.0 + '@rollup/rollup-win32-ia32-msvc': 4.56.0 + '@rollup/rollup-win32-x64-gnu': 4.56.0 + '@rollup/rollup-win32-x64-msvc': 4.56.0 + fsevents: 2.3.3 + rou3@0.7.12: {} router@2.2.0: @@ -16557,7 +16793,7 @@ snapshots: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.6 - rollup: 4.55.1 + rollup: 4.56.0 tinyglobby: 0.2.15 optionalDependencies: 
'@types/node': 24.10.3 @@ -16574,7 +16810,7 @@ snapshots: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.6 - rollup: 4.55.1 + rollup: 4.56.0 tinyglobby: 0.2.15 optionalDependencies: '@types/node': 25.0.1