Skip to content

Commit 00dc34f

Browse files
committed
Wire through token cache statistics
1 parent fb2941a commit 00dc34f

File tree

3 files changed

+23
-1
lines changed

3 files changed

+23
-1
lines changed

src/anthropic-api-types.ts

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -118,6 +118,8 @@ export type AnthropicToolChoice =
118118
export type AnthropicStreamUsage = {
119119
input_tokens: number;
120120
output_tokens: number;
121+
cache_creation_input_tokens?: number;
122+
cache_read_input_tokens?: number;
121123
};
122124

123125
export type AnthropicStreamChunk =

src/anthropic-proxy.ts

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -325,6 +325,13 @@ export const createAnthropicProxy = ({
325325
usage: {
326326
input_tokens: usage.inputTokens,
327327
output_tokens: usage.outputTokens,
328+
// OpenAI provides cached tokens via cachedInputTokens or in experimental_providerMetadata
329+
// Map to Anthropic's cache_read_input_tokens
330+
cache_creation_input_tokens: 0, // OpenAI doesn't report cache creation separately
331+
cache_read_input_tokens: usage.cachedInputTokens ??
332+
(typeof (response as any).experimental_providerMetadata?.openai?.cached_tokens === 'number'
333+
? (response as any).experimental_providerMetadata.openai.cached_tokens
334+
: 0),
328335
},
329336
})
330337
);

src/convert-to-anthropic-stream.ts

Lines changed: 14 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -27,7 +27,12 @@ export function convertToAnthropicStream(
2727
model: "claude-4-sonnet-20250514",
2828
stop_reason: null,
2929
stop_sequence: null,
30-
usage: { input_tokens: 0, output_tokens: 0 },
30+
usage: {
31+
input_tokens: 0,
32+
output_tokens: 0,
33+
cache_creation_input_tokens: 0,
34+
cache_read_input_tokens: 0,
35+
},
3136
},
3237
});
3338
break;
@@ -42,6 +47,14 @@ export function convertToAnthropicStream(
4247
usage: {
4348
input_tokens: chunk.usage.inputTokens ?? 0,
4449
output_tokens: chunk.usage.outputTokens ?? 0,
50+
// OpenAI provides cached tokens via cachedInputTokens or in providerMetadata
51+
cache_creation_input_tokens: 0, // OpenAI doesn't report cache creation separately
52+
cache_read_input_tokens:
53+
chunk.usage.cachedInputTokens ??
54+
(typeof chunk.providerMetadata?.openai?.cached_tokens ===
55+
"number"
56+
? chunk.providerMetadata.openai.cached_tokens
57+
: 0),
4558
},
4659
});
4760
break;

0 commit comments

Comments (0)