Diff summary: 3 files changed, +23 additions, −1 deletion.
lines changed Original file line number Diff line number Diff line change @@ -118,6 +118,8 @@ export type AnthropicToolChoice =
118
118
export type AnthropicStreamUsage = {
119
119
input_tokens : number ;
120
120
output_tokens : number ;
121
+ cache_creation_input_tokens ?: number ;
122
+ cache_read_input_tokens ?: number ;
121
123
} ;
122
124
123
125
export type AnthropicStreamChunk =
Original file line number Diff line number Diff line change @@ -325,6 +325,13 @@ export const createAnthropicProxy = ({
325
325
usage : {
326
326
input_tokens : usage . inputTokens ,
327
327
output_tokens : usage . outputTokens ,
328
+ // OpenAI provides cached tokens via cachedInputTokens or in experimental_providerMetadata
329
+ // Map to Anthropic's cache_read_input_tokens
330
+ cache_creation_input_tokens : 0 , // OpenAI doesn't report cache creation separately
331
+ cache_read_input_tokens : usage . cachedInputTokens ??
332
+ ( typeof ( response as any ) . experimental_providerMetadata ?. openai ?. cached_tokens === 'number'
333
+ ? ( response as any ) . experimental_providerMetadata . openai . cached_tokens
334
+ : 0 ) ,
328
335
} ,
329
336
} )
330
337
) ;
Original file line number Diff line number Diff line change @@ -27,7 +27,12 @@ export function convertToAnthropicStream(
27
27
model : "claude-4-sonnet-20250514" ,
28
28
stop_reason : null ,
29
29
stop_sequence : null ,
30
- usage : { input_tokens : 0 , output_tokens : 0 } ,
30
+ usage : {
31
+ input_tokens : 0 ,
32
+ output_tokens : 0 ,
33
+ cache_creation_input_tokens : 0 ,
34
+ cache_read_input_tokens : 0 ,
35
+ } ,
31
36
} ,
32
37
} ) ;
33
38
break ;
@@ -42,6 +47,14 @@ export function convertToAnthropicStream(
42
47
usage : {
43
48
input_tokens : chunk . usage . inputTokens ?? 0 ,
44
49
output_tokens : chunk . usage . outputTokens ?? 0 ,
50
+ // OpenAI provides cached tokens via cachedInputTokens or in providerMetadata
51
+ cache_creation_input_tokens : 0 , // OpenAI doesn't report cache creation separately
52
+ cache_read_input_tokens :
53
+ chunk . usage . cachedInputTokens ??
54
+ ( typeof chunk . providerMetadata ?. openai ?. cached_tokens ===
55
+ "number"
56
+ ? chunk . providerMetadata . openai . cached_tokens
57
+ : 0 ) ,
45
58
} ,
46
59
} ) ;
47
60
break ;
You can’t perform that action at this time.
0 commit comments