@@ -12,8 +12,9 @@ import {
   ProviderOptions,
 } from '../types.js';
 
-// Define model context window sizes for Anthropic models
-const ANTHROPIC_MODEL_LIMITS: Record<string, number> = {
+// Fallback model context window sizes for Anthropic models
+// Used only if the models.list() call fails or returns incomplete data
+const ANTHROPIC_MODEL_LIMITS_FALLBACK: Record<string, number> = {
   default: 200000,
   'claude-3-7-sonnet-20250219': 200000,
   'claude-3-7-sonnet-latest': 200000,
@@ -96,15 +97,27 @@ function addCacheControlToMessages(
   });
 }
 
-function tokenUsageFromMessage(message: Anthropic.Message, model: string) {
+// Cache for model context window sizes
+const modelContextWindowCache: Record<string, number> = {};
+
+function tokenUsageFromMessage(
+  message: Anthropic.Message,
+  model: string,
+  contextWindow?: number,
+) {
   const usage = new TokenUsage();
   usage.input = message.usage.input_tokens;
   usage.cacheWrites = message.usage.cache_creation_input_tokens ?? 0;
   usage.cacheReads = message.usage.cache_read_input_tokens ?? 0;
   usage.output = message.usage.output_tokens;
 
   const totalTokens = usage.input + usage.output;
-  const maxTokens = ANTHROPIC_MODEL_LIMITS[model] || 100000; // Default fallback
+  // Use the provided context window, or fall back to the cached value, then the hardcoded limits
+  const maxTokens =
+    contextWindow ||
+    modelContextWindowCache[model] ||
+    ANTHROPIC_MODEL_LIMITS_FALLBACK[model] ||
+    ANTHROPIC_MODEL_LIMITS_FALLBACK.default;
 
   return {
     usage,
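
A minimal sketch of how the new fallback chain resolves, for review purposes only. The `resolveMaxTokens` helper is hypothetical (the real logic is inlined in `tokenUsageFromMessage` above), and the map values here are trimmed down:

```ts
// Illustrative sketch, not part of the diff: resolution order for maxTokens.
const modelContextWindowCache: Record<string, number> = {};
const ANTHROPIC_MODEL_LIMITS_FALLBACK: Record<string, number> = {
  default: 200000,
  'claude-3-7-sonnet-latest': 200000,
};

function resolveMaxTokens(model: string, contextWindow?: number): number {
  return (
    contextWindow || // 1. window reported by models.list(), when the fetch succeeded
    modelContextWindowCache[model] || // 2. value cached by a previous fetch
    ANTHROPIC_MODEL_LIMITS_FALLBACK[model] || // 3. hardcoded per-model limit
    ANTHROPIC_MODEL_LIMITS_FALLBACK.default // 4. last-resort default
  );
}

// With nothing fetched or cached, even an unknown model still resolves:
// resolveMaxTokens('claude-x-unknown') === 200000
```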
@@ -123,6 +136,7 @@ export class AnthropicProvider implements LLMProvider {
   private client: Anthropic;
   private apiKey: string;
   private baseUrl?: string;
+  private modelContextWindow?: number;
 
   constructor(model: string, options: AnthropicOptions = {}) {
     this.model = model;
@@ -138,6 +152,32 @@ export class AnthropicProvider implements LLMProvider {
       apiKey: this.apiKey,
       ...(this.baseUrl && { baseURL: this.baseUrl }),
     });
+
+    // Initialize model context window detection (fire-and-forget; see the note below)
+    this.initializeModelContextWindow();
+  }
+
+  /**
+   * Fetches the model context window size from the Anthropic API
+   */
+  private async initializeModelContextWindow(): Promise<void> {
+    try {
+      const response = await this.client.models.list();
+      const model = response.data.find((m) => m.id === this.model);
+
+      // Use a type assertion to access the context_window property:
+      // the Anthropic API returns context_window, but it may not be in the TypeScript definitions
+      if (model && 'context_window' in model) {
+        this.modelContextWindow = (model as any).context_window;
+        // Cache the result for future use
+        modelContextWindowCache[this.model] = (model as any).context_window;
+      }
+    } catch (error) {
+      console.warn(
+        `Failed to fetch model context window for ${this.model}: ${(error as Error).message}`,
+      );
+      // Will fall back to the hardcoded limits
+    }
   }
 
   /**
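
Worth noting for reviewers: the constructor calls `initializeModelContextWindow()` without awaiting it, so any request issued before the `models.list()` round-trip finishes will still use the cached or hardcoded fallbacks. If callers ever need to wait for detection, one possible pattern is to expose the in-flight promise; a sketch under that assumption (the `ready` field is hypothetical and not part of this PR):

```ts
// Hypothetical variant, not in this diff: keep the detection promise around
// so callers can opt in to awaiting it.
class AnthropicProviderSketch {
  private modelContextWindow?: number;
  readonly ready: Promise<void>;

  constructor(private model: string) {
    // Fire off detection as before, but retain the promise.
    this.ready = this.initializeModelContextWindow().catch(() => {
      // Errors are swallowed; the fallback limits apply instead.
    });
  }

  private async initializeModelContextWindow(): Promise<void> {
    // ...same models.list() lookup as in the diff above...
  }
}

// Usage:
//   const provider = new AnthropicProviderSketch('claude-3-7-sonnet-latest');
//   await provider.ready; // optional: ensure detection settled before the first call
```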
@@ -198,7 +238,11 @@ export class AnthropicProvider implements LLMProvider {
       };
     });
 
-    const tokenInfo = tokenUsageFromMessage(response, this.model);
+    const tokenInfo = tokenUsageFromMessage(
+      response,
+      this.model,
+      this.modelContextWindow,
+    );
 
     return {
       text: content,
0 commit comments