@@ -1,7 +1,6 @@
 import type { CancellationToken } from 'vscode';
+import type { Response } from '@env/fetch';
 import type { AIModel } from './aiProviderService';
-import { getMaxCharacters, getValidatedTemperature } from './aiProviderService';
-import type { ChatMessage } from './openAICompatibleProvider';
 import { OpenAICompatibleProvider } from './openAICompatibleProvider';
 
 const provider = { id: 'anthropic', name: 'Anthropic' } as const;
@@ -98,66 +97,50 @@ export class AnthropicProvider extends OpenAICompatibleProvider<typeof provider.
 		};
 	}
 
-	override async fetch(
+	protected override fetchCore(
 		model: AIModel<typeof provider.id>,
 		apiKey: string,
-		messages: (maxCodeCharacters: number, retries: number) => ChatMessage[],
-		outputTokens: number,
+		request: object,
 		cancellation: CancellationToken | undefined,
-	): Promise<[result: string, maxCodeCharacters: number]> {
-		let retries = 0;
-		let maxCodeCharacters = getMaxCharacters(model, 2600);
-
-		while (true) {
-			// Split the system message from the rest of the messages
-			const [system, ...msgs] = messages(maxCodeCharacters, retries);
-
-			const request: AnthropicMessageRequest = {
-				model: model.id,
-				messages: msgs,
-				system: system.content,
-				stream: false,
-				max_tokens: Math.min(outputTokens, model.maxTokens.output),
-				temperature: getValidatedTemperature(model.temperature),
-			};
-
-			const rsp = await this.fetchCore(model, apiKey, request, cancellation);
-			if (!rsp.ok) {
-				if (rsp.status === 404) {
-					throw new Error(`Your API key doesn't seem to have access to the selected '${model.id}' model`);
-				}
-				if (rsp.status === 429) {
-					throw new Error(
-						`(${this.name}) ${rsp.status}: Too many requests (rate limit exceeded) or your API key is associated with an expired trial`,
-					);
-				}
-
-				let json;
-				try {
-					json = (await rsp.json()) as AnthropicError | undefined;
-				} catch {}
+	): Promise<Response> {
+		if ('max_completion_tokens' in request) {
+			const { max_completion_tokens: max, ...rest } = request;
+			request = max ? { max_tokens: max, ...rest } : rest;
+		}
+		return super.fetchCore(model, apiKey, request, cancellation);
+	}
 
-				debugger;
+	protected override async handleFetchFailure(
+		rsp: Response,
+		model: AIModel<typeof provider.id>,
+		retries: number,
+		maxCodeCharacters: number,
+	): Promise<{ retry: boolean; maxCodeCharacters: number }> {
+		if (rsp.status === 404) {
+			throw new Error(`Your API key doesn't seem to have access to the selected '${model.id}' model`);
+		}
+		if (rsp.status === 429) {
+			throw new Error(
+				`(${this.name}) ${rsp.status}: Too many requests (rate limit exceeded) or your account is out of funds`,
+			);
+		}
 
-				if (
-					retries++ < 2 &&
-					json?.error?.type === 'invalid_request_error' &&
-					json?.error?.message?.includes('prompt is too long')
-				) {
-					maxCodeCharacters -= 500 * retries;
-					continue;
-				}
+		let json;
+		try {
+			json = (await rsp.json()) as AnthropicError | undefined;
+		} catch {}
 
-				throw new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText})`);
-			}
+		debugger;
 
-			const data: AnthropicMessageResponse = await rsp.json();
-			const result = data.content
-				.map(c => c.text)
-				.join('\n')
-				.trim();
-			return [result, maxCodeCharacters];
+		if (
+			retries++ < 2 &&
+			json?.error?.type === 'invalid_request_error' &&
+			json?.error?.message?.includes('prompt is too long')
+		) {
+			return { retry: true, maxCodeCharacters: maxCodeCharacters - 500 * retries };
 		}
+
+		throw new Error(`(${this.name}) ${rsp.status}: ${json?.error?.message || rsp.statusText})`);
 	}
 }
 
@@ -175,31 +158,3 @@ interface AnthropicError {
 		message: string;
 	};
 }
-
-interface AnthropicMessageRequest {
-	model: AnthropicModel['id'];
-	messages: ChatMessage[];
-	system?: string;
-
-	max_tokens: number;
-	metadata?: object;
-	stop_sequences?: string[];
-	stream?: boolean;
-	temperature?: number;
-	top_p?: number;
-	top_k?: number;
-}
-
-interface AnthropicMessageResponse {
-	id: string;
-	type: 'message';
-	role: 'assistant';
-	content: { type: 'text'; text: string }[];
-	model: string;
-	stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence';
-	stop_sequence: string | null;
-	usage: {
-		input_tokens: number;
-		output_tokens: number;
-	};
-}
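Note: the retry loop removed from `fetch()` here presumably now lives in the shared `OpenAICompatibleProvider` base class, which this diff does not show, with the provider-specific pieces exposed through the new `fetchCore` and `handleFetchFailure` hooks. A minimal sketch of what such a loop could look like, assuming that division of responsibility; the function name, the `buildRequest` parameter, and the exact signatures below are illustrative, not the actual base-class API:

```ts
// Hypothetical sketch of a base-class retry loop built around the two hooks
// this commit introduces; not the actual OpenAICompatibleProvider code.
async function retryingFetch(
	fetchCore: (request: object) => Promise<Response>,
	handleFetchFailure: (
		rsp: Response,
		retries: number,
		maxCodeCharacters: number,
	) => Promise<{ retry: boolean; maxCodeCharacters: number }>,
	buildRequest: (maxCodeCharacters: number) => object, // assumed request builder
	maxCodeCharacters: number,
): Promise<Response> {
	for (let retries = 0; ; retries++) {
		const rsp = await fetchCore(buildRequest(maxCodeCharacters));
		if (rsp.ok) return rsp;

		// The hook throws for unrecoverable errors (404, 429, other API errors)
		// and returns a reduced character budget when the prompt was too long.
		const result = await handleFetchFailure(rsp, retries, maxCodeCharacters);
		if (!result.retry) return rsp;
		maxCodeCharacters = result.maxCodeCharacters;
	}
}
```

The Anthropic-specific `fetchCore` override additionally remaps `max_completion_tokens` to `max_tokens`, since Anthropic's Messages API uses the latter field name.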