@@ -24,13 +24,15 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
 // compatible with the OpenAI API. We can also rename it to `OpenAIHandler`.
 export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "OpenAI"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -174,10 +176,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		// Add max_tokens if needed
 		this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-		const stream = await this.client.chat.completions.create(
-			requestOptions,
-			isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-		)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(
+				requestOptions,
+				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+			)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		const matcher = new XmlMatcher(
 			"think",
@@ -236,10 +243,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		// Add max_tokens if needed
 		this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-		const response = await this.client.chat.completions.create(
-			requestOptions,
-			this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-		)
+		let response
+		try {
+			response = await this.client.chat.completions.create(
+				requestOptions,
+				this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+			)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 
 		yield {
 			type: "text",
@@ -281,15 +293,20 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {
-				throw new Error(`OpenAI completion error: ${error.message}`)
+				throw new Error(`${this.providerName} completion error: ${error.message}`)
 			}
 
 			throw error
@@ -327,10 +344,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield* this.handleStreamResponse(stream)
 		} else {
@@ -352,10 +374,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 
 			yield {
 				type: "text",
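The `handleOpenAIError` helper imported at the top of the diff lives in `./utils/openai-error-handler`, which is not shown here. A minimal sketch of what such a helper might look like, assuming it normalizes the OpenAI SDK's `APIError` into a provider-labeled `Error` (the message format and fallback branches below are illustrative assumptions, not the actual implementation):

```ts
// Hypothetical sketch of ./utils/openai-error-handler; the real module is not
// part of this diff, so the shape below is an assumption.
import OpenAI from "openai"

export function handleOpenAIError(error: unknown, providerName: string): Error {
	// The OpenAI SDK throws APIError for non-2xx responses; surfacing the HTTP
	// status makes misconfigured OpenAI-compatible endpoints easier to diagnose.
	if (error instanceof OpenAI.APIError) {
		return new Error(`${providerName} API error (${error.status ?? "unknown"}): ${error.message}`)
	}

	if (error instanceof Error) {
		return new Error(`${providerName} request failed: ${error.message}`)
	}

	return new Error(`${providerName} request failed: ${String(error)}`)
}
```

Returning the `Error` rather than throwing inside the helper matches how the diff uses it: every call site decides for itself with `throw handleOpenAIError(error, this.providerName)`, so streaming and non-streaming paths keep their own control flow while producing consistently labeled errors.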