
Commit 65146b1

Authored by roomote[bot] and daniel-lxs
fix: add error transform to cryptic openAI SDK errors when API key is invalid (#7586)
Co-authored-by: Roo Code <[email protected]>
Co-authored-by: Daniel Riccio <[email protected]>
1 parent 7935c94 commit 65146b1

27 files changed: +251 −75 lines
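The transform itself lives in a new helper, src/api/providers/utils/openai-error-handler.ts, which every provider below now imports. That file is one of the 27 changed but is not shown in this excerpt, so the following is only a minimal sketch of what such a transform might look like; the error matching and message wording are assumptions, not the committed implementation.

// Sketch only: the real handleOpenAIError added by this commit may differ.
import OpenAI from "openai"

export function handleOpenAIError(error: unknown, providerName: string): Error {
	if (error instanceof OpenAI.APIError) {
		// Invalid keys surface from the SDK as terse errors like
		// "401 status code (no body)"; replace them with something actionable.
		if (error.status === 401) {
			return new Error(`${providerName} authentication failed: check that your API key is valid.`)
		}
		return new Error(`${providerName} API error: ${error.message}`)
	}
	if (error instanceof Error) {
		return new Error(`${providerName} completion error: ${error.message}`)
	}
	return new Error(`${providerName} completion error: ${String(error)}`)
}

Note that the helper returns the new Error rather than throwing it, matching how every call site in the diff uses it: throw handleOpenAIError(error, this.providerName).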

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 7 additions & 6 deletions
@@ -10,6 +10,7 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
+import { handleOpenAIError } from "./utils/openai-error-handler"

 type BaseOpenAiCompatibleProviderOptions<ModelName extends string> = ApiHandlerOptions & {
 	providerName: string
@@ -86,7 +87,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 			params.temperature = this.options.modelTemperature
 		}

-		return this.client.chat.completions.create(params, requestOptions)
+		try {
+			return this.client.chat.completions.create(params, requestOptions)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 	}

 	override async *createMessage(
@@ -127,11 +132,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>

 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`${this.providerName} completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
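One subtlety in the first hunk above: the create() call is returned from the try block without an await, so the catch appears to cover only errors thrown synchronously while the SDK assembles the request; a rejected promise would propagate to whatever code awaits the result. The handlers changed below instead assign with await inside the try, which transforms asynchronous rejections as well. A distilled sketch of that pattern (createChatStream and "ExampleProvider" are illustrative names, not from the diff):

// Distilled form of the wrapping pattern the handlers below adopt.
async function createChatStream(
	client: OpenAI,
	params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming,
) {
	let stream
	try {
		// Awaiting inside the try means rejected promises are transformed too,
		// not just synchronous throws.
		stream = await client.chat.completions.create(params)
	} catch (error) {
		throw handleOpenAIError(error, "ExampleProvider")
	}
	return stream
}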

src/api/providers/huggingface.ts

Lines changed: 9 additions & 6 deletions
@@ -8,11 +8,13 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface"
+import { handleOpenAIError } from "./utils/openai-error-handler"

 export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: OpenAI
 	private options: ApiHandlerOptions
 	private modelCache: ModelRecord | null = null
+	private readonly providerName = "HuggingFace"

 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -64,7 +66,12 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 			params.max_tokens = this.options.modelMaxTokens
 		}

-		const stream = await this.client.chat.completions.create(params)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}

 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
@@ -97,11 +104,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion

 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Hugging Face completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}

src/api/providers/lm-studio.ts

Lines changed: 18 additions & 3 deletions
@@ -15,18 +15,23 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"

 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "LM Studio"

 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options

+		// LM Studio uses "noop" as a placeholder API key
+		const apiKey = "noop"
+
 		this.client = new OpenAI({
 			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
-			apiKey: "noop",
+			apiKey: apiKey,
 			timeout: getApiRequestTimeout(),
 		})
 	}
@@ -88,7 +93,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			params.draft_model = this.options.lmStudioDraftModelId
 		}

-		const results = await this.client.chat.completions.create(params)
+		let results
+		try {
+			results = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}

 		const matcher = new XmlMatcher(
 			"think",
@@ -164,7 +174,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			params.draft_model = this.options.lmStudioDraftModelId
 		}

-			const response = await this.client.chat.completions.create(params)
+			let response
+			try {
+				response = await this.client.chat.completions.create(params)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			throw new Error(

src/api/providers/ollama.ts

Lines changed: 27 additions & 15 deletions
@@ -14,12 +14,14 @@ import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"

 type CompletionUsage = OpenAI.Chat.Completions.ChatCompletionChunk["usage"]

 export class OllamaHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "Ollama"

 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -54,13 +56,18 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 			...(useR1Format ? convertToR1Format(messages) : convertToOpenAiMessages(messages)),
 		]

-		const stream = await this.client.chat.completions.create({
-			model: this.getModel().id,
-			messages: openAiMessages,
-			temperature: this.options.modelTemperature ?? 0,
-			stream: true,
-			stream_options: { include_usage: true },
-		})
+		let stream
+		try {
+			stream = await this.client.chat.completions.create({
+				model: this.getModel().id,
+				messages: openAiMessages,
+				temperature: this.options.modelTemperature ?? 0,
+				stream: true,
+				stream_options: { include_usage: true },
+			})
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 		const matcher = new XmlMatcher(
 			"think",
 			(chunk) =>
@@ -106,14 +113,19 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 		try {
 			const modelId = this.getModel().id
 			const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
-			const response = await this.client.chat.completions.create({
-				model: this.getModel().id,
-				messages: useR1Format
-					? convertToR1Format([{ role: "user", content: prompt }])
-					: [{ role: "user", content: prompt }],
-				temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
-				stream: false,
-			})
+			let response
+			try {
+				response = await this.client.chat.completions.create({
+					model: this.getModel().id,
+					messages: useR1Format
+						? convertToR1Format([{ role: "user", content: prompt }])
+						: [{ role: "user", content: prompt }],
+					temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+					stream: false,
+				})
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {

src/api/providers/openai.ts

Lines changed: 48 additions & 21 deletions
@@ -24,13 +24,15 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"

 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
 // compatible with the OpenAI API. We can also rename it to `OpenAIHandler`.
 export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
 	private client: OpenAI
+	private readonly providerName = "OpenAI"

 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -174,10 +176,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		// Add max_tokens if needed
 		this.addMaxTokensIfNeeded(requestOptions, modelInfo)

-		const stream = await this.client.chat.completions.create(
-			requestOptions,
-			isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-		)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(
+				requestOptions,
+				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+			)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}

 		const matcher = new XmlMatcher(
 			"think",
@@ -236,10 +243,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		// Add max_tokens if needed
 		this.addMaxTokensIfNeeded(requestOptions, modelInfo)

-		const response = await this.client.chat.completions.create(
-			requestOptions,
-			this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-		)
+		let response
+		try {
+			response = await this.client.chat.completions.create(
+				requestOptions,
+				this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+			)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}

 		yield {
 			type: "text",
@@ -281,15 +293,20 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)

-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}

 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {
-				throw new Error(`OpenAI completion error: ${error.message}`)
+				throw new Error(`${this.providerName} completion error: ${error.message}`)
 			}

 			throw error
@@ -327,10 +344,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)

-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}

 			yield* this.handleStreamResponse(stream)
 		} else {
@@ -352,10 +374,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)

-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}

 			yield {
 				type: "text",

src/api/providers/openrouter.ts

Lines changed: 14 additions & 2 deletions
@@ -25,6 +25,7 @@ import { getModelEndpoints } from "./fetchers/modelEndpointCache"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler } from "../index"
+import { handleOpenAIError } from "./utils/openai-error-handler"

 // Image generation types
 interface ImageGenerationResponse {
@@ -85,6 +86,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 	private client: OpenAI
 	protected models: ModelRecord = {}
 	protected endpoints: ModelRecord = {}
+	private readonly providerName = "OpenRouter"

 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -161,7 +163,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(reasoning && { reasoning }),
 		}

-		const stream = await this.client.chat.completions.create(completionParams)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}

 		let lastUsage: CompletionUsage | undefined = undefined

@@ -259,7 +266,12 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(reasoning && { reasoning }),
 		}

-		const response = await this.client.chat.completions.create(completionParams)
+		let response
+		try {
+			response = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}

 		if ("error" in response) {
 			const error = response.error as { message?: string; code?: number }
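Taken together, the effect is that an invalid key now fails with a provider-labeled message instead of the SDK's bare status-code error, while each handler's existing outer catch (where one exists) still wraps anything thrown after the request succeeds. A hypothetical caller-side view; the option field names and the exact transformed message text are assumptions, not taken from this diff:

// Hypothetical usage sketch; openAiApiKey/openAiModelId and the message
// shown in the comments are assumed, not confirmed by this commit.
const handler = new OpenAiHandler({
	openAiApiKey: "sk-invalid",
	openAiModelId: "gpt-4o",
})

try {
	await handler.completePrompt("ping")
} catch (error) {
	// Before this commit: a cryptic SDK error such as "401 status code (no body)".
	// After: a readable, provider-labeled message from handleOpenAIError.
	console.error((error as Error).message)
}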
