
Commit 5f8874b

feat: show localized 'API key contains invalid characters' at request-time; centralize in openai-error-handler; remove provider/index specifics; i18n: add invalidKeyInvalidChars, remove invalidKeyNonAscii
1 parent ce0fc7a commit 5f8874b

Showing 31 changed files with 264 additions and 257 deletions.
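
The centralized handler (src/api/providers/utils/openai-error-handler.ts) is among the 31 changed files but is not reproduced in this view. Below is a minimal sketch of the shape its call sites imply, assuming an i18n helper t and a common:errors namespace for the new invalidKeyInvalidChars key; neither the import path nor the key namespace is confirmed by this diff.

	// Sketch only; the real module is part of this commit but not shown in this view.
	import { t } from "../../../i18n" // assumed location of the translation helper

	// The OpenAI SDK rejects header values it cannot encode as a ByteString, which is what
	// happens when an API key contains characters outside Latin-1.
	const INVALID_KEY_CHAR_PATTERN = /Cannot convert argument to a ByteString/i

	export function handleOpenAIError(error: unknown, providerName: string): Error {
		if (error instanceof Error) {
			if (INVALID_KEY_CHAR_PATTERN.test(error.message)) {
				// Surface the localized "API key contains invalid characters" message at request time.
				return new Error(t("common:errors.invalidKeyInvalidChars", { provider: providerName }))
			}
			// Keep the previous per-provider wrapping for every other failure.
			return new Error(`${providerName} completion error: ${error.message}`)
		}
		return new Error(String(error))
	}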

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 7 additions & 10 deletions

@@ -10,7 +10,7 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
-import { validateApiKeyForByteString } from "./utils/api-key-validation"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 type BaseOpenAiCompatibleProviderOptions<ModelName extends string> = ApiHandlerOptions & {
 	providerName: string
@@ -56,9 +56,6 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 			throw new Error("API key is required")
 		}
 
-		// Validate API key for ByteString compatibility
-		validateApiKeyForByteString(this.options.apiKey, this.providerName)
-
 		this.client = new OpenAI({
 			baseURL,
 			apiKey: this.options.apiKey,
@@ -90,7 +87,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 			params.temperature = this.options.modelTemperature
 		}
 
-		return this.client.chat.completions.create(params, requestOptions)
+		try {
+			return this.client.chat.completions.create(params, requestOptions)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
 	}
 
 	override async *createMessage(
@@ -131,11 +132,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`${this.providerName} completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, this.providerName)
 		}
 	}
 

src/api/providers/huggingface.ts

Lines changed: 17 additions & 15 deletions

@@ -8,7 +8,7 @@ import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import { getHuggingFaceModels, getCachedHuggingFaceModels } from "./fetchers/huggingface"
-import { validateApiKeyForByteString } from "./utils/api-key-validation"
+import { createOpenAIClientWithErrorHandling, handleOpenAIError } from "./utils/openai-error-handler"
 
 export class HuggingFaceHandler extends BaseProvider implements SingleCompletionHandler {
 	private client: OpenAI
@@ -23,14 +23,15 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 			throw new Error("Hugging Face API key is required")
 		}
 
-		// Validate API key for ByteString compatibility
-		validateApiKeyForByteString(this.options.huggingFaceApiKey, "HuggingFace")
-
-		this.client = new OpenAI({
-			baseURL: "https://router.huggingface.co/v1",
-			apiKey: this.options.huggingFaceApiKey,
-			defaultHeaders: DEFAULT_HEADERS,
-		})
+		this.client = createOpenAIClientWithErrorHandling(
+			() =>
+				new OpenAI({
+					baseURL: "https://router.huggingface.co/v1",
+					apiKey: this.options.huggingFaceApiKey,
+					defaultHeaders: DEFAULT_HEADERS,
+				}),
+			"HuggingFace",
+		)
 
 		// Try to get cached models first
 		this.modelCache = getCachedHuggingFaceModels()
@@ -68,7 +69,12 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 			params.max_tokens = this.options.modelMaxTokens
 		}
 
-		const stream = await this.client.chat.completions.create(params)
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, "HuggingFace")
+		}
 
 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
@@ -101,11 +107,7 @@ export class HuggingFaceHandler extends BaseProvider implements SingleCompletion
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`Hugging Face completion error: ${error.message}`)
-			}
-
-			throw error
+			throw handleOpenAIError(error, "HuggingFace")
 		}
 	}
 

src/api/providers/lm-studio.ts

Lines changed: 14 additions & 5 deletions

@@ -15,7 +15,7 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { getApiRequestTimeout } from "./utils/timeout-config"
-import { validateApiKeyForByteString } from "./utils/api-key-validation"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -25,9 +25,8 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		super()
 		this.options = options
 
-		// LM Studio uses "noop" as a placeholder API key, but we should still validate if a real key is provided
+		// LM Studio uses "noop" as a placeholder API key
 		const apiKey = "noop"
-		validateApiKeyForByteString(apiKey, "LM Studio")
 
 		this.client = new OpenAI({
 			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
@@ -93,7 +92,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			params.draft_model = this.options.lmStudioDraftModelId
 		}
 
-		const results = await this.client.chat.completions.create(params)
+		let results
+		try {
+			results = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, "LM Studio")
+		}
 
 		const matcher = new XmlMatcher(
 			"think",
@@ -169,7 +173,12 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			params.draft_model = this.options.lmStudioDraftModelId
 		}
 
-		const response = await this.client.chat.completions.create(params)
+		let response
+		try {
+			response = await this.client.chat.completions.create(params)
+		} catch (error) {
+			throw handleOpenAIError(error, "LM Studio")
+		}
 		return response.choices[0]?.message.content || ""
 	} catch (error) {
 		throw new Error(

src/api/providers/ollama.ts

Lines changed: 26 additions & 19 deletions

@@ -14,7 +14,7 @@ import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
-import { validateApiKeyForByteString } from "./utils/api-key-validation"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 type CompletionUsage = OpenAI.Chat.Completions.ChatCompletionChunk["usage"]
 
@@ -30,9 +30,6 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 		// Otherwise use "ollama" as a placeholder for local instances
 		const apiKey = this.options.ollamaApiKey || "ollama"
 
-		// Validate API key for ByteString compatibility
-		validateApiKeyForByteString(apiKey, "Ollama")
-
 		const headers: Record<string, string> = {}
 		if (this.options.ollamaApiKey) {
 			headers["Authorization"] = `Bearer ${this.options.ollamaApiKey}`
@@ -58,13 +55,18 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 			...(useR1Format ? convertToR1Format(messages) : convertToOpenAiMessages(messages)),
 		]
 
-		const stream = await this.client.chat.completions.create({
-			model: this.getModel().id,
-			messages: openAiMessages,
-			temperature: this.options.modelTemperature ?? 0,
-			stream: true,
-			stream_options: { include_usage: true },
-		})
+		let stream
+		try {
+			stream = await this.client.chat.completions.create({
+				model: this.getModel().id,
+				messages: openAiMessages,
+				temperature: this.options.modelTemperature ?? 0,
+				stream: true,
+				stream_options: { include_usage: true },
+			})
+		} catch (error) {
+			throw handleOpenAIError(error, "Ollama")
+		}
 		const matcher = new XmlMatcher(
 			"think",
 			(chunk) =>
@@ -110,14 +112,19 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 		try {
 			const modelId = this.getModel().id
 			const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
-			const response = await this.client.chat.completions.create({
-				model: this.getModel().id,
-				messages: useR1Format
-					? convertToR1Format([{ role: "user", content: prompt }])
-					: [{ role: "user", content: prompt }],
-				temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
-				stream: false,
-			})
+			let response
+			try {
+				response = await this.client.chat.completions.create({
+					model: this.getModel().id,
+					messages: useR1Format
+						? convertToR1Format([{ role: "user", content: prompt }])
+						: [{ role: "user", content: prompt }],
+					temperature: this.options.modelTemperature ?? (useR1Format ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+					stream: false,
+				})
+			} catch (error) {
+				throw handleOpenAIError(error, "Ollama")
+			}
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {

src/api/providers/openai-native.ts

Lines changed: 5 additions & 5 deletions

@@ -22,7 +22,7 @@ import { getModelParams } from "../transform/model-params"
 
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
-import { validateApiKeyForByteString } from "./utils/api-key-validation"
+import { createOpenAIClientWithErrorHandling } from "./utils/openai-error-handler"
 
 export type OpenAiNativeModel = ReturnType<OpenAiNativeHandler["getModel"]>
 
@@ -61,10 +61,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		}
 		const apiKey = this.options.openAiNativeApiKey ?? "not-provided"
 
-		// Validate API key for ByteString compatibility
-		validateApiKeyForByteString(apiKey, "OpenAI Native")
-
-		this.client = new OpenAI({ baseURL: this.options.openAiNativeBaseUrl, apiKey })
+		this.client = createOpenAIClientWithErrorHandling(
+			() => new OpenAI({ baseURL: this.options.openAiNativeBaseUrl, apiKey }),
+			"OpenAI Native",
+		)
 	}
 
 	private normalizeUsage(usage: any, model: OpenAiNativeModel): ApiStreamUsageChunk | undefined {
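
The constructor changes in huggingface.ts and openai-native.ts above rely on createOpenAIClientWithErrorHandling, which is also not shown in this view. Under the assumption that it simply guards client construction (the OpenAI constructor can throw before any request is made, for example when the key cannot be encoded into a header), a plausible sketch that pairs with the handleOpenAIError sketch above:

	// Sketch only; the real implementation lives in the openai-error-handler module added by this commit.
	export function createOpenAIClientWithErrorHandling<T>(factory: () => T, providerName: string): T {
		try {
			return factory()
		} catch (error) {
			// Route constructor-time failures through the same centralized handler used at request time.
			throw handleOpenAIError(error, providerName)
		}
	}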

src/api/providers/openai.ts

Lines changed: 46 additions & 24 deletions

@@ -24,7 +24,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
-import { validateApiKeyForByteString } from "./utils/api-key-validation"
+import { handleOpenAIError } from "./utils/openai-error-handler"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -43,9 +43,6 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const urlHost = this._getUrlHost(this.options.openAiBaseUrl)
 		const isAzureOpenAi = urlHost === "azure.com" || urlHost.endsWith(".azure.com") || options.openAiUseAzure
 
-		// Validate API key for ByteString compatibility
-		validateApiKeyForByteString(apiKey, "OpenAI")
-
 		const headers = {
 			...DEFAULT_HEADERS,
 			...(this.options.openAiHeaders || {}),
@@ -178,10 +175,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, "OpenAI")
+			}
 
 			const matcher = new XmlMatcher(
 				"think",
@@ -240,10 +242,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, "OpenAI")
+			}
 
 			yield {
 				type: "text",
@@ -285,10 +292,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// Add max_tokens if needed
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, "OpenAI")
+			}
 
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
@@ -331,10 +343,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const stream = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, "OpenAI")
+			}
 
 			yield* this.handleStreamResponse(stream)
 		} else {
@@ -356,10 +373,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			// This allows O3 models to limit response length when includeMaxTokens is enabled
 			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
-			const response = await this.client.chat.completions.create(
-				requestOptions,
-				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
-			)
+			let response
+			try {
+				response = await this.client.chat.completions.create(
+					requestOptions,
+					methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
+				)
+			} catch (error) {
+				throw handleOpenAIError(error, "OpenAI")
+			}
 
 			yield {
 				type: "text",
