Skip to content

Commit 29d4012

Browse files
committed
feat: properly internationalize all Ollama error messages
- Added `t()` function import from the i18n module
- Replaced all 8 hardcoded English error messages with translation keys
- Updated tests to expect translation keys instead of English text
- All translations already exist in 18 language files
- Tests passing: 5/5 in native-ollama.spec.ts
1 parent 6a93ccd commit 29d4012

File tree

2 files changed

+20
-12
lines changed

2 files changed

+20
-12
lines changed

src/api/providers/__tests__/native-ollama.spec.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ describe("NativeOllamaHandler", () => {
106106
for await (const _ of stream) {
107107
// consume stream
108108
}
109-
}).rejects.toThrow("Ollama service is not running")
109+
}).rejects.toThrow("errors.ollama.serviceNotRunning")
110110
})
111111

112112
it("should handle model not found errors", async () => {
@@ -120,7 +120,7 @@ describe("NativeOllamaHandler", () => {
120120
for await (const _ of stream) {
121121
// consume stream
122122
}
123-
}).rejects.toThrow("Model llama2 not found in Ollama")
123+
}).rejects.toThrow("errors.ollama.modelNotFound")
124124
})
125125
})
126126

src/api/providers/native-ollama.ts

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import type { ApiHandlerOptions } from "../../shared/api"
77
import { getOllamaModels } from "./fetchers/ollama"
88
import { XmlMatcher } from "../../utils/xml-matcher"
99
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
10+
import { t } from "../../i18n"
1011

1112
const TOKEN_ESTIMATION_FACTOR = 4 // Industry standard technique for estimating token counts without actually implementing a parser/tokenizer
1213

@@ -171,7 +172,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
171172

172173
this.client = new Ollama(clientOptions)
173174
} catch (error: any) {
174-
throw new Error(`Error creating Ollama client: ${error.message}`)
175+
throw new Error(t("common:errors.ollama.clientCreationError", { error: error.message }))
175176
}
176177
}
177178
return this.client
@@ -199,7 +200,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
199200
const estimatedTokenCount = estimateOllamaTokenCount(ollamaMessages)
200201
if (modelInfo.maxTokens && estimatedTokenCount > modelInfo.maxTokens) {
201202
throw new Error(
202-
`Input message is too long for the selected model. Estimated tokens: ${estimatedTokenCount}, Max tokens: ${modelInfo.maxTokens}. To increase the context window size, please set the OLLAMA_NUM_CTX environment variable or see Ollama documentation.`,
203+
t("common:errors.ollama.inputTooLong", { estimatedTokenCount, maxTokens: modelInfo.maxTokens }),
203204
)
204205
}
205206

@@ -261,7 +262,11 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
261262
}
262263
} catch (streamError: any) {
263264
console.error("Error processing Ollama stream:", streamError)
264-
throw new Error(`Ollama stream processing error: ${streamError.message || "Unknown error"}`)
265+
throw new Error(
266+
t("common:errors.ollama.streamProcessingError", {
267+
error: streamError.message || t("common:errors.ollama.unknownError"),
268+
}),
269+
)
265270
}
266271
} catch (error: any) {
267272
// Enhance error reporting
@@ -270,12 +275,12 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
270275

271276
if (error.code === "ECONNREFUSED") {
272277
throw new Error(
273-
`Ollama service is not running at ${this.options.ollamaBaseUrl || "http://localhost:11434"}. Please start Ollama first.`,
278+
t("common:errors.ollama.serviceNotRunning", {
279+
baseUrl: this.options.ollamaBaseUrl || "http://localhost:11434",
280+
}),
274281
)
275282
} else if (statusCode === 404) {
276-
throw new Error(
277-
`Model ${this.getModel().id} not found in Ollama. Please pull the model first with: ollama pull ${this.getModel().id}`,
278-
)
283+
throw new Error(t("common:errors.ollama.modelNotFound", { modelId: this.getModel().id }))
279284
}
280285

281286
console.error(`Ollama API error (${statusCode || "unknown"}): ${errorMessage}`)
@@ -296,8 +301,11 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
296301
const availableModels = Object.keys(this.models)
297302
const errorMessage =
298303
availableModels.length > 0
299-
? `Model ${modelId} not found. Available models: ${availableModels.join(", ")}`
300-
: `Model ${modelId} not found. No models available. Please pull the model first with: ollama pull ${modelId}`
304+
? t("common:errors.ollama.modelNotFoundWithAvailable", {
305+
modelId,
306+
availableModels: availableModels.join(", "),
307+
})
308+
: t("common:errors.ollama.modelNotFoundNoModels", { modelId })
301309
throw new Error(errorMessage)
302310
}
303311

@@ -329,7 +337,7 @@ export class NativeOllamaHandler extends BaseProvider implements SingleCompletio
329337
return response.message?.content || ""
330338
} catch (error) {
331339
if (error instanceof Error) {
332-
throw new Error(`Ollama completion error: ${error.message}`)
340+
throw new Error(t("common:errors.ollama.completionError", { error: error.message }))
333341
}
334342
throw error
335343
}

0 commit comments

Comments (0)