Skip to content

Commit e407b1e

Browse files
committed
fix: improve error handling for LMStudio model compatibility
- Add specific error detection for connection failures
- Add model not found error handling
- Add context length exceeded error handling
- Provide clearer error messages for debugging
- Update tests to cover new error scenarios

Fixes #8575
1 parent eeaafef commit e407b1e

File tree

2 files changed

+143
-8
lines changed

2 files changed

+143
-8
lines changed

src/api/providers/__tests__/lmstudio.spec.ts

Lines changed: 65 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -114,16 +114,55 @@ describe("LmStudioHandler", () => {
114114
expect(textChunks[0].text).toBe("Test response")
115115
})
116116

117-
it("should handle API errors", async () => {
118-
mockCreate.mockRejectedValueOnce(new Error("API Error"))
117+
it("should handle connection errors", async () => {
118+
const connectionError = new Error("connect ECONNREFUSED 127.0.0.1:1234")
119+
mockCreate.mockRejectedValueOnce(connectionError)
119120

120121
const stream = handler.createMessage(systemPrompt, messages)
121122

122123
await expect(async () => {
123124
for await (const _chunk of stream) {
124125
// Should not reach here
125126
}
126-
}).rejects.toThrow("Please check the LM Studio developer logs to debug what went wrong")
127+
}).rejects.toThrow("Cannot connect to LM Studio at http://localhost:1234")
128+
})
129+
130+
it("should handle model not found errors", async () => {
131+
const modelError = new Error("model 'local-model' not found")
132+
mockCreate.mockRejectedValueOnce(modelError)
133+
134+
const stream = handler.createMessage(systemPrompt, messages)
135+
136+
await expect(async () => {
137+
for await (const _chunk of stream) {
138+
// Should not reach here
139+
}
140+
}).rejects.toThrow('Model "local-model" not found in LM Studio')
141+
})
142+
143+
it("should handle context length errors", async () => {
144+
const contextError = new Error("context length exceeded")
145+
mockCreate.mockRejectedValueOnce(contextError)
146+
147+
const stream = handler.createMessage(systemPrompt, messages)
148+
149+
await expect(async () => {
150+
for await (const _chunk of stream) {
151+
// Should not reach here
152+
}
153+
}).rejects.toThrow("Context length exceeded")
154+
})
155+
156+
it("should handle generic API errors", async () => {
157+
mockCreate.mockRejectedValueOnce(new Error("Unknown API Error"))
158+
159+
const stream = handler.createMessage(systemPrompt, messages)
160+
161+
await expect(async () => {
162+
for await (const _chunk of stream) {
163+
// Should not reach here
164+
}
165+
}).rejects.toThrow("LM Studio completion error")
127166
})
128167
})
129168

@@ -139,13 +178,33 @@ describe("LmStudioHandler", () => {
139178
})
140179
})
141180

142-
it("should handle API errors", async () => {
143-
mockCreate.mockRejectedValueOnce(new Error("API Error"))
181+
it("should handle connection errors", async () => {
182+
const connectionError = new Error("connect ECONNREFUSED 127.0.0.1:1234")
183+
mockCreate.mockRejectedValueOnce(connectionError)
144184
await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
145-
"Please check the LM Studio developer logs to debug what went wrong",
185+
"Cannot connect to LM Studio at http://localhost:1234",
146186
)
147187
})
148188

189+
it("should handle model not found errors", async () => {
190+
const modelError = new Error("model 'local-model' not found")
191+
mockCreate.mockRejectedValueOnce(modelError)
192+
await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
193+
'Model "local-model" not found in LM Studio',
194+
)
195+
})
196+
197+
it("should handle context length errors", async () => {
198+
const contextError = new Error("token limit exceeded")
199+
mockCreate.mockRejectedValueOnce(contextError)
200+
await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Context length exceeded")
201+
})
202+
203+
it("should handle generic API errors", async () => {
204+
mockCreate.mockRejectedValueOnce(new Error("Unknown API Error"))
205+
await expect(handler.completePrompt("Test prompt")).rejects.toThrow("LM Studio completion error")
206+
})
207+
149208
it("should handle empty response", async () => {
150209
mockCreate.mockResolvedValueOnce({
151210
choices: [{ message: { content: "" } }],

src/api/providers/lm-studio.ts

Lines changed: 78 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,38 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
9797
try {
9898
results = await this.client.chat.completions.create(params)
9999
} catch (error) {
100+
// Handle specific error cases
101+
const errorMessage = error instanceof Error ? error.message : String(error)
102+
103+
// Check for connection errors
104+
if (errorMessage.includes("ECONNREFUSED") || errorMessage.includes("ENOTFOUND")) {
105+
throw new Error(
106+
`Cannot connect to LM Studio at ${this.options.lmStudioBaseUrl || "http://localhost:1234"}. Please ensure LM Studio is running and the server is started.`,
107+
)
108+
}
109+
110+
// Check for model not found errors
111+
if (
112+
errorMessage.includes("model") &&
113+
(errorMessage.includes("not found") || errorMessage.includes("does not exist"))
114+
) {
115+
throw new Error(
116+
`Model "${this.getModel().id}" not found in LM Studio. Please ensure the model is loaded in LM Studio.`,
117+
)
118+
}
119+
120+
// Check for context length errors
121+
if (
122+
errorMessage.includes("context") ||
123+
errorMessage.includes("token") ||
124+
errorMessage.includes("length")
125+
) {
126+
throw new Error(
127+
`Context length exceeded for model "${this.getModel().id}". Please load the model with a larger context window in LM Studio, or use a different model that supports longer contexts.`,
128+
)
129+
}
130+
131+
// Use the enhanced error handler for other OpenAI-like errors
100132
throw handleOpenAIError(error, this.providerName)
101133
}
102134

@@ -138,8 +170,14 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
138170
outputTokens,
139171
} as const
140172
} catch (error) {
173+
// If error was already processed and re-thrown above, just re-throw it
174+
if (error instanceof Error && error.message.includes("LM Studio")) {
175+
throw error
176+
}
177+
178+
// Generic fallback error
141179
throw new Error(
142-
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
180+
`LM Studio error: ${error instanceof Error ? error.message : String(error)}. Please check the LM Studio developer logs for more details.`,
143181
)
144182
}
145183
}
@@ -178,12 +216,50 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
178216
try {
179217
response = await this.client.chat.completions.create(params)
180218
} catch (error) {
219+
// Handle specific error cases
220+
const errorMessage = error instanceof Error ? error.message : String(error)
221+
222+
// Check for connection errors
223+
if (errorMessage.includes("ECONNREFUSED") || errorMessage.includes("ENOTFOUND")) {
224+
throw new Error(
225+
`Cannot connect to LM Studio at ${this.options.lmStudioBaseUrl || "http://localhost:1234"}. Please ensure LM Studio is running and the server is started.`,
226+
)
227+
}
228+
229+
// Check for model not found errors
230+
if (
231+
errorMessage.includes("model") &&
232+
(errorMessage.includes("not found") || errorMessage.includes("does not exist"))
233+
) {
234+
throw new Error(
235+
`Model "${this.getModel().id}" not found in LM Studio. Please ensure the model is loaded in LM Studio.`,
236+
)
237+
}
238+
239+
// Check for context length errors
240+
if (
241+
errorMessage.includes("context") ||
242+
errorMessage.includes("token") ||
243+
errorMessage.includes("length")
244+
) {
245+
throw new Error(
246+
`Context length exceeded for model "${this.getModel().id}". Please load the model with a larger context window in LM Studio, or use a different model that supports longer contexts.`,
247+
)
248+
}
249+
250+
// Use the enhanced error handler for other OpenAI-like errors
181251
throw handleOpenAIError(error, this.providerName)
182252
}
183253
return response.choices[0]?.message.content || ""
184254
} catch (error) {
255+
// If error was already processed and re-thrown above, just re-throw it
256+
if (error instanceof Error && error.message.includes("LM Studio")) {
257+
throw error
258+
}
259+
260+
// Generic fallback error
185261
throw new Error(
186-
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
262+
`LM Studio error: ${error instanceof Error ? error.message : String(error)}. Please check the LM Studio developer logs for more details.`,
187263
)
188264
}
189265
}

0 commit comments

Comments (0)