-
Notifications
You must be signed in to change notification settings - Fork 2.4k
fix: improve error handling for LMStudio model compatibility #8576
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -114,16 +114,55 @@ describe("LmStudioHandler", () => { | |
| expect(textChunks[0].text).toBe("Test response") | ||
| }) | ||
|
|
||
| it("should handle API errors", async () => { | ||
| mockCreate.mockRejectedValueOnce(new Error("API Error")) | ||
| it("should handle connection errors", async () => { | ||
| const connectionError = new Error("connect ECONNREFUSED 127.0.0.1:1234") | ||
| mockCreate.mockRejectedValueOnce(connectionError) | ||
|
|
||
| const stream = handler.createMessage(systemPrompt, messages) | ||
|
|
||
| await expect(async () => { | ||
| for await (const _chunk of stream) { | ||
| // Should not reach here | ||
| } | ||
| }).rejects.toThrow("Please check the LM Studio developer logs to debug what went wrong") | ||
| }).rejects.toThrow("Cannot connect to LM Studio at http://localhost:1234") | ||
| }) | ||
|
|
||
| it("should handle model not found errors", async () => { | ||
| const modelError = new Error("model 'local-model' not found") | ||
| mockCreate.mockRejectedValueOnce(modelError) | ||
|
|
||
| const stream = handler.createMessage(systemPrompt, messages) | ||
|
|
||
| await expect(async () => { | ||
| for await (const _chunk of stream) { | ||
| // Should not reach here | ||
| } | ||
| }).rejects.toThrow('Model "local-model" not found in LM Studio') | ||
| }) | ||
|
|
||
| it("should handle context length errors", async () => { | ||
| const contextError = new Error("context length exceeded") | ||
| mockCreate.mockRejectedValueOnce(contextError) | ||
|
|
||
| const stream = handler.createMessage(systemPrompt, messages) | ||
|
|
||
| await expect(async () => { | ||
| for await (const _chunk of stream) { | ||
| // Should not reach here | ||
| } | ||
| }).rejects.toThrow("Context length exceeded") | ||
| }) | ||
|
|
||
| it("should handle generic API errors", async () => { | ||
| mockCreate.mockRejectedValueOnce(new Error("Unknown API Error")) | ||
|
|
||
| const stream = handler.createMessage(systemPrompt, messages) | ||
|
|
||
| await expect(async () => { | ||
| for await (const _chunk of stream) { | ||
| // Should not reach here | ||
| } | ||
| }).rejects.toThrow("LM Studio completion error") | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The test expects "LM Studio completion error" but based on the implementation, the error "Unknown API Error" doesn't match connection/model/context patterns, so it gets passed to `handleOpenAIError()`, which returns "LM Studio completion error: Unknown API Error". |
||
| }) | ||
|
Comment on lines
+156
to
166
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. P3: Missing test coverage for false positives The tests don't verify that the error detection correctly distinguishes between different error types. Add tests for errors that contain "token", "context", or "length" but aren't actually context length errors, such as "Invalid token format", "Authentication token expired", or "Response length mismatch".
These should NOT trigger the context length error message.
Comment on lines
+117
to
166
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The tests don't verify that the error detection correctly distinguishes between different error types. Add tests for errors that contain "token", "context", or "length" but aren't actually context length errors, such as "Invalid token format", "Authentication token expired", or "Response length mismatch". These should NOT trigger the context length error message. |
||
| }) | ||
|
|
||
|
|
@@ -139,13 +178,33 @@ describe("LmStudioHandler", () => { | |
| }) | ||
| }) | ||
|
|
||
| it("should handle API errors", async () => { | ||
| mockCreate.mockRejectedValueOnce(new Error("API Error")) | ||
| it("should handle connection errors", async () => { | ||
| const connectionError = new Error("connect ECONNREFUSED 127.0.0.1:1234") | ||
| mockCreate.mockRejectedValueOnce(connectionError) | ||
| await expect(handler.completePrompt("Test prompt")).rejects.toThrow( | ||
| "Please check the LM Studio developer logs to debug what went wrong", | ||
| "Cannot connect to LM Studio at http://localhost:1234", | ||
| ) | ||
| }) | ||
|
|
||
| it("should handle model not found errors", async () => { | ||
| const modelError = new Error("model 'local-model' not found") | ||
| mockCreate.mockRejectedValueOnce(modelError) | ||
| await expect(handler.completePrompt("Test prompt")).rejects.toThrow( | ||
| 'Model "local-model" not found in LM Studio', | ||
| ) | ||
| }) | ||
|
|
||
| it("should handle context length errors", async () => { | ||
| const contextError = new Error("token limit exceeded") | ||
| mockCreate.mockRejectedValueOnce(contextError) | ||
| await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Context length exceeded") | ||
| }) | ||
|
|
||
| it("should handle generic API errors", async () => { | ||
| mockCreate.mockRejectedValueOnce(new Error("Unknown API Error")) | ||
| await expect(handler.completePrompt("Test prompt")).rejects.toThrow("LM Studio completion error") | ||
| }) | ||
|
|
||
| it("should handle empty response", async () => { | ||
| mockCreate.mockResolvedValueOnce({ | ||
| choices: [{ message: { content: "" } }], | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -97,6 +97,38 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan | |||||||||||||||||||||||||||||||||||||||
| try { | ||||||||||||||||||||||||||||||||||||||||
| results = await this.client.chat.completions.create(params) | ||||||||||||||||||||||||||||||||||||||||
| } catch (error) { | ||||||||||||||||||||||||||||||||||||||||
| // Handle specific error cases | ||||||||||||||||||||||||||||||||||||||||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Consider refactoring duplicated error handling logic into a helper function. The same error checks (connection errors, model not found, and context length) appear in both `createMessage` and `completePrompt`. |
||||||||||||||||||||||||||||||||||||||||
| const errorMessage = error instanceof Error ? error.message : String(error) | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Check for connection errors | ||||||||||||||||||||||||||||||||||||||||
| if (errorMessage.includes("ECONNREFUSED") || errorMessage.includes("ENOTFOUND")) { | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| `Cannot connect to LM Studio at ${this.options.lmStudioBaseUrl || "http://localhost:1234"}. Please ensure LM Studio is running and the server is started.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Check for model not found errors | ||||||||||||||||||||||||||||||||||||||||
| if ( | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("model") && | ||||||||||||||||||||||||||||||||||||||||
| (errorMessage.includes("not found") || errorMessage.includes("does not exist")) | ||||||||||||||||||||||||||||||||||||||||
| ) { | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| `Model "${this.getModel().id}" not found in LM Studio. Please ensure the model is loaded in LM Studio.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Check for context length errors | ||||||||||||||||||||||||||||||||||||||||
| if ( | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("context") || | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("token") || | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("length") | ||||||||||||||||||||||||||||||||||||||||
| ) { | ||||||||||||||||||||||||||||||||||||||||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. P1: Overly broad error detection pattern The context length error detection will incorrectly match ANY error containing "context", "token", or "length". This creates false positives for unrelated errors like "Invalid token format", "Response length too short", or "Authentication token expired".
Consider using more specific patterns that match actual LM Studio context length errors, such as checking for phrases like "context length exceeded", "maximum context", or "token limit".
Suggested change
Comment on lines
+120
to
+125
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The context length error detection will incorrectly match ANY error containing "context", "token", or "length". This creates false positives for unrelated errors like "Invalid token format", "Response length too short", or "Authentication token expired". Consider using more specific patterns that match actual LM Studio context length errors, such as checking for phrases like "context length exceeded", "maximum context", or "token limit".
Suggested change
|
||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| `Context length exceeded for model "${this.getModel().id}". Please load the model with a larger context window in LM Studio, or use a different model that supports longer contexts.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Use the enhanced error handler for other OpenAI-like errors | ||||||||||||||||||||||||||||||||||||||||
| throw handleOpenAIError(error, this.providerName) | ||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
+100
to
132
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The error handling logic is duplicated in `createMessage` and `completePrompt`; consider extracting it into a shared helper method. |
||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
99
to
133
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. P2: Code duplication - extract error handling logic The error handling logic (lines 100-132) is duplicated in `createMessage` and `completePrompt`; consider extracting it into a shared private helper so both methods stay consistent. |
||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
|
|
@@ -138,8 +170,14 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan | |||||||||||||||||||||||||||||||||||||||
| outputTokens, | ||||||||||||||||||||||||||||||||||||||||
| } as const | ||||||||||||||||||||||||||||||||||||||||
| } catch (error) { | ||||||||||||||||||||||||||||||||||||||||
| // If error was already processed and re-thrown above, just re-throw it | ||||||||||||||||||||||||||||||||||||||||
| if (error instanceof Error && error.message.includes("LM Studio")) { | ||||||||||||||||||||||||||||||||||||||||
| throw error | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Generic fallback error | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| "Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.", | ||||||||||||||||||||||||||||||||||||||||
| `LM Studio error: ${error instanceof Error ? error.message : String(error)}. Please check the LM Studio developer logs for more details.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
172
to
181
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This catch block re-checks if the error message contains "LM Studio", but the specific error handlers above already throw errors with "LM Studio" in the message (connection, model not found, context errors), and `handleOpenAIError` likewise produces messages containing "LM Studio", so the generic fallback below appears unreachable. |
||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
172
to
182
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. P2: Inconsistent error handling flow This catch block re-checks if the error message contains "LM Studio", but the specific error handlers above already throw errors with "LM Studio" in the message. This means those errors are simply re-thrown unchanged.
However, the generic fallback at lines 179-181 appears unreachable. Consider simplifying this logic or removing the unreachable code. |
||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
@@ -178,12 +216,50 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan | |||||||||||||||||||||||||||||||||||||||
| try { | ||||||||||||||||||||||||||||||||||||||||
| response = await this.client.chat.completions.create(params) | ||||||||||||||||||||||||||||||||||||||||
| } catch (error) { | ||||||||||||||||||||||||||||||||||||||||
| // Handle specific error cases | ||||||||||||||||||||||||||||||||||||||||
| const errorMessage = error instanceof Error ? error.message : String(error) | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Check for connection errors | ||||||||||||||||||||||||||||||||||||||||
| if (errorMessage.includes("ECONNREFUSED") || errorMessage.includes("ENOTFOUND")) { | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| `Cannot connect to LM Studio at ${this.options.lmStudioBaseUrl || "http://localhost:1234"}. Please ensure LM Studio is running and the server is started.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Check for model not found errors | ||||||||||||||||||||||||||||||||||||||||
| if ( | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("model") && | ||||||||||||||||||||||||||||||||||||||||
| (errorMessage.includes("not found") || errorMessage.includes("does not exist")) | ||||||||||||||||||||||||||||||||||||||||
| ) { | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| `Model "${this.getModel().id}" not found in LM Studio. Please ensure the model is loaded in LM Studio.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Check for context length errors | ||||||||||||||||||||||||||||||||||||||||
| if ( | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("context") || | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("token") || | ||||||||||||||||||||||||||||||||||||||||
| errorMessage.includes("length") | ||||||||||||||||||||||||||||||||||||||||
| ) { | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| `Context length exceeded for model "${this.getModel().id}". Please load the model with a larger context window in LM Studio, or use a different model that supports longer contexts.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Use the enhanced error handler for other OpenAI-like errors | ||||||||||||||||||||||||||||||||||||||||
| throw handleOpenAIError(error, this.providerName) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
| return response.choices[0]?.message.content || "" | ||||||||||||||||||||||||||||||||||||||||
| } catch (error) { | ||||||||||||||||||||||||||||||||||||||||
| // If error was already processed and re-thrown above, just re-throw it | ||||||||||||||||||||||||||||||||||||||||
| if (error instanceof Error && error.message.includes("LM Studio")) { | ||||||||||||||||||||||||||||||||||||||||
| throw error | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| // Generic fallback error | ||||||||||||||||||||||||||||||||||||||||
| throw new Error( | ||||||||||||||||||||||||||||||||||||||||
| "Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.", | ||||||||||||||||||||||||||||||||||||||||
| `LM Studio error: ${error instanceof Error ? error.message : String(error)}. Please check the LM Studio developer logs for more details.`, | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
P3: Test expects wrong error message
The test expects "LM Studio completion error", but based on the implementation the error "Unknown API Error" doesn't match the connection/model/context patterns, so it is passed to `handleOpenAIError()`, which returns "LM Studio completion error: Unknown API Error". The full thrown message is therefore "LM Studio completion error: Unknown API Error". The test should verify the complete error message or use
`.toMatch()` for partial matching.