diff --git a/src/api/providers/__tests__/lmstudio.spec.ts b/src/api/providers/__tests__/lmstudio.spec.ts
index 0adebdeea7ac..bfecf3403662 100644
--- a/src/api/providers/__tests__/lmstudio.spec.ts
+++ b/src/api/providers/__tests__/lmstudio.spec.ts
@@ -114,8 +114,9 @@ describe("LmStudioHandler", () => {
 			expect(textChunks[0].text).toBe("Test response")
 		})
 
-		it("should handle API errors", async () => {
-			mockCreate.mockRejectedValueOnce(new Error("API Error"))
+		it("should handle connection errors", async () => {
+			const connectionError = new Error("connect ECONNREFUSED 127.0.0.1:1234")
+			mockCreate.mockRejectedValueOnce(connectionError)
 
 			const stream = handler.createMessage(systemPrompt, messages)
 
@@ -123,7 +124,45 @@ describe("LmStudioHandler", () => {
 				for await (const _chunk of stream) {
 					// Should not reach here
 				}
-			}).rejects.toThrow("Please check the LM Studio developer logs to debug what went wrong")
+			}).rejects.toThrow("Cannot connect to LM Studio at http://localhost:1234")
+		})
+
+		it("should handle model not found errors", async () => {
+			const modelError = new Error("model 'local-model' not found")
+			mockCreate.mockRejectedValueOnce(modelError)
+
+			const stream = handler.createMessage(systemPrompt, messages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should not reach here
+				}
+			}).rejects.toThrow('Model "local-model" not found in LM Studio')
+		})
+
+		it("should handle context length errors", async () => {
+			const contextError = new Error("context length exceeded")
+			mockCreate.mockRejectedValueOnce(contextError)
+
+			const stream = handler.createMessage(systemPrompt, messages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should not reach here
+				}
+			}).rejects.toThrow("Context length exceeded")
+		})
+
+		it("should handle generic API errors", async () => {
+			mockCreate.mockRejectedValueOnce(new Error("Unknown API Error"))
+
+			const stream = handler.createMessage(systemPrompt, messages)
+
+			await expect(async () => {
+				for await (const _chunk of stream) {
+					// Should not reach here
+				}
+			}).rejects.toThrow("LM Studio completion error")
 		})
 	})
 
@@ -139,13 +178,33 @@ describe("LmStudioHandler", () => {
 			})
 		})
 
-		it("should handle API errors", async () => {
-			mockCreate.mockRejectedValueOnce(new Error("API Error"))
+		it("should handle connection errors", async () => {
+			const connectionError = new Error("connect ECONNREFUSED 127.0.0.1:1234")
+			mockCreate.mockRejectedValueOnce(connectionError)
 			await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
-				"Please check the LM Studio developer logs to debug what went wrong",
+				"Cannot connect to LM Studio at http://localhost:1234",
 			)
 		})
 
+		it("should handle model not found errors", async () => {
+			const modelError = new Error("model 'local-model' not found")
+			mockCreate.mockRejectedValueOnce(modelError)
+			await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
+				'Model "local-model" not found in LM Studio',
+			)
+		})
+
+		it("should handle context length errors", async () => {
+			const contextError = new Error("token limit exceeded")
+			mockCreate.mockRejectedValueOnce(contextError)
+			await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Context length exceeded")
+		})
+
+		it("should handle generic API errors", async () => {
+			mockCreate.mockRejectedValueOnce(new Error("Unknown API Error"))
+			await expect(handler.completePrompt("Test prompt")).rejects.toThrow("LM Studio completion error")
+		})
+
 		it("should handle empty response", async () => {
 			mockCreate.mockResolvedValueOnce({
 				choices: [{ message: { content: "" } }],
diff --git a/src/api/providers/lm-studio.ts b/src/api/providers/lm-studio.ts
index 6c58a96ae1fa..de7aaa96dbd1 100644
--- a/src/api/providers/lm-studio.ts
+++ b/src/api/providers/lm-studio.ts
@@ -97,6 +97,38 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			try {
 				results = await this.client.chat.completions.create(params)
 			} catch (error) {
+				// Handle specific error cases
+				const errorMessage = error instanceof Error ? error.message : String(error)
+
+				// Check for connection errors
+				if (errorMessage.includes("ECONNREFUSED") || errorMessage.includes("ENOTFOUND")) {
+					throw new Error(
+						`Cannot connect to LM Studio at ${this.options.lmStudioBaseUrl || "http://localhost:1234"}. Please ensure LM Studio is running and the server is started.`,
+					)
+				}
+
+				// Check for model not found errors
+				if (
+					errorMessage.includes("model") &&
+					(errorMessage.includes("not found") || errorMessage.includes("does not exist"))
+				) {
+					throw new Error(
+						`Model "${this.getModel().id}" not found in LM Studio. Please ensure the model is loaded in LM Studio.`,
+					)
+				}
+
+				// Check for context length errors
+				if (
+					errorMessage.includes("context") ||
+					errorMessage.includes("token") ||
+					errorMessage.includes("length")
+				) {
+					throw new Error(
+						`Context length exceeded for model "${this.getModel().id}". Please load the model with a larger context window in LM Studio, or use a different model that supports longer contexts.`,
+					)
+				}
+
+				// Use the enhanced error handler for other OpenAI-like errors
 				throw handleOpenAIError(error, this.providerName)
 			}
 
@@ -138,8 +170,14 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 				outputTokens,
 			} as const
 		} catch (error) {
+			// If error was already processed and re-thrown above, just re-throw it
+			if (error instanceof Error && error.message.includes("LM Studio")) {
+				throw error
+			}
+
+			// Generic fallback error
 			throw new Error(
-				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
+				`LM Studio error: ${error instanceof Error ? error.message : String(error)}. Please check the LM Studio developer logs for more details.`,
 			)
 		}
 	}
@@ -178,12 +216,50 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			try {
 				response = await this.client.chat.completions.create(params)
 			} catch (error) {
+				// Handle specific error cases
+				const errorMessage = error instanceof Error ? error.message : String(error)
+
+				// Check for connection errors
+				if (errorMessage.includes("ECONNREFUSED") || errorMessage.includes("ENOTFOUND")) {
+					throw new Error(
+						`Cannot connect to LM Studio at ${this.options.lmStudioBaseUrl || "http://localhost:1234"}. Please ensure LM Studio is running and the server is started.`,
+					)
+				}
+
+				// Check for model not found errors
+				if (
+					errorMessage.includes("model") &&
+					(errorMessage.includes("not found") || errorMessage.includes("does not exist"))
+				) {
+					throw new Error(
+						`Model "${this.getModel().id}" not found in LM Studio. Please ensure the model is loaded in LM Studio.`,
+					)
+				}
+
+				// Check for context length errors
+				if (
+					errorMessage.includes("context") ||
+					errorMessage.includes("token") ||
+					errorMessage.includes("length")
+				) {
+					throw new Error(
+						`Context length exceeded for model "${this.getModel().id}". Please load the model with a larger context window in LM Studio, or use a different model that supports longer contexts.`,
+					)
+				}
+
+				// Use the enhanced error handler for other OpenAI-like errors
 				throw handleOpenAIError(error, this.providerName)
 			}
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
+			// If error was already processed and re-thrown above, just re-throw it
+			if (error instanceof Error && error.message.includes("LM Studio")) {
+				throw error
+			}
+
+			// Generic fallback error
 			throw new Error(
-				"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
+				`LM Studio error: ${error instanceof Error ? error.message : String(error)}. Please check the LM Studio developer logs for more details.`,
 			)
 		}
 	}