Skip to content

Commit a694dc5

Browse files
committed
feat: add configurable timeout for LM Studio provider
- Add lmStudioTimeoutSeconds setting to provider schema (30-3600 seconds)
- Implement AbortController with configurable timeout in LM Studio handler
- Default timeout set to 600 seconds (10 minutes) for local model scenarios
- Add comprehensive timeout tests including AbortError handling
- Provide user-friendly timeout error messages with troubleshooting tips

Fixes #6521
1 parent 079fc22 commit a694dc5

File tree

3 files changed

+167
-10
lines changed

3 files changed

+167
-10
lines changed

packages/types/src/provider-settings.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,7 @@ const lmStudioSchema = baseProviderSettingsSchema.extend({
167167
lmStudioBaseUrl: z.string().optional(),
168168
lmStudioDraftModelId: z.string().optional(),
169169
lmStudioSpeculativeDecodingEnabled: z.boolean().optional(),
170+
lmStudioTimeoutSeconds: z.number().min(30).max(3600).optional(),
170171
})
171172

172173
const geminiSchema = apiModelIdProviderModelSchema.extend({

src/api/providers/__tests__/lmstudio.spec.ts

Lines changed: 104 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,13 @@ vi.mock("openai", () => {
77
chat: {
88
completions: {
99
create: mockCreate.mockImplementation(async (options) => {
10+
// Check if signal is aborted (for timeout tests)
11+
if (options.signal?.aborted) {
12+
const error = new Error("Request was aborted")
13+
error.name = "AbortError"
14+
throw error
15+
}
16+
1017
if (!options.stream) {
1118
return {
1219
id: "test-completion",
@@ -27,6 +34,13 @@ vi.mock("openai", () => {
2734

2835
return {
2936
[Symbol.asyncIterator]: async function* () {
37+
// Check if signal is aborted during streaming
38+
if (options.signal?.aborted) {
39+
const error = new Error("Request was aborted")
40+
error.name = "AbortError"
41+
throw error
42+
}
43+
3044
yield {
3145
choices: [
3246
{
@@ -131,12 +145,17 @@ describe("LmStudioHandler", () => {
131145
it("should complete prompt successfully", async () => {
132146
const result = await handler.completePrompt("Test prompt")
133147
expect(result).toBe("Test response")
134-
expect(mockCreate).toHaveBeenCalledWith({
135-
model: mockOptions.lmStudioModelId,
136-
messages: [{ role: "user", content: "Test prompt" }],
137-
temperature: 0,
138-
stream: false,
139-
})
148+
expect(mockCreate).toHaveBeenCalledWith(
149+
{
150+
model: mockOptions.lmStudioModelId,
151+
messages: [{ role: "user", content: "Test prompt" }],
152+
temperature: 0,
153+
stream: false,
154+
},
155+
expect.objectContaining({
156+
signal: expect.any(AbortSignal),
157+
}),
158+
)
140159
})
141160

142161
it("should handle API errors", async () => {
@@ -164,4 +183,83 @@ describe("LmStudioHandler", () => {
164183
expect(modelInfo.info.contextWindow).toBe(128_000)
165184
})
166185
})
186+
187+
describe("timeout functionality", () => {
188+
it("should use default timeout of 600 seconds when not configured", () => {
189+
const handlerWithoutTimeout = new LmStudioHandler({
190+
apiModelId: "local-model",
191+
lmStudioModelId: "local-model",
192+
lmStudioBaseUrl: "http://localhost:1234",
193+
})
194+
195+
// Verify that the handler was created successfully
196+
expect(handlerWithoutTimeout).toBeInstanceOf(LmStudioHandler)
197+
})
198+
199+
it("should use custom timeout when configured", () => {
200+
const customTimeoutHandler = new LmStudioHandler({
201+
apiModelId: "local-model",
202+
lmStudioModelId: "local-model",
203+
lmStudioBaseUrl: "http://localhost:1234",
204+
lmStudioTimeoutSeconds: 120, // 2 minutes
205+
})
206+
207+
// Verify that the handler was created successfully with custom timeout
208+
expect(customTimeoutHandler).toBeInstanceOf(LmStudioHandler)
209+
})
210+
211+
it("should handle AbortError and convert to timeout message", async () => {
212+
// Mock an AbortError
213+
const abortError = new Error("Request was aborted")
214+
abortError.name = "AbortError"
215+
mockCreate.mockRejectedValueOnce(abortError)
216+
217+
await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
218+
"LM Studio request timed out after 600 seconds",
219+
)
220+
})
221+
222+
it("should pass AbortSignal to OpenAI client", async () => {
223+
const result = await handler.completePrompt("Test prompt")
224+
225+
expect(mockCreate).toHaveBeenCalledWith(
226+
expect.objectContaining({
227+
model: "local-model",
228+
messages: [{ role: "user", content: "Test prompt" }],
229+
temperature: 0,
230+
stream: false,
231+
}),
232+
expect.objectContaining({
233+
signal: expect.any(AbortSignal),
234+
}),
235+
)
236+
237+
expect(result).toBe("Test response")
238+
})
239+
240+
it("should pass AbortSignal to streaming requests", async () => {
241+
const systemPrompt = "You are a helpful assistant."
242+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello!" }]
243+
244+
const stream = handler.createMessage(systemPrompt, messages)
245+
const chunks: any[] = []
246+
for await (const chunk of stream) {
247+
chunks.push(chunk)
248+
}
249+
250+
expect(mockCreate).toHaveBeenCalledWith(
251+
expect.objectContaining({
252+
model: "local-model",
253+
messages: expect.any(Array),
254+
temperature: 0,
255+
stream: true,
256+
}),
257+
expect.objectContaining({
258+
signal: expect.any(AbortSignal),
259+
}),
260+
)
261+
262+
expect(chunks.length).toBeGreaterThan(0)
263+
})
264+
})
167265
})

src/api/providers/lm-studio.ts

Lines changed: 62 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,9 @@ import { BaseProvider } from "./base-provider"
1515
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
1616
import { getModels, getModelsFromCache } from "./fetchers/modelCache"
1717

18+
// Default timeout for LM Studio requests (10 minutes)
19+
const LMSTUDIO_DEFAULT_TIMEOUT_SECONDS = 600
20+
1821
export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
1922
protected options: ApiHandlerOptions
2023
private client: OpenAI
@@ -73,7 +76,19 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
7376

7477
let assistantText = ""
7578

79+
// Create AbortController with configurable timeout
80+
const controller = new AbortController()
81+
let timeoutId: NodeJS.Timeout | undefined
82+
83+
// Get timeout from settings or use default (10 minutes)
84+
const timeoutSeconds = this.options.lmStudioTimeoutSeconds ?? LMSTUDIO_DEFAULT_TIMEOUT_SECONDS
85+
const timeoutMs = timeoutSeconds * 1000
86+
7687
try {
88+
timeoutId = setTimeout(() => {
89+
controller.abort()
90+
}, timeoutMs)
91+
7792
const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & { draft_model?: string } = {
7893
model: this.getModel().id,
7994
messages: openAiMessages,
@@ -85,7 +100,9 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
85100
params.draft_model = this.options.lmStudioDraftModelId
86101
}
87102

88-
const results = await this.client.chat.completions.create(params)
103+
const results = await this.client.chat.completions.create(params, {
104+
signal: controller.signal,
105+
})
89106

90107
const matcher = new XmlMatcher(
91108
"think",
@@ -124,7 +141,20 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
124141
inputTokens,
125142
outputTokens,
126143
} as const
127-
} catch (error) {
144+
145+
// Clear timeout after successful completion
146+
clearTimeout(timeoutId)
147+
} catch (error: unknown) {
148+
// Clear timeout on error
149+
clearTimeout(timeoutId)
150+
151+
// Check if this is an abort error (timeout)
152+
if (error instanceof Error && error.name === "AbortError") {
153+
throw new Error(
154+
`LM Studio request timed out after ${timeoutSeconds} seconds. This can happen with large models that need more processing time. Try increasing the timeout in LM Studio settings or use a smaller model.`,
155+
)
156+
}
157+
128158
throw new Error(
129159
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
130160
)
@@ -147,7 +177,19 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
147177
}
148178

149179
async completePrompt(prompt: string): Promise<string> {
180+
// Create AbortController with configurable timeout
181+
const controller = new AbortController()
182+
let timeoutId: NodeJS.Timeout | undefined
183+
184+
// Get timeout from settings or use default (10 minutes)
185+
const timeoutSeconds = this.options.lmStudioTimeoutSeconds ?? LMSTUDIO_DEFAULT_TIMEOUT_SECONDS
186+
const timeoutMs = timeoutSeconds * 1000
187+
150188
try {
189+
timeoutId = setTimeout(() => {
190+
controller.abort()
191+
}, timeoutMs)
192+
151193
// Create params object with optional draft model
152194
const params: any = {
153195
model: this.getModel().id,
@@ -161,9 +203,25 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
161203
params.draft_model = this.options.lmStudioDraftModelId
162204
}
163205

164-
const response = await this.client.chat.completions.create(params)
206+
const response = await this.client.chat.completions.create(params, {
207+
signal: controller.signal,
208+
})
209+
210+
// Clear timeout after successful completion
211+
clearTimeout(timeoutId)
212+
165213
return response.choices[0]?.message.content || ""
166-
} catch (error) {
214+
} catch (error: unknown) {
215+
// Clear timeout on error
216+
clearTimeout(timeoutId)
217+
218+
// Check if this is an abort error (timeout)
219+
if (error instanceof Error && error.name === "AbortError") {
220+
throw new Error(
221+
`LM Studio request timed out after ${timeoutSeconds} seconds. This can happen with large models that need more processing time. Try increasing the timeout in LM Studio settings or use a smaller model.`,
222+
)
223+
}
224+
167225
throw new Error(
168226
"Please check the LM Studio developer logs to debug what went wrong. You may need to load the model with a larger context length to work with Roo Code's prompts.",
169227
)

0 commit comments

Comments (0)