Skip to content

Commit f9e85a5

Browse files
Authored by: roomote[bot], ellipsis-dev[bot], roomote-agent, daniel-lxs
feat: add configurable API request timeout for local providers (#6531)
Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> Co-authored-by: Roo Code <[email protected]> Co-authored-by: daniel-lxs <[email protected]> Co-authored-by: Daniel <[email protected]>
1 parent cee7c98 commit f9e85a5

28 files changed

+519
-18
lines changed
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
// npx vitest run api/providers/__tests__/lm-studio-timeout.spec.ts
2+
3+
import { LmStudioHandler } from "../lm-studio"
4+
import { ApiHandlerOptions } from "../../../shared/api"
5+
6+
// Mock the timeout config utility
7+
vitest.mock("../utils/timeout-config", () => ({
8+
getApiRequestTimeout: vitest.fn(),
9+
}))
10+
11+
import { getApiRequestTimeout } from "../utils/timeout-config"
12+
13+
// Mock OpenAI
14+
const mockOpenAIConstructor = vitest.fn()
15+
vitest.mock("openai", () => {
16+
return {
17+
__esModule: true,
18+
default: vitest.fn().mockImplementation((config) => {
19+
mockOpenAIConstructor(config)
20+
return {
21+
chat: {
22+
completions: {
23+
create: vitest.fn(),
24+
},
25+
},
26+
}
27+
}),
28+
}
29+
})
30+
31+
describe("LmStudioHandler timeout configuration", () => {
32+
beforeEach(() => {
33+
vitest.clearAllMocks()
34+
})
35+
36+
it("should use default timeout of 600 seconds when no configuration is set", () => {
37+
;(getApiRequestTimeout as any).mockReturnValue(600000)
38+
39+
const options: ApiHandlerOptions = {
40+
apiModelId: "llama2",
41+
lmStudioModelId: "llama2",
42+
lmStudioBaseUrl: "http://localhost:1234",
43+
}
44+
45+
new LmStudioHandler(options)
46+
47+
expect(getApiRequestTimeout).toHaveBeenCalled()
48+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
49+
expect.objectContaining({
50+
baseURL: "http://localhost:1234/v1",
51+
apiKey: "noop",
52+
timeout: 600000, // 600 seconds in milliseconds
53+
}),
54+
)
55+
})
56+
57+
it("should use custom timeout when configuration is set", () => {
58+
;(getApiRequestTimeout as any).mockReturnValue(1200000) // 20 minutes
59+
60+
const options: ApiHandlerOptions = {
61+
apiModelId: "llama2",
62+
lmStudioModelId: "llama2",
63+
lmStudioBaseUrl: "http://localhost:1234",
64+
}
65+
66+
new LmStudioHandler(options)
67+
68+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
69+
expect.objectContaining({
70+
timeout: 1200000, // 1200 seconds in milliseconds
71+
}),
72+
)
73+
})
74+
75+
it("should handle zero timeout (no timeout)", () => {
76+
;(getApiRequestTimeout as any).mockReturnValue(0)
77+
78+
const options: ApiHandlerOptions = {
79+
apiModelId: "llama2",
80+
lmStudioModelId: "llama2",
81+
}
82+
83+
new LmStudioHandler(options)
84+
85+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
86+
expect.objectContaining({
87+
timeout: 0, // No timeout
88+
}),
89+
)
90+
})
91+
})
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
// npx vitest run api/providers/__tests__/ollama-timeout.spec.ts
2+
3+
import { OllamaHandler } from "../ollama"
4+
import { ApiHandlerOptions } from "../../../shared/api"
5+
6+
// Mock the timeout config utility
7+
vitest.mock("../utils/timeout-config", () => ({
8+
getApiRequestTimeout: vitest.fn(),
9+
}))
10+
11+
import { getApiRequestTimeout } from "../utils/timeout-config"
12+
13+
// Mock OpenAI
14+
const mockOpenAIConstructor = vitest.fn()
15+
vitest.mock("openai", () => {
16+
return {
17+
__esModule: true,
18+
default: vitest.fn().mockImplementation((config) => {
19+
mockOpenAIConstructor(config)
20+
return {
21+
chat: {
22+
completions: {
23+
create: vitest.fn(),
24+
},
25+
},
26+
}
27+
}),
28+
}
29+
})
30+
31+
describe("OllamaHandler timeout configuration", () => {
32+
beforeEach(() => {
33+
vitest.clearAllMocks()
34+
})
35+
36+
it("should use default timeout of 600 seconds when no configuration is set", () => {
37+
;(getApiRequestTimeout as any).mockReturnValue(600000)
38+
39+
const options: ApiHandlerOptions = {
40+
apiModelId: "llama2",
41+
ollamaModelId: "llama2",
42+
ollamaBaseUrl: "http://localhost:11434",
43+
}
44+
45+
new OllamaHandler(options)
46+
47+
expect(getApiRequestTimeout).toHaveBeenCalled()
48+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
49+
expect.objectContaining({
50+
baseURL: "http://localhost:11434/v1",
51+
apiKey: "ollama",
52+
timeout: 600000, // 600 seconds in milliseconds
53+
}),
54+
)
55+
})
56+
57+
it("should use custom timeout when configuration is set", () => {
58+
;(getApiRequestTimeout as any).mockReturnValue(3600000) // 1 hour
59+
60+
const options: ApiHandlerOptions = {
61+
apiModelId: "llama2",
62+
ollamaModelId: "llama2",
63+
}
64+
65+
new OllamaHandler(options)
66+
67+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
68+
expect.objectContaining({
69+
timeout: 3600000, // 3600 seconds in milliseconds
70+
}),
71+
)
72+
})
73+
74+
it("should handle zero timeout (no timeout)", () => {
75+
;(getApiRequestTimeout as any).mockReturnValue(0)
76+
77+
const options: ApiHandlerOptions = {
78+
apiModelId: "llama2",
79+
ollamaModelId: "llama2",
80+
ollamaBaseUrl: "http://localhost:11434",
81+
}
82+
83+
new OllamaHandler(options)
84+
85+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
86+
expect.objectContaining({
87+
timeout: 0, // No timeout
88+
}),
89+
)
90+
})
91+
92+
it("should use default base URL when not provided", () => {
93+
;(getApiRequestTimeout as any).mockReturnValue(600000)
94+
95+
const options: ApiHandlerOptions = {
96+
apiModelId: "llama2",
97+
ollamaModelId: "llama2",
98+
}
99+
100+
new OllamaHandler(options)
101+
102+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
103+
expect.objectContaining({
104+
baseURL: "http://localhost:11434/v1",
105+
}),
106+
)
107+
})
108+
})
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
// npx vitest run api/providers/__tests__/openai-timeout.spec.ts
2+
3+
import { OpenAiHandler } from "../openai"
4+
import { ApiHandlerOptions } from "../../../shared/api"
5+
6+
// Mock the timeout config utility
7+
vitest.mock("../utils/timeout-config", () => ({
8+
getApiRequestTimeout: vitest.fn(),
9+
}))
10+
11+
import { getApiRequestTimeout } from "../utils/timeout-config"
12+
13+
// Mock OpenAI and AzureOpenAI
14+
const mockOpenAIConstructor = vitest.fn()
15+
const mockAzureOpenAIConstructor = vitest.fn()
16+
17+
vitest.mock("openai", () => {
18+
return {
19+
__esModule: true,
20+
default: vitest.fn().mockImplementation((config) => {
21+
mockOpenAIConstructor(config)
22+
return {
23+
chat: {
24+
completions: {
25+
create: vitest.fn(),
26+
},
27+
},
28+
}
29+
}),
30+
AzureOpenAI: vitest.fn().mockImplementation((config) => {
31+
mockAzureOpenAIConstructor(config)
32+
return {
33+
chat: {
34+
completions: {
35+
create: vitest.fn(),
36+
},
37+
},
38+
}
39+
}),
40+
}
41+
})
42+
43+
describe("OpenAiHandler timeout configuration", () => {
44+
beforeEach(() => {
45+
vitest.clearAllMocks()
46+
})
47+
48+
it("should use default timeout for standard OpenAI", () => {
49+
;(getApiRequestTimeout as any).mockReturnValue(600000)
50+
51+
const options: ApiHandlerOptions = {
52+
apiModelId: "gpt-4",
53+
openAiModelId: "gpt-4",
54+
openAiApiKey: "test-key",
55+
}
56+
57+
new OpenAiHandler(options)
58+
59+
expect(getApiRequestTimeout).toHaveBeenCalled()
60+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
61+
expect.objectContaining({
62+
baseURL: "https://api.openai.com/v1",
63+
apiKey: "test-key",
64+
timeout: 600000, // 600 seconds in milliseconds
65+
}),
66+
)
67+
})
68+
69+
it("should use custom timeout for OpenAI-compatible providers", () => {
70+
;(getApiRequestTimeout as any).mockReturnValue(1800000) // 30 minutes
71+
72+
const options: ApiHandlerOptions = {
73+
apiModelId: "custom-model",
74+
openAiModelId: "custom-model",
75+
openAiBaseUrl: "http://localhost:8080/v1",
76+
openAiApiKey: "test-key",
77+
}
78+
79+
new OpenAiHandler(options)
80+
81+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
82+
expect.objectContaining({
83+
baseURL: "http://localhost:8080/v1",
84+
timeout: 1800000, // 1800 seconds in milliseconds
85+
}),
86+
)
87+
})
88+
89+
it("should use timeout for Azure OpenAI", () => {
90+
;(getApiRequestTimeout as any).mockReturnValue(900000) // 15 minutes
91+
92+
const options: ApiHandlerOptions = {
93+
apiModelId: "gpt-4",
94+
openAiModelId: "gpt-4",
95+
openAiBaseUrl: "https://myinstance.openai.azure.com",
96+
openAiApiKey: "test-key",
97+
openAiUseAzure: true,
98+
}
99+
100+
new OpenAiHandler(options)
101+
102+
expect(mockAzureOpenAIConstructor).toHaveBeenCalledWith(
103+
expect.objectContaining({
104+
timeout: 900000, // 900 seconds in milliseconds
105+
}),
106+
)
107+
})
108+
109+
it("should use timeout for Azure AI Inference", () => {
110+
;(getApiRequestTimeout as any).mockReturnValue(1200000) // 20 minutes
111+
112+
const options: ApiHandlerOptions = {
113+
apiModelId: "deepseek",
114+
openAiModelId: "deepseek",
115+
openAiBaseUrl: "https://myinstance.services.ai.azure.com",
116+
openAiApiKey: "test-key",
117+
}
118+
119+
new OpenAiHandler(options)
120+
121+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
122+
expect.objectContaining({
123+
timeout: 1200000, // 1200 seconds in milliseconds
124+
}),
125+
)
126+
})
127+
128+
it("should handle zero timeout (no timeout)", () => {
129+
;(getApiRequestTimeout as any).mockReturnValue(0)
130+
131+
const options: ApiHandlerOptions = {
132+
apiModelId: "gpt-4",
133+
openAiModelId: "gpt-4",
134+
}
135+
136+
new OpenAiHandler(options)
137+
138+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
139+
expect.objectContaining({
140+
timeout: 0, // No timeout
141+
}),
142+
)
143+
})
144+
})

src/api/providers/__tests__/openai.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ describe("OpenAiHandler", () => {
115115
"X-Title": "Roo Code",
116116
"User-Agent": `RooCode/${Package.version}`,
117117
},
118+
timeout: expect.any(Number),
118119
})
119120
})
120121
})

src/api/providers/lm-studio.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ import { ApiStream } from "../transform/stream"
1414
import { BaseProvider } from "./base-provider"
1515
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
1616
import { getModels, getModelsFromCache } from "./fetchers/modelCache"
17+
import { getApiRequestTimeout } from "./utils/timeout-config"
1718

1819
export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
1920
protected options: ApiHandlerOptions
@@ -22,9 +23,11 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
2223
constructor(options: ApiHandlerOptions) {
2324
super()
2425
this.options = options
26+
2527
this.client = new OpenAI({
2628
baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
2729
apiKey: "noop",
30+
timeout: getApiRequestTimeout(),
2831
})
2932
}
3033

src/api/providers/ollama.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import { ApiStream } from "../transform/stream"
1313

1414
import { BaseProvider } from "./base-provider"
1515
import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
16+
import { getApiRequestTimeout } from "./utils/timeout-config"
1617

1718
type CompletionUsage = OpenAI.Chat.Completions.ChatCompletionChunk["usage"]
1819

@@ -23,9 +24,11 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
2324
constructor(options: ApiHandlerOptions) {
2425
super()
2526
this.options = options
27+
2628
this.client = new OpenAI({
2729
baseURL: (this.options.ollamaBaseUrl || "http://localhost:11434") + "/v1",
2830
apiKey: "ollama",
31+
timeout: getApiRequestTimeout(),
2932
})
3033
}
3134

0 commit comments

Comments
 (0)