Commit fddfde9 (1 parent: b62015d)

refactor: centralize API timeout configuration and add validation tests

- Create centralized getApiRequestTimeout() utility function
- Add comprehensive validation for timeout values (non-negative, handle NaN/null/undefined)
- Add extensive test coverage for all scenarios in a new test file
- Remove code duplication across providers (lm-studio.ts, ollama.ts, openai.ts)
- Maintain backward compatibility with existing behavior

25 files changed: +190 −86 lines
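The centralized utility itself (src/api/providers/utils/timeout-config.ts) is among the changed files not shown below. Inferred from the commit message and the updated tests — a 600-second default read from the roo-cline settings namespace, seconds converted to milliseconds, 0 meaning "no timeout", and NaN/null/undefined or negative values falling back to the default — a minimal sketch might look like this (the constant name and exact guards are assumptions, not the committed code):

// Hypothetical sketch of src/api/providers/utils/timeout-config.ts.
import * as vscode from "vscode"

const DEFAULT_TIMEOUT_SECONDS = 600 // assumed constant name

export function getApiRequestTimeout(): number {
	const seconds = vscode.workspace
		.getConfiguration("roo-cline")
		.get<number>("apiRequestTimeout", DEFAULT_TIMEOUT_SECONDS)

	// Guard against NaN, null/undefined, and negative values by falling
	// back to the default; 0 is preserved to mean "no timeout".
	if (typeof seconds !== "number" || Number.isNaN(seconds) || seconds < 0) {
		return DEFAULT_TIMEOUT_SECONDS * 1000
	}

	return seconds * 1000 // Convert seconds to milliseconds
}

This matches what the provider tests below assert: a configured 600 surfaces as 600000 ms in the OpenAI client options, and 0 passes through unchanged.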

src/api/providers/__tests__/lm-studio-timeout.spec.ts

Lines changed: 9 additions & 19 deletions

@@ -2,17 +2,14 @@

 import { LmStudioHandler } from "../lm-studio"
 import { ApiHandlerOptions } from "../../../shared/api"
-import * as vscode from "vscode"

-// Mock vscode
-vitest.mock("vscode", () => ({
-	workspace: {
-		getConfiguration: vitest.fn().mockReturnValue({
-			get: vitest.fn(),
-		}),
-	},
+// Mock the timeout config utility
+vitest.mock("../utils/timeout-config", () => ({
+	getApiRequestTimeout: vitest.fn(),
 }))

+import { getApiRequestTimeout } from "../utils/timeout-config"
+
 // Mock OpenAI
 const mockOpenAIConstructor = vitest.fn()
 vitest.mock("openai", () => {
@@ -32,18 +29,12 @@ vitest.mock("openai", () => {
 })

 describe("LmStudioHandler timeout configuration", () => {
-	let mockGetConfig: any
-
 	beforeEach(() => {
 		vitest.clearAllMocks()
-		mockGetConfig = vitest.fn()
-		;(vscode.workspace.getConfiguration as any).mockReturnValue({
-			get: mockGetConfig,
-		})
 	})

 	it("should use default timeout of 600 seconds when no configuration is set", () => {
-		mockGetConfig.mockReturnValue(600)
+		;(getApiRequestTimeout as any).mockReturnValue(600000)

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",
@@ -53,8 +44,7 @@ describe("LmStudioHandler timeout configuration", () => {

 		new LmStudioHandler(options)

-		expect(vscode.workspace.getConfiguration).toHaveBeenCalledWith("roo-cline")
-		expect(mockGetConfig).toHaveBeenCalledWith("apiRequestTimeout", 600)
+		expect(getApiRequestTimeout).toHaveBeenCalled()
 		expect(mockOpenAIConstructor).toHaveBeenCalledWith(
 			expect.objectContaining({
 				baseURL: "http://localhost:1234/v1",
@@ -65,7 +55,7 @@ describe("LmStudioHandler timeout configuration", () => {
 	})

 	it("should use custom timeout when configuration is set", () => {
-		mockGetConfig.mockReturnValue(1200) // 20 minutes
+		;(getApiRequestTimeout as any).mockReturnValue(1200000) // 20 minutes

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",
@@ -83,7 +73,7 @@ describe("LmStudioHandler timeout configuration", () => {
 	})

 	it("should handle zero timeout (no timeout)", () => {
-		mockGetConfig.mockReturnValue(0)
+		;(getApiRequestTimeout as any).mockReturnValue(0)

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",

src/api/providers/__tests__/ollama-timeout.spec.ts

Lines changed: 10 additions & 20 deletions

@@ -2,17 +2,14 @@

 import { OllamaHandler } from "../ollama"
 import { ApiHandlerOptions } from "../../../shared/api"
-import * as vscode from "vscode"

-// Mock vscode
-vitest.mock("vscode", () => ({
-	workspace: {
-		getConfiguration: vitest.fn().mockReturnValue({
-			get: vitest.fn(),
-		}),
-	},
+// Mock the timeout config utility
+vitest.mock("../utils/timeout-config", () => ({
+	getApiRequestTimeout: vitest.fn(),
 }))

+import { getApiRequestTimeout } from "../utils/timeout-config"
+
 // Mock OpenAI
 const mockOpenAIConstructor = vitest.fn()
 vitest.mock("openai", () => {
@@ -32,18 +29,12 @@ vitest.mock("openai", () => {
 })

 describe("OllamaHandler timeout configuration", () => {
-	let mockGetConfig: any
-
 	beforeEach(() => {
 		vitest.clearAllMocks()
-		mockGetConfig = vitest.fn()
-		;(vscode.workspace.getConfiguration as any).mockReturnValue({
-			get: mockGetConfig,
-		})
 	})

 	it("should use default timeout of 600 seconds when no configuration is set", () => {
-		mockGetConfig.mockReturnValue(600)
+		;(getApiRequestTimeout as any).mockReturnValue(600000)

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",
@@ -53,8 +44,7 @@ describe("OllamaHandler timeout configuration", () => {

 		new OllamaHandler(options)

-		expect(vscode.workspace.getConfiguration).toHaveBeenCalledWith("roo-cline")
-		expect(mockGetConfig).toHaveBeenCalledWith("apiRequestTimeout", 600)
+		expect(getApiRequestTimeout).toHaveBeenCalled()
 		expect(mockOpenAIConstructor).toHaveBeenCalledWith(
 			expect.objectContaining({
 				baseURL: "http://localhost:11434/v1",
@@ -65,7 +55,7 @@ describe("OllamaHandler timeout configuration", () => {
 	})

 	it("should use custom timeout when configuration is set", () => {
-		mockGetConfig.mockReturnValue(3600) // 1 hour
+		;(getApiRequestTimeout as any).mockReturnValue(3600000) // 1 hour

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",
@@ -82,7 +72,7 @@ describe("OllamaHandler timeout configuration", () => {
 	})

 	it("should handle zero timeout (no timeout)", () => {
-		mockGetConfig.mockReturnValue(0)
+		;(getApiRequestTimeout as any).mockReturnValue(0)

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",
@@ -100,7 +90,7 @@ describe("OllamaHandler timeout configuration", () => {
 	})

 	it("should use default base URL when not provided", () => {
-		mockGetConfig.mockReturnValue(600)
+		;(getApiRequestTimeout as any).mockReturnValue(600000)

 		const options: ApiHandlerOptions = {
 			apiModelId: "llama2",

src/api/providers/__tests__/openai-timeout.spec.ts

Lines changed: 11 additions & 21 deletions

@@ -2,17 +2,14 @@

 import { OpenAiHandler } from "../openai"
 import { ApiHandlerOptions } from "../../../shared/api"
-import * as vscode from "vscode"

-// Mock vscode
-vitest.mock("vscode", () => ({
-	workspace: {
-		getConfiguration: vitest.fn().mockReturnValue({
-			get: vitest.fn(),
-		}),
-	},
+// Mock the timeout config utility
+vitest.mock("../utils/timeout-config", () => ({
+	getApiRequestTimeout: vitest.fn(),
 }))

+import { getApiRequestTimeout } from "../utils/timeout-config"
+
 // Mock OpenAI and AzureOpenAI
 const mockOpenAIConstructor = vitest.fn()
 const mockAzureOpenAIConstructor = vitest.fn()
@@ -44,18 +41,12 @@ vitest.mock("openai", () => {
 })

 describe("OpenAiHandler timeout configuration", () => {
-	let mockGetConfig: any
-
 	beforeEach(() => {
 		vitest.clearAllMocks()
-		mockGetConfig = vitest.fn()
-		;(vscode.workspace.getConfiguration as any).mockReturnValue({
-			get: mockGetConfig,
-		})
 	})

 	it("should use default timeout for standard OpenAI", () => {
-		mockGetConfig.mockReturnValue(600)
+		;(getApiRequestTimeout as any).mockReturnValue(600000)

 		const options: ApiHandlerOptions = {
 			apiModelId: "gpt-4",
@@ -65,8 +56,7 @@ describe("OpenAiHandler timeout configuration", () => {

 		new OpenAiHandler(options)

-		expect(vscode.workspace.getConfiguration).toHaveBeenCalledWith("roo-cline")
-		expect(mockGetConfig).toHaveBeenCalledWith("apiRequestTimeout", 600)
+		expect(getApiRequestTimeout).toHaveBeenCalled()
 		expect(mockOpenAIConstructor).toHaveBeenCalledWith(
 			expect.objectContaining({
 				baseURL: "https://api.openai.com/v1",
@@ -77,7 +67,7 @@ describe("OpenAiHandler timeout configuration", () => {
 	})

 	it("should use custom timeout for OpenAI-compatible providers", () => {
-		mockGetConfig.mockReturnValue(1800) // 30 minutes
+		;(getApiRequestTimeout as any).mockReturnValue(1800000) // 30 minutes

 		const options: ApiHandlerOptions = {
 			apiModelId: "custom-model",
@@ -97,7 +87,7 @@ describe("OpenAiHandler timeout configuration", () => {
 	})

 	it("should use timeout for Azure OpenAI", () => {
-		mockGetConfig.mockReturnValue(900) // 15 minutes
+		;(getApiRequestTimeout as any).mockReturnValue(900000) // 15 minutes

 		const options: ApiHandlerOptions = {
 			apiModelId: "gpt-4",
@@ -117,7 +107,7 @@ describe("OpenAiHandler timeout configuration", () => {
 	})

 	it("should use timeout for Azure AI Inference", () => {
-		mockGetConfig.mockReturnValue(1200) // 20 minutes
+		;(getApiRequestTimeout as any).mockReturnValue(1200000) // 20 minutes

 		const options: ApiHandlerOptions = {
 			apiModelId: "deepseek",
@@ -136,7 +126,7 @@ describe("OpenAiHandler timeout configuration", () => {
 	})

 	it("should handle zero timeout (no timeout)", () => {
-		mockGetConfig.mockReturnValue(0)
+		;(getApiRequestTimeout as any).mockReturnValue(0)

 		const options: ApiHandlerOptions = {
 			apiModelId: "gpt-4",

src/api/providers/lm-studio.ts

Lines changed: 2 additions & 3 deletions

@@ -1,7 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import axios from "axios"
-import * as vscode from "vscode"

 import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"

@@ -15,6 +14,7 @@ import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
+import { getApiRequestTimeout } from "./utils/timeout-config"

 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -23,12 +23,11 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options
-		const timeoutSeconds = vscode.workspace.getConfiguration("roo-cline").get<number>("apiRequestTimeout", 600)

 		this.client = new OpenAI({
 			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
 			apiKey: "noop",
-			timeout: timeoutSeconds * 1000, // Convert to milliseconds
+			timeout: getApiRequestTimeout(),
 		})
 	}

src/api/providers/ollama.ts

Lines changed: 2 additions & 3 deletions

@@ -1,6 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import * as vscode from "vscode"

 import { type ModelInfo, openAiModelInfoSaneDefaults, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types"

@@ -14,6 +13,7 @@ import { ApiStream } from "../transform/stream"

 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { getApiRequestTimeout } from "./utils/timeout-config"

 type CompletionUsage = OpenAI.Chat.Completions.ChatCompletionChunk["usage"]

@@ -24,12 +24,11 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 	constructor(options: ApiHandlerOptions) {
 		super()
 		this.options = options
-		const timeoutSeconds = vscode.workspace.getConfiguration("roo-cline").get<number>("apiRequestTimeout", 600)

 		this.client = new OpenAI({
 			baseURL: (this.options.ollamaBaseUrl || "http://localhost:11434") + "/v1",
 			apiKey: "ollama",
-			timeout: timeoutSeconds * 1000, // Convert to milliseconds
+			timeout: getApiRequestTimeout(),
 		})
 	}

src/api/providers/openai.ts

Lines changed: 2 additions & 3 deletions

@@ -1,7 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI, { AzureOpenAI } from "openai"
 import axios from "axios"
-import * as vscode from "vscode"

 import {
 	type ModelInfo,
@@ -24,6 +23,7 @@ import { getModelParams } from "../transform/model-params"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { getApiRequestTimeout } from "./utils/timeout-config"

 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -47,8 +47,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			...(this.options.openAiHeaders || {}),
 		}

-		const timeoutSeconds = vscode.workspace.getConfiguration("roo-cline").get<number>("apiRequestTimeout", 600)
-		const timeout = timeoutSeconds * 1000 // Convert to milliseconds
+		const timeout = getApiRequestTimeout()

 		if (isAzureAiInference) {
 			// Azure AI Inference Service (e.g., for DeepSeek) uses a different path structure
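The commit message also mentions extensive validation coverage for the new utility in a new test file, which is likewise not among the diffs shown here. A hedged sketch of the cases such a file would plausibly cover, mirroring the mocking style of the spec files above (file location, mock shape, and assertions are assumptions):

// Hypothetical sketch of the new timeout-config validation tests.
vitest.mock("vscode", () => ({
	workspace: {
		getConfiguration: vitest.fn(),
	},
}))

import * as vscode from "vscode"
import { getApiRequestTimeout } from "../timeout-config"

describe("getApiRequestTimeout", () => {
	const mockGet = vitest.fn()

	beforeEach(() => {
		vitest.clearAllMocks()
		;(vscode.workspace.getConfiguration as any).mockReturnValue({ get: mockGet })
	})

	it("converts a configured value from seconds to milliseconds", () => {
		mockGet.mockReturnValue(1200)
		expect(getApiRequestTimeout()).toBe(1200000)
	})

	it("treats 0 as 'no timeout'", () => {
		mockGet.mockReturnValue(0)
		expect(getApiRequestTimeout()).toBe(0)
	})

	it("falls back to the 600-second default for negative, NaN, or missing values", () => {
		for (const bad of [-5, NaN, null, undefined]) {
			mockGet.mockReturnValue(bad)
			expect(getApiRequestTimeout()).toBe(600000)
		}
	})
})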
