Commit 373fbc9

Revert "feat(models): add per-model timeout disable to avoid global override for long-running models (e.g., gpt-5-pro)"
This reverts commit ed2a17a.
1 parent dcc8791 · commit 373fbc9

Showing 7 changed files with 3 additions and 96 deletions.

packages/types/src/model.ts

Lines changed: 0 additions & 2 deletions
```diff
@@ -63,8 +63,6 @@ export const modelInfoSchema = z.object({
 	supportsReasoningBudget: z.boolean().optional(),
 	// Capability flag to indicate whether the model supports temperature parameter
 	supportsTemperature: z.boolean().optional(),
-	// When true, force-disable request timeouts for this model (providers will set timeout=0)
-	disableTimeout: z.boolean().optional(),
 	requiredReasoningBudget: z.boolean().optional(),
 	supportsReasoningEffort: z.boolean().optional(),
 	supportedParameters: z.array(modelParametersSchema).optional(),
```
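
One side effect of removing the field is worth noting: `z.object()` strips unknown keys by default, so stored configurations that still carry `disableTimeout` keep parsing after the revert; the key is silently dropped rather than rejected. A minimal sketch of that behavior (the abbreviated schema and sample payload below are illustrative, not the full `modelInfoSchema`):

```ts
import { z } from "zod"

// Abbreviated stand-in for modelInfoSchema after the revert (no disableTimeout field).
const modelInfoSchema = z.object({
	supportsTemperature: z.boolean().optional(),
	supportsReasoningEffort: z.boolean().optional(),
})

// A saved config that still sets the removed flag parses cleanly; zod strips the unknown key.
const info = modelInfoSchema.parse({ supportsTemperature: true, disableTimeout: true })
console.log("disableTimeout" in info) // false: the flag no longer survives validation
```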

src/api/providers/__tests__/lm-studio-timeout.spec.ts

Lines changed: 0 additions & 30 deletions
```diff
@@ -88,34 +88,4 @@ describe("LmStudioHandler timeout configuration", () => {
 			}),
 		)
 	})
-
-	it("should force zero timeout when model info disables timeout", () => {
-		;(getApiRequestTimeout as any).mockReturnValue(600000)
-
-		const spy = vitest.spyOn(LmStudioHandler.prototype as any, "getModel").mockReturnValue({
-			id: "llama2",
-			info: {
-				maxTokens: -1,
-				contextWindow: 128000,
-				supportsPromptCache: false,
-				supportsImages: true,
-				disableTimeout: true,
-			},
-		})
-
-		const options: ApiHandlerOptions = {
-			apiModelId: "llama2",
-			lmStudioModelId: "llama2",
-		}
-
-		new LmStudioHandler(options)
-
-		expect(mockOpenAIConstructor).toHaveBeenCalledWith(
-			expect.objectContaining({
-				timeout: 0,
-			}),
-		)
-
-		spy.mockRestore()
-	})
 })
```

src/api/providers/__tests__/ollama-timeout.spec.ts

Lines changed: 0 additions & 31 deletions
```diff
@@ -89,37 +89,6 @@ describe("OllamaHandler timeout configuration", () => {
 		)
 	})
 
-	it("should force zero timeout when model info disables timeout", () => {
-		;(getApiRequestTimeout as any).mockReturnValue(600000)
-
-		const spy = vitest.spyOn(OllamaHandler.prototype as any, "getModel").mockReturnValue({
-			id: "llama2",
-			info: {
-				maxTokens: -1,
-				contextWindow: 128000,
-				supportsPromptCache: false,
-				supportsImages: true,
-				disableTimeout: true,
-			},
-		})
-
-		const options: ApiHandlerOptions = {
-			apiModelId: "llama2",
-			ollamaModelId: "llama2",
-			ollamaBaseUrl: "http://localhost:11434",
-		}
-
-		new OllamaHandler(options)
-
-		expect(mockOpenAIConstructor).toHaveBeenCalledWith(
-			expect.objectContaining({
-				timeout: 0,
-			}),
-		)
-
-		spy.mockRestore()
-	})
-
 	it("should use default base URL when not provided", () => {
 		;(getApiRequestTimeout as any).mockReturnValue(600000)
```

src/api/providers/__tests__/openai-timeout.spec.ts

Lines changed: 0 additions & 24 deletions
```diff
@@ -141,28 +141,4 @@ describe("OpenAiHandler timeout configuration", () => {
 			}),
 		)
 	})
-
-	it("should force zero timeout when model info disables timeout", () => {
-		;(getApiRequestTimeout as any).mockReturnValue(600000)
-
-		const options: ApiHandlerOptions = {
-			apiModelId: "gpt-4",
-			openAiModelId: "gpt-4",
-			openAiCustomModelInfo: {
-				maxTokens: -1,
-				contextWindow: 128000,
-				supportsPromptCache: false,
-				supportsImages: true,
-				disableTimeout: true,
-			} as any,
-		}
-
-		new OpenAiHandler(options)
-
-		expect(mockOpenAIConstructor).toHaveBeenCalledWith(
-			expect.objectContaining({
-				timeout: 0, // Forced no timeout via model info
-			}),
-		)
-	})
 })
```
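
The timeout tests that survive in all three spec files share the shape below: they assert that whatever the mocked global setting returns is exactly what reaches the OpenAI constructor. This is a reconstructed sketch assuming the mocks named in the specs (`getApiRequestTimeout`, `mockOpenAIConstructor`), not a verbatim excerpt from the diff:

```ts
it("passes the global timeout through to the OpenAI client", () => {
	;(getApiRequestTimeout as any).mockReturnValue(600000)

	new OpenAiHandler({
		apiModelId: "gpt-4",
		openAiModelId: "gpt-4",
	} as ApiHandlerOptions)

	// With the per-model branch reverted, the constructor should always
	// receive the value returned by getApiRequestTimeout().
	expect(mockOpenAIConstructor).toHaveBeenCalledWith(expect.objectContaining({ timeout: 600000 }))
})
```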

src/api/providers/lm-studio.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -32,7 +32,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		this.client = new OpenAI({
 			baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
 			apiKey: apiKey,
-			timeout: this.getModel().info?.disableTimeout === true ? 0 : getApiRequestTimeout(),
+			timeout: getApiRequestTimeout(),
 		})
 	}
```

src/api/providers/ollama.ts

Lines changed: 1 addition & 1 deletion
```diff
@@ -39,7 +39,7 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 		this.client = new OpenAI({
 			baseURL: (this.options.ollamaBaseUrl || "http://localhost:11434") + "/v1",
 			apiKey: apiKey,
-			timeout: this.getModel().info?.disableTimeout === true ? 0 : getApiRequestTimeout(),
+			timeout: getApiRequestTimeout(),
 			defaultHeaders: headers,
 		})
 	}
```
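
Both provider changes above collapse to the same pattern: the client timeout now comes solely from the global setting, with no model-info lookup in the constructor. A minimal sketch of the post-revert behavior, with a stub standing in for the real `getApiRequestTimeout` settings helper:

```ts
import OpenAI from "openai"

// Stub for illustration only; the real helper reads the user's global API timeout setting.
function getApiRequestTimeout(): number {
	return 600_000 // milliseconds
}

// Ollama-style construction after the revert: every model shares the same timeout.
const client = new OpenAI({
	baseURL: "http://localhost:11434/v1",
	apiKey: "ollama",
	timeout: getApiRequestTimeout(),
})
```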

src/api/providers/openai.ts

Lines changed: 1 addition & 7 deletions
```diff
@@ -49,13 +49,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			...(this.options.openAiHeaders || {}),
 		}
 
-		let timeout = getApiRequestTimeout()
-		try {
-			const modelInfo = this.getModel().info
-			if (modelInfo?.disableTimeout === true) {
-				timeout = 0
-			}
-		} catch {}
+		const timeout = getApiRequestTimeout()
 
 		if (isAzureAiInference) {
 			// Azure AI Inference Service (e.g., for DeepSeek) uses a different path structure
```
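
The revert removes the constructor-level escape hatch, not the ability to wait longer on a specific call: the OpenAI Node SDK also accepts per-request options, so a long-running model can still be given a generous timeout at the call site. A hedged sketch, not part of this diff, reusing the `client` constructed in the previous example:

```ts
// Per-request override: this one call gets its own timeout without touching
// the client-wide value derived from getApiRequestTimeout().
const completion = await client.chat.completions.create(
	{ model: "gpt-4", messages: [{ role: "user", content: "Summarize the revert." }] },
	{ timeout: 15 * 60 * 1000 }, // 15 minutes, for this request only
)
console.log(completion.choices[0].message.content)
```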
