Skip to content

Commit c4ddbf6

Browse files
committed
Propagate AbortSignal to all provider fetchers and use native timeout
- Remove inline withTimeout helper in favor of AbortSignal.timeout()
- Add optional AbortSignal parameter to all provider model fetchers:
  - openrouter, requesty, glama, unbound, litellm, ollama, lmstudio
  - deepinfra, io-intelligence, vercel-ai-gateway, huggingface, roo
- Standardize timeout handling across modelCache and modelEndpointCache
- Add useRouterModelsAll hook for settings UI to fetch all providers
- Update Unbound and ApiOptions to use requestRouterModelsAll

This ensures consistent cancellation behavior and prepares for better request lifecycle management across the codebase.
1 parent 632bbe7 commit c4ddbf6

File tree

18 files changed

+365
-340
lines changed

18 files changed

+365
-340
lines changed

src/api/providers/fetchers/deepinfra.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,14 +35,15 @@ const DeepInfraModelsResponseSchema = z.object({ data: z.array(DeepInfraModelSch
3535
export async function getDeepInfraModels(
3636
apiKey?: string,
3737
baseUrl: string = "https://api.deepinfra.com/v1/openai",
38+
signal?: AbortSignal,
3839
): Promise<Record<string, ModelInfo>> {
3940
const headers: Record<string, string> = { ...DEFAULT_HEADERS }
4041
if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`
4142

4243
const url = `${baseUrl.replace(/\/$/, "")}/models`
4344
const models: Record<string, ModelInfo> = {}
4445

45-
const response = await axios.get(url, { headers })
46+
const response = await axios.get(url, { headers, signal })
4647
const parsed = DeepInfraModelsResponseSchema.safeParse(response.data)
4748
const data = parsed.success ? parsed.data.data : response.data?.data || []
4849

src/api/providers/fetchers/glama.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,11 @@ import type { ModelInfo } from "@roo-code/types"
44

55
import { parseApiPrice } from "../../../shared/cost"
66

7-
export async function getGlamaModels(): Promise<Record<string, ModelInfo>> {
7+
export async function getGlamaModels(signal?: AbortSignal): Promise<Record<string, ModelInfo>> {
88
const models: Record<string, ModelInfo> = {}
99

1010
try {
11-
const response = await axios.get("https://glama.ai/api/gateway/v1/models")
11+
const response = await axios.get("https://glama.ai/api/gateway/v1/models", { signal })
1212
const rawModels = response.data
1313

1414
for (const rawModel of rawModels) {

src/api/providers/fetchers/huggingface.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFacePr
107107
* @returns A promise that resolves to a record of model IDs to model info
108108
* @throws Will throw an error if the request fails
109109
*/
110-
export async function getHuggingFaceModels(): Promise<ModelRecord> {
110+
export async function getHuggingFaceModels(signal?: AbortSignal): Promise<ModelRecord> {
111111
const now = Date.now()
112112

113113
if (cache && now - cache.timestamp < HUGGINGFACE_CACHE_DURATION) {
@@ -128,7 +128,7 @@ export async function getHuggingFaceModels(): Promise<ModelRecord> {
128128
Pragma: "no-cache",
129129
"Cache-Control": "no-cache",
130130
},
131-
timeout: 10000,
131+
signal,
132132
})
133133

134134
const result = huggingFaceApiResponseSchema.safeParse(response.data)
@@ -236,7 +236,7 @@ export async function getHuggingFaceModelsWithMetadata(): Promise<HuggingFaceMod
236236
Pragma: "no-cache",
237237
"Cache-Control": "no-cache",
238238
},
239-
timeout: 10000,
239+
signal: AbortSignal.timeout(30000),
240240
})
241241

242242
const models = response.data?.data || []

src/api/providers/fetchers/io-intelligence.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo {
8383
* Fetches available models from IO Intelligence
8484
* <mcreference link="https://docs.io.net/reference/get-started-with-io-intelligence-api" index="1">1</mcreference>
8585
*/
86-
export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRecord> {
86+
export async function getIOIntelligenceModels(apiKey?: string, signal?: AbortSignal): Promise<ModelRecord> {
8787
const now = Date.now()
8888

8989
if (cache && now - cache.timestamp < IO_INTELLIGENCE_CACHE_DURATION) {
@@ -108,7 +108,7 @@ export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRec
108108
"https://api.intelligence.io.solutions/api/v1/models",
109109
{
110110
headers,
111-
timeout: 10_000,
111+
signal,
112112
},
113113
)
114114

src/api/providers/fetchers/litellm.ts

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ import { DEFAULT_HEADERS } from "../constants"
1111
* @returns A promise that resolves to a record of model IDs to model info
1212
* @throws Will throw an error if the request fails or the response is not as expected.
1313
*/
14-
export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise<ModelRecord> {
14+
export async function getLiteLLMModels(apiKey: string, baseUrl: string, signal?: AbortSignal): Promise<ModelRecord> {
1515
try {
1616
const headers: Record<string, string> = {
1717
"Content-Type": "application/json",
@@ -27,8 +27,7 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
2727
// Normalize the pathname by removing trailing slashes and multiple slashes
2828
urlObj.pathname = urlObj.pathname.replace(/\/+$/, "").replace(/\/+/g, "/") + "/v1/model/info"
2929
const url = urlObj.href
30-
// Added timeout to prevent indefinite hanging
31-
const response = await axios.get(url, { headers, timeout: 5000 })
30+
const response = await axios.get(url, { headers, signal })
3231
const models: ModelRecord = {}
3332

3433
// Process the model info from the response

src/api/providers/fetchers/lmstudio.ts

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,10 @@ export const parseLMStudioModel = (rawModel: LLMInstanceInfo | LLMInfo): ModelIn
4949
return modelInfo
5050
}
5151

52-
export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Promise<Record<string, ModelInfo>> {
52+
export async function getLMStudioModels(
53+
baseUrl = "http://localhost:1234",
54+
signal?: AbortSignal,
55+
): Promise<Record<string, ModelInfo>> {
5356
// clear the set of models that have full details loaded
5457
modelsWithLoadedDetails.clear()
5558
// clearing the input can leave an empty string; use the default in that case
@@ -66,7 +69,7 @@ export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Prom
6669

6770
// test the connection to LM Studio first
6871
// errors will be caught further down
69-
await axios.get(`${baseUrl}/v1/models`)
72+
await axios.get(`${baseUrl}/v1/models`, { signal })
7073

7174
const client = new LMStudioClient({ baseUrl: lmsUrl })
7275

0 commit comments

Comments
 (0)