20 changes: 10 additions & 10 deletions src/api/providers/fetchers/__tests__/litellm.spec.ts
@@ -35,7 +35,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -56,7 +56,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -77,7 +77,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -98,7 +98,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -119,7 +119,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -140,7 +140,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -161,7 +161,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -213,7 +213,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})

expect(result).toEqual({
@@ -254,7 +254,7 @@ describe("getLiteLLMModels", () => {
"Content-Type": "application/json",
...DEFAULT_HEADERS,
},
- timeout: 5000,
+ signal: undefined,
})
})

@@ -381,7 +381,7 @@ describe("getLiteLLMModels", () => {
expect(mockedAxios.get).toHaveBeenCalledWith(
"http://localhost:4000/v1/model/info",
expect.objectContaining({
- timeout: 5000,
+ signal: undefined,
}),
)
})
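Reviewer note: these assertions now expect axios to receive a `signal` option (currently `undefined`, since the fetcher is called without one) in place of the hard-coded `timeout: 5000`. For call sites that still want a deadline, a minimal sketch of combining a timeout with an external abort signal (the `fetchWithDeadline` helper is hypothetical, not part of this diff; `AbortSignal.any` requires Node 20.3+):

```typescript
import axios from "axios"

// Hypothetical helper, not part of this diff: restore the old 5s cap
// while still honoring an externally supplied abort signal.
// AbortSignal.any aborts as soon as either source signal does.
async function fetchWithDeadline(url: string, callerSignal?: AbortSignal) {
	const signals = [AbortSignal.timeout(5000)]
	if (callerSignal) signals.push(callerSignal)
	const response = await axios.get(url, { signal: AbortSignal.any(signals) })
	return response.data
}
```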
12 changes: 6 additions & 6 deletions src/api/providers/fetchers/__tests__/lmstudio.test.ts
@@ -113,7 +113,7 @@ describe("LMStudio Fetcher", () => {
const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
- expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
+ expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`, { signal: undefined })
expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
@@ -133,7 +133,7 @@ describe("LMStudio Fetcher", () => {
const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
- expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
+ expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`, { signal: undefined })
expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
@@ -373,7 +373,7 @@ describe("LMStudio Fetcher", () => {

await getLMStudioModels("")

- expect(mockedAxios.get).toHaveBeenCalledWith(`${defaultBaseUrl}/v1/models`)
+ expect(mockedAxios.get).toHaveBeenCalledWith(`${defaultBaseUrl}/v1/models`, { signal: undefined })
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: defaultLmsUrl })
})

@@ -385,7 +385,7 @@ describe("LMStudio Fetcher", () => {

await getLMStudioModels(httpsBaseUrl)

- expect(mockedAxios.get).toHaveBeenCalledWith(`${httpsBaseUrl}/v1/models`)
+ expect(mockedAxios.get).toHaveBeenCalledWith(`${httpsBaseUrl}/v1/models`, { signal: undefined })
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: wssLmsUrl })
})

@@ -407,7 +407,7 @@ describe("LMStudio Fetcher", () => {
const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
- expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
+ expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`, { signal: undefined })
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
expect(mockListLoaded).not.toHaveBeenCalled()
expect(consoleErrorSpy).toHaveBeenCalledWith(
@@ -426,7 +426,7 @@ describe("LMStudio Fetcher", () => {
const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
- expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
+ expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`, { signal: undefined })
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
expect(mockListLoaded).not.toHaveBeenCalled()
expect(consoleInfoSpy).toHaveBeenCalledWith(`Error connecting to LMStudio at ${baseUrl}`)
10 changes: 7 additions & 3 deletions src/api/providers/fetchers/__tests__/modelCache.spec.ts
@@ -69,7 +69,11 @@ describe("getModels with new GetModelsOptions", () => {
baseUrl: "http://localhost:4000",
})

- expect(mockGetLiteLLMModels).toHaveBeenCalledWith("test-api-key", "http://localhost:4000")
+ expect(mockGetLiteLLMModels).toHaveBeenCalledWith(
+ "test-api-key",
+ "http://localhost:4000",
+ expect.any(AbortSignal),
+ )
expect(result).toEqual(mockModels)
})

@@ -103,7 +107,7 @@ describe("getModels with new GetModelsOptions", () => {

const result = await getModels({ provider: "requesty", apiKey: DUMMY_REQUESTY_KEY })

- expect(mockGetRequestyModels).toHaveBeenCalledWith(undefined, DUMMY_REQUESTY_KEY)
+ expect(mockGetRequestyModels).toHaveBeenCalledWith(undefined, DUMMY_REQUESTY_KEY, expect.any(AbortSignal))
expect(result).toEqual(mockModels)
})

@@ -137,7 +141,7 @@ describe("getModels with new GetModelsOptions", () => {

const result = await getModels({ provider: "unbound", apiKey: DUMMY_UNBOUND_KEY })

- expect(mockGetUnboundModels).toHaveBeenCalledWith(DUMMY_UNBOUND_KEY)
+ expect(mockGetUnboundModels).toHaveBeenCalledWith(DUMMY_UNBOUND_KEY, expect.any(AbortSignal))
expect(result).toEqual(mockModels)
})

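Reviewer note: the `expect.any(AbortSignal)` assertions imply that `getModels` now constructs a signal itself and threads it through to each provider fetcher. A plausible sketch of that wiring, assuming one `AbortController` per call (the real implementation may source the signal differently):

```typescript
import { getLiteLLMModels } from "../litellm"

// Sketch only: one AbortController per getModels call, with its signal
// threaded to the provider fetcher so the whole fetch can be cancelled
// in one place.
async function getModelsSketch(apiKey: string, baseUrl: string) {
	const controller = new AbortController()
	// e.g. wire controller.abort() to a dispose event or an overall deadline
	return await getLiteLLMModels(apiKey, baseUrl, controller.signal)
}
```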
@@ -77,7 +77,9 @@ describe("Vercel AI Gateway Fetchers", () => {

const models = await getVercelAiGatewayModels()

- expect(mockedAxios.get).toHaveBeenCalledWith("https://ai-gateway.vercel.sh/v1/models")
+ expect(mockedAxios.get).toHaveBeenCalledWith("https://ai-gateway.vercel.sh/v1/models", {
+ signal: undefined,
+ })
expect(Object.keys(models)).toHaveLength(2) // Only language models
expect(models["anthropic/claude-sonnet-4"]).toBeDefined()
expect(models["anthropic/claude-3.5-haiku"]).toBeDefined()
3 changes: 2 additions & 1 deletion src/api/providers/fetchers/deepinfra.ts
@@ -35,14 +35,15 @@ const DeepInfraModelsResponseSchema = z.object({ data: z.array(DeepInfraModelSch
export async function getDeepInfraModels(
apiKey?: string,
baseUrl: string = "https://api.deepinfra.com/v1/openai",
+ signal?: AbortSignal,
): Promise<Record<string, ModelInfo>> {
const headers: Record<string, string> = { ...DEFAULT_HEADERS }
if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`

const url = `${baseUrl.replace(/\/$/, "")}/models`
const models: Record<string, ModelInfo> = {}

- const response = await axios.get(url, { headers })
+ const response = await axios.get(url, { headers, signal })
const parsed = DeepInfraModelsResponseSchema.safeParse(response.data)
const data = parsed.success ? parsed.data.data : response.data?.data || []

4 changes: 2 additions & 2 deletions src/api/providers/fetchers/glama.ts
@@ -4,11 +4,11 @@ import type { ModelInfo } from "@roo-code/types"

import { parseApiPrice } from "../../../shared/cost"

- export async function getGlamaModels(): Promise<Record<string, ModelInfo>> {
+ export async function getGlamaModels(signal?: AbortSignal): Promise<Record<string, ModelInfo>> {
const models: Record<string, ModelInfo> = {}

try {
const response = await axios.get("https://glama.ai/api/gateway/v1/models")
const response = await axios.get("https://glama.ai/api/gateway/v1/models", { signal })
const rawModels = response.data

for (const rawModel of rawModels) {
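Reviewer note: `getGlamaModels` keeps its broad `try`/`catch`, so a caller-initiated abort would be swallowed along with genuine network failures. If aborts should propagate, a guard along these lines (hypothetical, not in this diff) would rethrow cancellations:

```typescript
import axios from "axios"

// Hypothetical variant of the fetch inside getGlamaModels: rethrow
// cancellations so the caller can observe its own abort, and only
// log genuine failures.
async function fetchGlamaModelList(signal?: AbortSignal) {
	try {
		const response = await axios.get("https://glama.ai/api/gateway/v1/models", { signal })
		return response.data
	} catch (error) {
		if (axios.isCancel(error)) throw error // propagate aborts
		console.error("Error fetching Glama models:", error)
		return []
	}
}
```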
6 changes: 3 additions & 3 deletions src/api/providers/fetchers/huggingface.ts
@@ -107,7 +107,7 @@ function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFacePr
* @returns A promise that resolves to a record of model IDs to model info
* @throws Will throw an error if the request fails
*/
- export async function getHuggingFaceModels(): Promise<ModelRecord> {
+ export async function getHuggingFaceModels(signal?: AbortSignal): Promise<ModelRecord> {
const now = Date.now()

if (cache && now - cache.timestamp < HUGGINGFACE_CACHE_DURATION) {
@@ -128,7 +128,7 @@ export async function getHuggingFaceModels(): Promise<ModelRecord> {
Pragma: "no-cache",
"Cache-Control": "no-cache",
},
- timeout: 10000,
+ signal,
})

const result = huggingFaceApiResponseSchema.safeParse(response.data)
@@ -236,7 +236,7 @@ export async function getHuggingFaceModelsWithMetadata(): Promise<HuggingFaceMod
Pragma: "no-cache",
"Cache-Control": "no-cache",
},
- timeout: 10000,
+ signal: AbortSignal.timeout(30000),
})

const models = response.data?.data || []
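Reviewer note: there is an asymmetry here: `getHuggingFaceModels` defers entirely to the caller's `signal`, while `getHuggingFaceModelsWithMetadata` pins its own 30-second deadline via `AbortSignal.timeout(30000)`. When such a signal fires, axios rejects with a cancellation rather than an `ECONNABORTED` timeout, so handlers that check `error.code` may need `axios.isCancel` instead. A small demo of that behavior (placeholder URL):

```typescript
import axios from "axios"

// Demo of how an AbortSignal.timeout abort surfaces in axios: as a
// cancellation (axios.isCancel returns true), not as the ECONNABORTED
// error a classic `timeout` option would produce.
async function demoTimeoutSignal() {
	try {
		await axios.get("https://example.com/v1/models", {
			signal: AbortSignal.timeout(30_000),
		})
	} catch (error) {
		if (axios.isCancel(error)) {
			console.error("Request aborted by timeout signal")
		} else {
			throw error
		}
	}
}
```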
4 changes: 2 additions & 2 deletions src/api/providers/fetchers/io-intelligence.ts
@@ -83,7 +83,7 @@ function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo {
* Fetches available models from IO Intelligence
* <mcreference link="https://docs.io.net/reference/get-started-with-io-intelligence-api" index="1">1</mcreference>
*/
- export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRecord> {
+ export async function getIOIntelligenceModels(apiKey?: string, signal?: AbortSignal): Promise<ModelRecord> {
const now = Date.now()

if (cache && now - cache.timestamp < IO_INTELLIGENCE_CACHE_DURATION) {
@@ -108,7 +108,7 @@ export async function getIOIntelligenceModels(apiKey?: string): Promise<ModelRec
"https://api.intelligence.io.solutions/api/v1/models",
{
headers,
- timeout: 10_000,
+ signal,
},
)

Expand Down
5 changes: 2 additions & 3 deletions src/api/providers/fetchers/litellm.ts
@@ -11,7 +11,7 @@ import { DEFAULT_HEADERS } from "../constants"
* @returns A promise that resolves to a record of model IDs to model info
* @throws Will throw an error if the request fails or the response is not as expected.
*/
- export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise<ModelRecord> {
+ export async function getLiteLLMModels(apiKey: string, baseUrl: string, signal?: AbortSignal): Promise<ModelRecord> {
try {
const headers: Record<string, string> = {
"Content-Type": "application/json",
@@ -27,8 +27,7 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
// Normalize the pathname by removing trailing slashes and multiple slashes
urlObj.pathname = urlObj.pathname.replace(/\/+$/, "").replace(/\/+/g, "/") + "/v1/model/info"
const url = urlObj.href
- // Added timeout to prevent indefinite hanging
- const response = await axios.get(url, { headers, timeout: 5000 })
+ const response = await axios.get(url, { headers, signal })
const models: ModelRecord = {}

// Process the model info from the response
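Reviewer note: the deleted comment ("Added timeout to prevent indefinite hanging") marks a behavioral change: with no caller-supplied signal, this request can now hang indefinitely. A caller wanting the old guard back could pass a timeout signal explicitly, e.g.:

```typescript
// Hypothetical call site: restore the previous 5-second cap explicitly.
const models = await getLiteLLMModels(apiKey, baseUrl, AbortSignal.timeout(5000))
```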
7 changes: 5 additions & 2 deletions src/api/providers/fetchers/lmstudio.ts
@@ -49,7 +49,7 @@ export const parseLMStudioModel = (rawModel: LLMInstanceInfo | LLMInfo): ModelIn
return modelInfo
}

- export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Promise<Record<string, ModelInfo>> {
+ export async function getLMStudioModels(
+ baseUrl = "http://localhost:1234",
+ signal?: AbortSignal,
+ ): Promise<Record<string, ModelInfo>> {
// clear the set of models that have full details loaded
modelsWithLoadedDetails.clear()
// clearing the input can leave an empty string; use the default in that case
@@ -66,7 +69,7 @@ export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Prom

// test the connection to LM Studio first
// errors will be caught further down
- await axios.get(`${baseUrl}/v1/models`)
+ await axios.get(`${baseUrl}/v1/models`, { signal })

const client = new LMStudioClient({ baseUrl: lmsUrl })

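Reviewer note: only the initial connectivity probe honors the signal here; the `LMStudioClient` SDK calls that follow take no signal, so an abort after the probe will not cancel the SDK traffic. A cheap guard (hypothetical, not in this diff) could at least stop before the SDK work begins:

```typescript
// Bail out early if the caller aborted during the HTTP probe;
// throwIfAborted() throws the signal's abort reason (Node 17.3+).
signal?.throwIfAborted()
const client = new LMStudioClient({ baseUrl: lmsUrl })
```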