Skip to content

Commit 44ca7d4

Browse files
committed
fix: add fallback to OpenAI-compatible API for LM Studio model detection
- Modified getLMStudioModels to fall back to /v1/models endpoint when SDK methods fail - This fixes the issue where models like openai/gpt-oss-20b were not being detected - Added comprehensive test coverage for the fallback behavior - Fixes #6766
1 parent 2b647ed commit 44ca7d4

File tree

2 files changed

+113
-13
lines changed

2 files changed

+113
-13
lines changed

src/api/providers/fetchers/__tests__/lmstudio.test.ts

Lines changed: 81 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -212,11 +212,12 @@ describe("LMStudio Fetcher", () => {
 			consoleInfoSpy.mockRestore()
 		})

-		it("should return an empty object and log error if listDownloadedModels fails", async () => {
-			const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
+		it("should return an empty object and log warning if listLoaded fails", async () => {
+			const consoleWarnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
 			const listError = new Error("LMStudio SDK internal error")

 			mockedAxios.get.mockResolvedValueOnce({ data: {} })
+			mockListDownloadedModels.mockRejectedValueOnce(new Error("Failed to list downloaded"))
 			mockListLoaded.mockRejectedValueOnce(listError)

 			const result = await getLMStudioModels(baseUrl)
@@ -225,11 +226,86 @@
 			expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
 			expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
 			expect(mockListLoaded).toHaveBeenCalledTimes(1)
-			expect(consoleErrorSpy).toHaveBeenCalledWith(
-				`Error fetching LMStudio models: ${JSON.stringify(listError, Object.getOwnPropertyNames(listError), 2)}`,
+			// Now it should log a warning for failed SDK methods, not an error
+			expect(consoleWarnSpy).toHaveBeenCalledWith(
+				"Failed to list downloaded models, falling back to loaded models only",
 			)
+			expect(consoleWarnSpy).toHaveBeenCalledWith("Failed to list loaded models via SDK")
 			expect(result).toEqual({})
-			consoleErrorSpy.mockRestore()
+			consoleWarnSpy.mockRestore()
+		})
+
+		it("should fall back to OpenAI API models when SDK methods fail", async () => {
+			const consoleLogSpy = vi.spyOn(console, "log").mockImplementation(() => {})
+			const consoleWarnSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
+
+			// Mock OpenAI API response with models
+			const openAiModels = {
+				data: [
+					{ id: "openai/gpt-oss-20b", object: "model", owned_by: "organization_owner" },
+					{ id: "unsloth/gpt-oss-20b", object: "model", owned_by: "organization_owner" },
+					{ id: "qwen/qwen3-coder-30b", object: "model", owned_by: "organization_owner" },
+				],
+				object: "list",
+			}
+
+			mockedAxios.get.mockResolvedValueOnce({ data: openAiModels })
+
+			// Make SDK methods fail
+			mockListDownloadedModels.mockRejectedValueOnce(new Error("SDK not available"))
+			mockListLoaded.mockRejectedValueOnce(new Error("SDK not available"))
+
+			const result = await getLMStudioModels(baseUrl)
+
+			// Should have called the OpenAI endpoint
+			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
+
+			// Should have tried SDK methods
+			expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
+			expect(mockListDownloadedModels).toHaveBeenCalled()
+			expect(mockListLoaded).toHaveBeenCalled()
+
+			// Should have logged the fallback
+			expect(consoleLogSpy).toHaveBeenCalledWith("Falling back to OpenAI-compatible API models")
+
+			// Should return models from OpenAI API
+			expect(Object.keys(result)).toHaveLength(3)
+			expect(result["openai/gpt-oss-20b"]).toBeDefined()
+			expect(result["openai/gpt-oss-20b"].description).toBe("openai/gpt-oss-20b")
+			expect(result["unsloth/gpt-oss-20b"]).toBeDefined()
+			expect(result["qwen/qwen3-coder-30b"]).toBeDefined()
+
+			consoleLogSpy.mockRestore()
+			consoleWarnSpy.mockRestore()
+		})
+
+		it("should not use OpenAI API fallback if SDK returns models", async () => {
+			const consoleLogSpy = vi.spyOn(console, "log").mockImplementation(() => {})
+
+			// Mock OpenAI API response with models
+			const openAiModels = {
+				data: [{ id: "openai/gpt-oss-20b", object: "model", owned_by: "organization_owner" }],
+				object: "list",
+			}
+
+			mockedAxios.get.mockResolvedValueOnce({ data: openAiModels })
+
+			// SDK returns models successfully
+			mockListDownloadedModels.mockResolvedValueOnce([])
+			mockListLoaded.mockResolvedValueOnce([{ getModelInfo: mockGetModelInfo }])
+			mockGetModelInfo.mockResolvedValueOnce(mockRawModel)
+
+			const result = await getLMStudioModels(baseUrl)
+
+			// Should NOT log the fallback message
+			expect(consoleLogSpy).not.toHaveBeenCalledWith("Falling back to OpenAI-compatible API models")
+
+			// Should return SDK models, not OpenAI API models
+			expect(Object.keys(result)).toHaveLength(1)
+			expect(result[mockRawModel.modelKey]).toBeDefined()
+			expect(result["openai/gpt-oss-20b"]).toBeUndefined()
+
+			consoleLogSpy.mockRestore()
 		})
 	})
})

src/api/providers/fetchers/lmstudio.ts

Lines changed: 32 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -65,9 +65,9 @@ export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Prom
 		return models
 	}

-	// test the connection to LM Studio first
+	// test the connection to LM Studio first and get models from OpenAI-compatible endpoint
 	// errors will be caught further down
-	await axios.get(`${baseUrl}/v1/models`)
+	const openAiModelsResponse = await axios.get(`${baseUrl}/v1/models`)

 	const client = new LMStudioClient({ baseUrl: lmsUrl })

@@ -82,13 +82,37 @@
 		console.warn("Failed to list downloaded models, falling back to loaded models only")
 	}
 	// We want to list loaded models *anyway* since they provide valuable extra info (context size)
-	const loadedModels = (await client.llm.listLoaded().then((models: LLM[]) => {
-		return Promise.all(models.map((m) => m.getModelInfo()))
-	})) as Array<LLMInstanceInfo>
+	try {
+		const loadedModels = (await client.llm.listLoaded().then((models: LLM[]) => {
+			return Promise.all(models.map((m) => m.getModelInfo()))
+		})) as Array<LLMInstanceInfo>
+
+		for (const lmstudioModel of loadedModels) {
+			models[lmstudioModel.modelKey] = parseLMStudioModel(lmstudioModel)
+			modelsWithLoadedDetails.add(lmstudioModel.modelKey)
+		}
+	} catch (error) {
+		console.warn("Failed to list loaded models via SDK")
+	}

-	for (const lmstudioModel of loadedModels) {
-		models[lmstudioModel.modelKey] = parseLMStudioModel(lmstudioModel)
-		modelsWithLoadedDetails.add(lmstudioModel.modelKey)
+	// If we didn't get any models from the SDK, fall back to OpenAI-compatible API
+	if (Object.keys(models).length === 0 && openAiModelsResponse.data?.data) {
+		console.log("Falling back to OpenAI-compatible API models")
+		const openAiModels = openAiModelsResponse.data.data
+
+		for (const model of openAiModels) {
+			// Use the model ID as the key
+			models[model.id] = {
+				...lMStudioDefaultModelInfo,
+				description: model.id,
+				// We don't have detailed info from the OpenAI API, so use defaults
+				contextWindow: lMStudioDefaultModelInfo.contextWindow,
+				maxTokens: lMStudioDefaultModelInfo.maxTokens,
+				supportsPromptCache: true,
+				supportsImages: false, // Conservative default
+				supportsComputerUse: false,
+			}
+		}
 	}
 } catch (error) {
 	if (error.code === "ECONNREFUSED") {

0 commit comments

Comments
 (0)