
Commit 6f7c4bc

fix: prevent duplicate LM Studio models by only showing loaded models
- Remove the listDownloadedModels() call and its try/catch block
- Only fetch loaded models, since they are the only ones that can process requests
- Use model.path as the consistent key to prevent duplicates
- Update tests to reflect the simplified implementation

This fixes the issue where models appeared twice in the Provider Configuration Profile when they were both downloaded and loaded in LM Studio.

Fixes #6954
1 parent f3864ff commit 6f7c4bc
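The duplicates came from the old code registering the same model under two different keys: entries from listDownloadedModels() were keyed by model.path, while entries from listLoaded() were keyed by modelKey. A minimal TypeScript sketch of the before/after keying; the ModelEntry shape below is a simplified stand-in for the SDK's LLMInfo/LLMInstanceInfo types, not the real ones:

// Simplified stand-in for the SDK model shapes, used only for this sketch.
interface ModelEntry {
	modelKey: string
	path: string
}

// Before: downloaded models keyed by path, loaded models keyed by modelKey.
// When modelKey differs from path for the same model, two entries are created.
function buildModelMapBefore(downloaded: ModelEntry[], loaded: ModelEntry[]): Record<string, ModelEntry> {
	const models: Record<string, ModelEntry> = {}
	for (const m of downloaded) models[m.path] = m
	for (const m of loaded) models[m.modelKey] = m // second entry for the same model if keys differ
	return models
}

// After: only loaded models are listed, always keyed by path, so each model appears once.
function buildModelMapAfter(loaded: ModelEntry[]): Record<string, ModelEntry> {
	const models: Record<string, ModelEntry> = {}
	for (const m of loaded) models[m.path] = m
	return models
}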

2 files changed: +8, -54 lines

src/api/providers/fetchers/__tests__/lmstudio.test.ts

Lines changed: 4 additions & 41 deletions
@@ -11,16 +11,12 @@ const mockedAxios = axios as any
 // Mock @lmstudio/sdk
 const mockGetModelInfo = vi.fn()
 const mockListLoaded = vi.fn()
-const mockListDownloadedModels = vi.fn()
 vi.mock("@lmstudio/sdk", () => {
 	return {
 		LMStudioClient: vi.fn().mockImplementation(() => ({
 			llm: {
 				listLoaded: mockListLoaded,
 			},
-			system: {
-				listDownloadedModels: mockListDownloadedModels,
-			},
 		})),
 	}
 })
@@ -32,7 +28,6 @@ describe("LMStudio Fetcher", () => {
 		MockedLMStudioClientConstructor.mockClear()
 		mockListLoaded.mockClear()
 		mockGetModelInfo.mockClear()
-		mockListDownloadedModels.mockClear()
 	})

 	describe("parseLMStudioModel", () => {
@@ -93,40 +88,8 @@ describe("LMStudio Fetcher", () => {
 			trainedForToolUse: false, // Added
 		}

-		it("should fetch downloaded models using system.listDownloadedModels", async () => {
-			const mockLLMInfo: LLMInfo = {
-				type: "llm" as const,
-				modelKey: "mistralai/devstral-small-2505",
-				format: "safetensors",
-				displayName: "Devstral Small 2505",
-				path: "mistralai/devstral-small-2505",
-				sizeBytes: 13277565112,
-				architecture: "mistral",
-				vision: false,
-				trainedForToolUse: false,
-				maxContextLength: 131072,
-			}
-
-			mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
-			mockListDownloadedModels.mockResolvedValueOnce([mockLLMInfo])
-
-			const result = await getLMStudioModels(baseUrl)
-
-			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
-			expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
-			expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
-			expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
-			expect(mockListDownloadedModels).toHaveBeenCalledWith("llm")
-			expect(mockListLoaded).toHaveBeenCalled() // we now call it to get context data
-
-			const expectedParsedModel = parseLMStudioModel(mockLLMInfo)
-			expect(result).toEqual({ [mockLLMInfo.path]: expectedParsedModel })
-		})
-
-		it("should fall back to listLoaded when listDownloadedModels fails", async () => {
+		it("should fetch only loaded models and use model.path as key", async () => {
 			mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
-			mockListDownloadedModels.mockRejectedValueOnce(new Error("Method not available"))
 			mockListLoaded.mockResolvedValueOnce([{ getModelInfo: mockGetModelInfo }])
 			mockGetModelInfo.mockResolvedValueOnce(mockRawModel)

@@ -136,11 +99,11 @@ describe("LMStudio Fetcher", () => {
 			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
 			expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
 			expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
-			expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
 			expect(mockListLoaded).toHaveBeenCalledTimes(1)

 			const expectedParsedModel = parseLMStudioModel(mockRawModel)
-			expect(result).toEqual({ [mockRawModel.modelKey]: expectedParsedModel })
+			// Now using model.path as the key instead of modelKey
+			expect(result).toEqual({ [mockRawModel.path]: expectedParsedModel })
 		})

 		it("should use default baseUrl if an empty string is provided", async () => {
@@ -212,7 +175,7 @@ describe("LMStudio Fetcher", () => {
 			consoleInfoSpy.mockRestore()
 		})

-		it("should return an empty object and log error if listDownloadedModels fails", async () => {
+		it("should return an empty object and log error if listLoaded fails", async () => {
 			const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
 			const listError = new Error("LMStudio SDK internal error")

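Read straight through, the renamed test roughly assembles to the following from the new-side lines of the hunks above; the getLMStudioModels call and the count assertion sit between the hunks and are inferred from the unchanged old test rather than shown in this diff:

it("should fetch only loaded models and use model.path as key", async () => {
	mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
	mockListLoaded.mockResolvedValueOnce([{ getModelInfo: mockGetModelInfo }])
	mockGetModelInfo.mockResolvedValueOnce(mockRawModel)

	// Inferred from the unchanged lines between the hunks.
	const result = await getLMStudioModels(baseUrl)
	expect(mockedAxios.get).toHaveBeenCalledTimes(1)

	expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
	expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
	expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
	expect(mockListLoaded).toHaveBeenCalledTimes(1)

	const expectedParsedModel = parseLMStudioModel(mockRawModel)
	// Now using model.path as the key instead of modelKey
	expect(result).toEqual({ [mockRawModel.path]: expectedParsedModel })
})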

src/api/providers/fetchers/lmstudio.ts

Lines changed: 4 additions & 13 deletions
@@ -71,24 +71,15 @@ export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Prom

 		const client = new LMStudioClient({ baseUrl: lmsUrl })

-		// First, try to get all downloaded models
-		try {
-			const downloadedModels = await client.system.listDownloadedModels("llm")
-			for (const model of downloadedModels) {
-				// Use the model path as the key since that's what users select
-				models[model.path] = parseLMStudioModel(model)
-			}
-		} catch (error) {
-			console.warn("Failed to list downloaded models, falling back to loaded models only")
-		}
-		// We want to list loaded models *anyway* since they provide valuable extra info (context size)
+		// Only get loaded models - these are the only ones that can actually process requests
 		const loadedModels = (await client.llm.listLoaded().then((models: LLM[]) => {
 			return Promise.all(models.map((m) => m.getModelInfo()))
 		})) as Array<LLMInstanceInfo>

 		for (const lmstudioModel of loadedModels) {
-			models[lmstudioModel.modelKey] = parseLMStudioModel(lmstudioModel)
-			modelsWithLoadedDetails.add(lmstudioModel.modelKey)
+			// Use model.path as the consistent key to prevent duplicates
+			models[lmstudioModel.path] = parseLMStudioModel(lmstudioModel)
+			modelsWithLoadedDetails.add(lmstudioModel.path)
 		}
 	} catch (error) {
 		if (error.code === "ECONNREFUSED") {
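Stitched together from the new-side lines above, the relevant section of getLMStudioModels now reduces to roughly the following; the surrounding health check and the enclosing try/catch are unchanged and omitted here:

const client = new LMStudioClient({ baseUrl: lmsUrl })

// Only get loaded models - these are the only ones that can actually process requests
const loadedModels = (await client.llm.listLoaded().then((models: LLM[]) => {
	return Promise.all(models.map((m) => m.getModelInfo()))
})) as Array<LLMInstanceInfo>

for (const lmstudioModel of loadedModels) {
	// Use model.path as the consistent key to prevent duplicates
	models[lmstudioModel.path] = parseLMStudioModel(lmstudioModel)
	modelsWithLoadedDetails.add(lmstudioModel.path)
}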
