diff --git a/src/api/providers/fetchers/__tests__/lmstudio.test.ts b/src/api/providers/fetchers/__tests__/lmstudio.test.ts
index ff9a109e50..f46bc76080 100644
--- a/src/api/providers/fetchers/__tests__/lmstudio.test.ts
+++ b/src/api/providers/fetchers/__tests__/lmstudio.test.ts
@@ -11,16 +11,12 @@ const mockedAxios = axios as any
 // Mock @lmstudio/sdk
 const mockGetModelInfo = vi.fn()
 const mockListLoaded = vi.fn()
-const mockListDownloadedModels = vi.fn()
 vi.mock("@lmstudio/sdk", () => {
 	return {
 		LMStudioClient: vi.fn().mockImplementation(() => ({
 			llm: {
 				listLoaded: mockListLoaded,
 			},
-			system: {
-				listDownloadedModels: mockListDownloadedModels,
-			},
 		})),
 	}
 })
@@ -32,7 +28,6 @@ describe("LMStudio Fetcher", () => {
 		MockedLMStudioClientConstructor.mockClear()
 		mockListLoaded.mockClear()
 		mockGetModelInfo.mockClear()
-		mockListDownloadedModels.mockClear()
 	})
 
 	describe("parseLMStudioModel", () => {
@@ -93,40 +88,8 @@ describe("LMStudio Fetcher", () => {
 			trainedForToolUse: false, // Added
 		}
 
-		it("should fetch downloaded models using system.listDownloadedModels", async () => {
-			const mockLLMInfo: LLMInfo = {
-				type: "llm" as const,
-				modelKey: "mistralai/devstral-small-2505",
-				format: "safetensors",
-				displayName: "Devstral Small 2505",
-				path: "mistralai/devstral-small-2505",
-				sizeBytes: 13277565112,
-				architecture: "mistral",
-				vision: false,
-				trainedForToolUse: false,
-				maxContextLength: 131072,
-			}
-
-			mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
-			mockListDownloadedModels.mockResolvedValueOnce([mockLLMInfo])
-
-			const result = await getLMStudioModels(baseUrl)
-
-			expect(mockedAxios.get).toHaveBeenCalledTimes(1)
-			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
-			expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
-			expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
-			expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
-			expect(mockListDownloadedModels).toHaveBeenCalledWith("llm")
-			expect(mockListLoaded).toHaveBeenCalled() // we now call it to get context data
-
-			const expectedParsedModel = parseLMStudioModel(mockLLMInfo)
-			expect(result).toEqual({ [mockLLMInfo.path]: expectedParsedModel })
-		})
-
-		it("should fall back to listLoaded when listDownloadedModels fails", async () => {
+		it("should fetch only loaded models and use model.path as key", async () => {
 			mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
-			mockListDownloadedModels.mockRejectedValueOnce(new Error("Method not available"))
 			mockListLoaded.mockResolvedValueOnce([{ getModelInfo: mockGetModelInfo }])
 			mockGetModelInfo.mockResolvedValueOnce(mockRawModel)
 
@@ -136,11 +99,11 @@ describe("LMStudio Fetcher", () => {
 			expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
 			expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
 			expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
-			expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
 			expect(mockListLoaded).toHaveBeenCalledTimes(1)
 
 			const expectedParsedModel = parseLMStudioModel(mockRawModel)
-			expect(result).toEqual({ [mockRawModel.modelKey]: expectedParsedModel })
+			// Now using model.path as the key instead of modelKey
+			expect(result).toEqual({ [mockRawModel.path]: expectedParsedModel })
 		})
 
 		it("should use default baseUrl if an empty string is provided", async () => {
@@ -212,7 +175,7 @@ describe("LMStudio Fetcher", () => {
 			consoleInfoSpy.mockRestore()
 		})
 
-		it("should return an empty object and log error if listDownloadedModels fails", async () => {
+		it("should return an empty object and log error if listLoaded fails", async () => {
			const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
 			const listError = new Error("LMStudio SDK internal error")
 
diff --git a/src/api/providers/fetchers/lmstudio.ts b/src/api/providers/fetchers/lmstudio.ts
index 976822c67d..a9b98f64bc 100644
--- a/src/api/providers/fetchers/lmstudio.ts
+++ b/src/api/providers/fetchers/lmstudio.ts
@@ -71,24 +71,15 @@ export async function getLMStudioModels(baseUrl = "http://localhost:1234"): Prom
 		const client = new LMStudioClient({ baseUrl: lmsUrl })
 
-		// First, try to get all downloaded models
-		try {
-			const downloadedModels = await client.system.listDownloadedModels("llm")
-			for (const model of downloadedModels) {
-				// Use the model path as the key since that's what users select
-				models[model.path] = parseLMStudioModel(model)
-			}
-		} catch (error) {
-			console.warn("Failed to list downloaded models, falling back to loaded models only")
-		}
 
-		// We want to list loaded models *anyway* since they provide valuable extra info (context size)
+		// Only get loaded models - these are the only ones that can actually process requests
 		const loadedModels = (await client.llm.listLoaded().then((models: LLM[]) => {
 			return Promise.all(models.map((m) => m.getModelInfo()))
 		})) as Array<LLMInstanceInfo>
 
 		for (const lmstudioModel of loadedModels) {
-			models[lmstudioModel.modelKey] = parseLMStudioModel(lmstudioModel)
-			modelsWithLoadedDetails.add(lmstudioModel.modelKey)
+			// Use model.path as the consistent key to prevent duplicates
+			models[lmstudioModel.path] = parseLMStudioModel(lmstudioModel)
+			modelsWithLoadedDetails.add(lmstudioModel.path)
 		}
 	} catch (error) {
 		if (error.code === "ECONNREFUSED") {