1 change: 1 addition & 0 deletions packages/types/src/providers/index.ts
@@ -8,6 +8,7 @@ export * from "./groq.js"
export * from "./lite-llm.js"
export * from "./lm-studio.js"
export * from "./mistral.js"
export * from "./ollama.js"
export * from "./openai.js"
export * from "./openrouter.js"
export * from "./requesty.js"
18 changes: 18 additions & 0 deletions packages/types/src/providers/lm-studio.ts
@@ -1 +1,19 @@
import type { ModelInfo } from "../model.js"

export const LMSTUDIO_DEFAULT_TEMPERATURE = 0

// LM Studio
// https://lmstudio.ai/docs/cli/ls
export const lMStudioDefaultModelId = "mistralai/devstral-small-2505"
export const lMStudioDefaultModelInfo: ModelInfo = {
	maxTokens: 8192,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 0,
	outputPrice: 0,
	cacheWritesPrice: 0,
	cacheReadsPrice: 0,
	description: "LM Studio hosted models",
}
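
For context on how these exports are typically consumed: the default id and info act as a fallback when no live model data is available for the user's selection. A minimal sketch, assuming a hypothetical `resolveLMStudioModel` helper that is not part of this PR:

```ts
import { lMStudioDefaultModelId, lMStudioDefaultModelInfo, type ModelInfo } from "@roo-code/types"

// Hypothetical helper (not in this PR): prefer the user's selection when the
// fetcher found it, otherwise fall back to the package defaults above.
function resolveLMStudioModel(
	models: Record<string, ModelInfo>,
	selectedId?: string,
): { id: string; info: ModelInfo } {
	if (selectedId && models[selectedId]) {
		return { id: selectedId, info: models[selectedId] }
	}
	return {
		id: lMStudioDefaultModelId,
		info: models[lMStudioDefaultModelId] ?? lMStudioDefaultModelInfo,
	}
}
```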
17 changes: 17 additions & 0 deletions packages/types/src/providers/ollama.ts
@@ -0,0 +1,17 @@
import type { ModelInfo } from "../model.js"

// Ollama
// https://ollama.com/models
export const ollamaDefaultModelId = "devstral:24b"
export const ollamaDefaultModelInfo: ModelInfo = {
	maxTokens: 4096,
	contextWindow: 200_000,
	supportsImages: true,
	supportsComputerUse: true,
	supportsPromptCache: true,
	inputPrice: 0,
	outputPrice: 0,
	cacheWritesPrice: 0,
	cacheReadsPrice: 0,
	description: "Ollama hosted models",
}
32 changes: 32 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default.

@@ -0,0 +1,14 @@
{
	"mistralai/devstral-small-2505": {
		"type": "llm",
		"modelKey": "mistralai/devstral-small-2505",
		"format": "safetensors",
		"displayName": "Devstral Small 2505",
		"path": "mistralai/devstral-small-2505",
		"sizeBytes": 13277565112,
		"architecture": "mistral",
		"vision": false,
		"trainedForToolUse": false,
		"maxContextLength": 131072
	}
}
@@ -0,0 +1,58 @@
{
	"qwen3-2to16:latest": {
		"license": " Apache License\\n Version 2.0, January 2004\\n...",
		"modelfile": "model.modelfile,# To build a new Modelfile based on this, replace FROM with:...",
		"parameters": "repeat_penalty 1\\nstop \\\\nstop...",
		"template": "{{- if .Messages }}\\n{{- if or .System .Tools }}<|im_start|>system...",
		"details": {
			"parent_model": "/Users/brad/.ollama/models/blobs/sha256-3291abe70f16ee9682de7bfae08db5373ea9d6497e614aaad63340ad421d6312",
			"format": "gguf",
			"family": "qwen3",
			"families": ["qwen3"],
			"parameter_size": "32.8B",
			"quantization_level": "Q4_K_M"
		},
		"model_info": {
			"general.architecture": "qwen3",
			"general.basename": "Qwen3",
			"general.file_type": 15,
			"general.parameter_count": 32762123264,
			"general.quantization_version": 2,
			"general.size_label": "32B",
			"general.type": "model",
			"qwen3.attention.head_count": 64,
			"qwen3.attention.head_count_kv": 8,
			"qwen3.attention.key_length": 128,
			"qwen3.attention.layer_norm_rms_epsilon": 0.000001,
			"qwen3.attention.value_length": 128,
			"qwen3.block_count": 64,
			"qwen3.context_length": 40960,
			"qwen3.embedding_length": 5120,
			"qwen3.feed_forward_length": 25600,
			"qwen3.rope.freq_base": 1000000,
			"tokenizer.ggml.add_bos_token": false,
			"tokenizer.ggml.bos_token_id": 151643,
			"tokenizer.ggml.eos_token_id": 151645,
			"tokenizer.ggml.merges": null,
			"tokenizer.ggml.model": "gpt2",
			"tokenizer.ggml.padding_token_id": 151643,
			"tokenizer.ggml.pre": "qwen2",
			"tokenizer.ggml.token_type": null,
			"tokenizer.ggml.tokens": null
		},
		"tensors": [
			{
				"name": "output.weight",
				"type": "Q6_K",
				"shape": [5120, 151936]
			},
			{
				"name": "output_norm.weight",
				"type": "F32",
				"shape": [5120]
			}
		],
		"capabilities": ["completion", "tools"],
		"modified_at": "2025-06-02T22:16:13.644123606-04:00"
	}
}
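
The fixture above captures the shape of an Ollama `show`-style response. For context, a minimal sketch of how a fetcher might fold such a payload over the `ollamaDefaultModelInfo` defaults; the helper name and the `vision` capability check are assumptions, not code from this PR:

```ts
import { ollamaDefaultModelInfo, type ModelInfo } from "@roo-code/types"

// Hypothetical sketch (not in this PR): override the package defaults with
// whatever the live payload reports.
function parseOllamaShowResponse(show: {
	details: { family: string }
	model_info: Record<string, unknown>
	capabilities?: string[]
}): ModelInfo {
	// Context length is keyed by architecture, e.g. "qwen3.context_length" above.
	const contextLength = show.model_info[`${show.details.family}.context_length`]
	return {
		...ollamaDefaultModelInfo,
		contextWindow: typeof contextLength === "number" ? contextLength : ollamaDefaultModelInfo.contextWindow,
		// Assumption: vision-capable models advertise a "vision" capability.
		supportsImages: show.capabilities?.includes("vision") ?? ollamaDefaultModelInfo.supportsImages,
	}
}
```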
197 changes: 197 additions & 0 deletions src/api/providers/fetchers/__tests__/lmstudio.test.ts
@@ -0,0 +1,197 @@
import axios from "axios"
import { vi, describe, it, expect, beforeEach } from "vitest"
import { LMStudioClient, LLMInstanceInfo } from "@lmstudio/sdk" // LLMInstanceInfo is a type
import { getLMStudioModels, parseLMStudioModel } from "../lmstudio"
import { ModelInfo, lMStudioDefaultModelInfo } from "@roo-code/types" // ModelInfo is a type

// Mock axios
vi.mock("axios")
const mockedAxios = axios as any

// Mock @lmstudio/sdk
const mockGetModelInfo = vi.fn()
const mockListLoaded = vi.fn()
vi.mock("@lmstudio/sdk", () => {
	return {
		LMStudioClient: vi.fn().mockImplementation(() => ({
			llm: {
				listLoaded: mockListLoaded,
			},
		})),
	}
})
const MockedLMStudioClientConstructor = LMStudioClient as any

describe("LMStudio Fetcher", () => {
	beforeEach(() => {
		vi.clearAllMocks()
		MockedLMStudioClientConstructor.mockClear()
		mockListLoaded.mockClear()
		mockGetModelInfo.mockClear()
	})
describe("parseLMStudioModel", () => {
it("should correctly parse raw LLMInfo to ModelInfo", () => {
const rawModel: LLMInstanceInfo = {
type: "llm",
modelKey: "mistralai/devstral-small-2505",
format: "safetensors",
displayName: "Devstral Small 2505",
path: "mistralai/devstral-small-2505",
sizeBytes: 13277565112,
architecture: "mistral",
identifier: "mistralai/devstral-small-2505",
instanceReference: "RAP5qbeHVjJgBiGFQ6STCuTJ",
vision: false,
trainedForToolUse: false,
maxContextLength: 131072,
contextLength: 7161,
}

const expectedModelInfo: ModelInfo = {
...lMStudioDefaultModelInfo,
description: `${rawModel.displayName} - ${rawModel.path}`,
contextWindow: rawModel.contextLength,
supportsPromptCache: true,
supportsImages: rawModel.vision,
supportsComputerUse: false,
maxTokens: rawModel.contextLength,
inputPrice: 0,
outputPrice: 0,
cacheWritesPrice: 0,
cacheReadsPrice: 0,
}

const result = parseLMStudioModel(rawModel)
expect(result).toEqual(expectedModelInfo)
})
})
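
	// A sketch, not from this PR: the expectations above pin down the mapping
	// parseLMStudioModel performs. An implementation consistent with this test
	// (the real one lives in ../lmstudio and may differ in detail):
	function parseLMStudioModelSketch(rawModel: LLMInstanceInfo): ModelInfo {
		return {
			...lMStudioDefaultModelInfo,
			description: `${rawModel.displayName} - ${rawModel.path}`,
			contextWindow: rawModel.contextLength,
			maxTokens: rawModel.contextLength,
			supportsPromptCache: true,
			supportsImages: rawModel.vision,
			supportsComputerUse: false,
			inputPrice: 0,
			outputPrice: 0,
			cacheWritesPrice: 0,
			cacheReadsPrice: 0,
		}
	}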

describe("getLMStudioModels", () => {
const baseUrl = "http://localhost:1234"
const lmsUrl = "ws://localhost:1234"

const mockRawModel: LLMInstanceInfo = {
architecture: "test-arch",
identifier: "mistralai/devstral-small-2505",
instanceReference: "RAP5qbeHVjJgBiGFQ6STCuTJ",
modelKey: "test-model-key-1",
path: "/path/to/test-model-1",
type: "llm",
displayName: "Test Model One",
maxContextLength: 2048,
contextLength: 7161,
paramsString: "1B params, 2k context",
vision: true,
format: "gguf",
sizeBytes: 1000000000,
trainedForToolUse: false, // Added
}

it("should fetch and parse models successfully", async () => {
mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
mockListLoaded.mockResolvedValueOnce([{ getModelInfo: mockGetModelInfo }])
mockGetModelInfo.mockResolvedValueOnce(mockRawModel)

const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
expect(mockListLoaded).toHaveBeenCalledTimes(1)

const expectedParsedModel = parseLMStudioModel(mockRawModel)
expect(result).toEqual({ [mockRawModel.modelKey]: expectedParsedModel })
})

it("should use default baseUrl if an empty string is provided", async () => {
const defaultBaseUrl = "http://localhost:1234"
const defaultLmsUrl = "ws://localhost:1234"
mockedAxios.get.mockResolvedValueOnce({ data: {} })
mockListLoaded.mockResolvedValueOnce([])

await getLMStudioModels("")

expect(mockedAxios.get).toHaveBeenCalledWith(`${defaultBaseUrl}/v1/models`)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: defaultLmsUrl })
})

it("should transform https baseUrl to wss for LMStudioClient", async () => {
const httpsBaseUrl = "https://securehost:4321"
const wssLmsUrl = "wss://securehost:4321"
mockedAxios.get.mockResolvedValueOnce({ data: {} })
mockListLoaded.mockResolvedValueOnce([])

await getLMStudioModels(httpsBaseUrl)

expect(mockedAxios.get).toHaveBeenCalledWith(`${httpsBaseUrl}/v1/models`)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: wssLmsUrl })
})

it("should return an empty object if lmsUrl is unparsable", async () => {
const unparsableBaseUrl = "http://localhost:invalid:port" // Leads to ws://localhost:invalid:port

const result = await getLMStudioModels(unparsableBaseUrl)

expect(result).toEqual({})
expect(mockedAxios.get).not.toHaveBeenCalled()
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
})
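
		// A sketch, not from this PR: together, the three tests above pin down the
		// base-URL handling -- an empty string falls back to http://localhost:1234,
		// http(s) is rewritten to ws(s) for the SDK client, and an unparsable URL
		// aborts before any request is made. The helper name is hypothetical; the
		// real logic lives inside getLMStudioModels and may differ in detail.
		function toLmsWebSocketUrlSketch(baseUrl: string): string | undefined {
			const url = baseUrl || "http://localhost:1234" // empty string -> default host
			const wsUrl = url.replace(/^http(s?):\/\//, "ws$1://") // http -> ws, https -> wss
			try {
				new URL(wsUrl) // "ws://localhost:invalid:port" throws here
			} catch {
				return undefined // caller returns {} without touching the network
			}
			return wsUrl
		}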

it("should return an empty object and log error if axios.get fails with a generic error", async () => {
const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
const networkError = new Error("Network connection failed")
mockedAxios.get.mockRejectedValueOnce(networkError)

const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
expect(mockListLoaded).not.toHaveBeenCalled()
expect(consoleErrorSpy).toHaveBeenCalledWith(
`Error fetching LMStudio models: ${JSON.stringify(networkError, Object.getOwnPropertyNames(networkError), 2)}`,
)
expect(result).toEqual({})
consoleErrorSpy.mockRestore()
})

it("should return an empty object and log info if axios.get fails with ECONNREFUSED", async () => {
const consoleInfoSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
const econnrefusedError = new Error("Connection refused")
;(econnrefusedError as any).code = "ECONNREFUSED"
mockedAxios.get.mockRejectedValueOnce(econnrefusedError)

const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
expect(mockListLoaded).not.toHaveBeenCalled()
expect(consoleInfoSpy).toHaveBeenCalledWith(`Error connecting to LMStudio at ${baseUrl}`)
expect(result).toEqual({})
consoleInfoSpy.mockRestore()
})
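
		// A sketch, not from this PR: the two tests above describe the error branch.
		// A refused connection only merits a warning (LM Studio is simply not
		// running); any other failure is serialized in full so no detail is lost.
		// The function name is hypothetical; getLMStudioModels itself may differ.
		async function fetchModelsOrEmptySketch(url: string): Promise<Record<string, ModelInfo>> {
			try {
				await axios.get(`${url}/v1/models`)
				// ... construct the LMStudioClient and parse loaded models here ...
				return {}
			} catch (error) {
				if ((error as NodeJS.ErrnoException).code === "ECONNREFUSED") {
					console.warn(`Error connecting to LMStudio at ${url}`)
				} else {
					console.error(
						`Error fetching LMStudio models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
					)
				}
				return {}
			}
		}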

it("should return an empty object and log error if listDownloadedModels fails", async () => {
const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {})
const listError = new Error("LMStudio SDK internal error")

mockedAxios.get.mockResolvedValueOnce({ data: {} })
mockListLoaded.mockRejectedValueOnce(listError)

const result = await getLMStudioModels(baseUrl)

expect(mockedAxios.get).toHaveBeenCalledTimes(1)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
expect(mockListLoaded).toHaveBeenCalledTimes(1)
expect(consoleErrorSpy).toHaveBeenCalledWith(
`Error fetching LMStudio models: ${JSON.stringify(listError, Object.getOwnPropertyNames(listError), 2)}`,
)
expect(result).toEqual({})
consoleErrorSpy.mockRestore()
})
})
})