Skip to content

Commit ce11d4a

Browse files
thecolorbluedaniel-lxs
authored and committed
added fetcher tests; fixed bug in webviewMessageHandler around ollama/lmstudio model requests (#2462)
1 parent fc3a003 commit ce11d4a

File tree

9 files changed

+415
-9
lines changed

9 files changed

+415
-9
lines changed
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
{
2+
"mistralai/devstral-small-2505": {
3+
"type": "llm",
4+
"modelKey": "mistralai/devstral-small-2505",
5+
"format": "safetensors",
6+
"displayName": "Devstral Small 2505",
7+
"path": "mistralai/devstral-small-2505",
8+
"sizeBytes": 13277565112,
9+
"architecture": "mistral",
10+
"vision": false,
11+
"trainedForToolUse": false,
12+
"maxContextLength": 131072
13+
}
14+
}
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
{
2+
"qwen3-2to16:latest": {
3+
"license": " Apache License\\n Version 2.0, January 2004\\n...",
4+
"modelfile": "model.modelfile,# To build a new Modelfile based on this, replace FROM with:...",
5+
"parameters": "repeat_penalty 1\\nstop \\\\nstop...",
6+
"template": "{{- if .Messages }}\\n{{- if or .System .Tools }}<|im_start|>system...",
7+
"details": {
8+
"parent_model": "/Users/brad/.ollama/models/blobs/sha256-3291abe70f16ee9682de7bfae08db5373ea9d6497e614aaad63340ad421d6312",
9+
"format": "gguf",
10+
"family": "qwen3",
11+
"families": ["qwen3"],
12+
"parameter_size": "32.8B",
13+
"quantization_level": "Q4_K_M"
14+
},
15+
"model_info": {
16+
"general.architecture": "qwen3",
17+
"general.basename": "Qwen3",
18+
"general.file_type": 15,
19+
"general.parameter_count": 32762123264,
20+
"general.quantization_version": 2,
21+
"general.size_label": "32B",
22+
"general.type": "model",
23+
"qwen3.attention.head_count": 64,
24+
"qwen3.attention.head_count_kv": 8,
25+
"qwen3.attention.key_length": 128,
26+
"qwen3.attention.layer_norm_rms_epsilon": 0.000001,
27+
"qwen3.attention.value_length": 128,
28+
"qwen3.block_count": 64,
29+
"qwen3.context_length": 40960,
30+
"qwen3.embedding_length": 5120,
31+
"qwen3.feed_forward_length": 25600,
32+
"qwen3.rope.freq_base": 1000000,
33+
"tokenizer.ggml.add_bos_token": false,
34+
"tokenizer.ggml.bos_token_id": 151643,
35+
"tokenizer.ggml.eos_token_id": 151645,
36+
"tokenizer.ggml.merges": null,
37+
"tokenizer.ggml.model": "gpt2",
38+
"tokenizer.ggml.padding_token_id": 151643,
39+
"tokenizer.ggml.pre": "qwen2",
40+
"tokenizer.ggml.token_type": null,
41+
"tokenizer.ggml.tokens": null
42+
},
43+
"tensors": [
44+
{
45+
"name": "output.weight",
46+
"type": "Q6_K",
47+
"shape": [5120, 151936]
48+
},
49+
{
50+
"name": "output_norm.weight",
51+
"type": "F32",
52+
"shape": [5120]
53+
}
54+
],
55+
"capabilities": ["completion", "tools"],
56+
"modified_at": "2025-06-02T22:16:13.644123606-04:00"
57+
}
58+
}
Lines changed: 190 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,190 @@
1+
import axios from "axios"
2+
import { LMStudioClient, LLMInfo } from "@lmstudio/sdk" // LLMInfo is a type
3+
import { getLMStudioModels, parseLMStudioModel } from "../lmstudio"
4+
import { ModelInfo, lMStudioDefaultModelInfo } from "@roo-code/types" // ModelInfo is a type
5+
6+
// Mock axios so no real HTTP request is made by the health check in getLMStudioModels.
jest.mock("axios")
const mockedAxios = axios as jest.Mocked<typeof axios>

// Mock @lmstudio/sdk.
// NOTE: jest.mock() calls are hoisted above imports; the factory may only reference
// out-of-scope variables whose names start with "mock" (hence mockListDownloadedModels),
// otherwise Jest rejects the factory at hoist time.
const mockListDownloadedModels = jest.fn()
jest.mock("@lmstudio/sdk", () => {
	// Keep the real module's exports (types, helpers) and replace only the client class.
	const originalModule = jest.requireActual("@lmstudio/sdk")
	return {
		...originalModule,
		// Every `new LMStudioClient(...)` in the SUT returns this stub, whose
		// system.listDownloadedModels is our shared jest.fn() so tests can
		// control its resolution/rejection per test case.
		LMStudioClient: jest.fn().mockImplementation(() => ({
			system: {
				listDownloadedModels: mockListDownloadedModels,
			},
		})),
	}
})
// Typed handle on the mocked constructor for call-count/argument assertions.
const MockedLMStudioClientConstructor = LMStudioClient as jest.MockedClass<typeof LMStudioClient>
24+
25+
describe("LMStudio Fetcher", () => {
26+
beforeEach(() => {
27+
jest.clearAllMocks()
28+
MockedLMStudioClientConstructor.mockClear()
29+
})
30+
31+
describe("parseLMStudioModel", () => {
32+
it("should correctly parse raw LLMInfo to ModelInfo", () => {
33+
const rawModel: LLMInfo = {
34+
architecture: "llama",
35+
modelKey: "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
36+
path: "/Users/username/.cache/lm-studio/models/Mistral AI/Mistral-7B-Instruct-v0.2/mistral-7b-instruct-v0.2.Q4_K_M.gguf",
37+
type: "llm",
38+
displayName: "Mistral-7B-Instruct-v0.2-Q4_K_M",
39+
maxContextLength: 8192,
40+
paramsString: "7B params, 8k context",
41+
vision: false,
42+
format: "gguf",
43+
sizeBytes: 4080000000,
44+
trainedForToolUse: false, // Added
45+
}
46+
47+
const expectedModelInfo: ModelInfo = {
48+
...lMStudioDefaultModelInfo,
49+
description: `${rawModel.displayName} - ${rawModel.paramsString} - ${rawModel.path}`,
50+
contextWindow: rawModel.maxContextLength,
51+
supportsPromptCache: true,
52+
supportsImages: rawModel.vision,
53+
supportsComputerUse: false,
54+
maxTokens: rawModel.maxContextLength,
55+
inputPrice: 0,
56+
outputPrice: 0,
57+
cacheWritesPrice: 0,
58+
cacheReadsPrice: 0,
59+
}
60+
61+
const result = parseLMStudioModel(rawModel)
62+
expect(result).toEqual(expectedModelInfo)
63+
})
64+
})
65+
66+
describe("getLMStudioModels", () => {
67+
const baseUrl = "http://localhost:1234"
68+
const lmsUrl = "ws://localhost:1234"
69+
70+
const mockRawModel: LLMInfo = {
71+
architecture: "test-arch",
72+
modelKey: "test-model-key-1",
73+
path: "/path/to/test-model-1",
74+
type: "llm",
75+
displayName: "Test Model One",
76+
maxContextLength: 2048,
77+
paramsString: "1B params, 2k context",
78+
vision: true,
79+
format: "gguf",
80+
sizeBytes: 1000000000,
81+
trainedForToolUse: false, // Added
82+
}
83+
84+
it("should fetch and parse models successfully", async () => {
85+
const mockApiResponse: LLMInfo[] = [mockRawModel]
86+
mockedAxios.get.mockResolvedValueOnce({ data: { status: "ok" } })
87+
mockListDownloadedModels.mockResolvedValueOnce(mockApiResponse)
88+
89+
const result = await getLMStudioModels(baseUrl)
90+
91+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
92+
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
93+
expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
94+
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
95+
expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
96+
97+
const expectedParsedModel = parseLMStudioModel(mockRawModel)
98+
expect(result).toEqual({ [mockRawModel.modelKey]: expectedParsedModel })
99+
})
100+
101+
it("should use default baseUrl if an empty string is provided", async () => {
102+
const defaultBaseUrl = "http://localhost:1234"
103+
const defaultLmsUrl = "ws://localhost:1234"
104+
mockedAxios.get.mockResolvedValueOnce({ data: {} })
105+
mockListDownloadedModels.mockResolvedValueOnce([])
106+
107+
await getLMStudioModels("")
108+
109+
expect(mockedAxios.get).toHaveBeenCalledWith(`${defaultBaseUrl}/v1/models`)
110+
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: defaultLmsUrl })
111+
})
112+
113+
it("should transform https baseUrl to wss for LMStudioClient", async () => {
114+
const httpsBaseUrl = "https://securehost:4321"
115+
const wssLmsUrl = "wss://securehost:4321"
116+
mockedAxios.get.mockResolvedValueOnce({ data: {} })
117+
mockListDownloadedModels.mockResolvedValueOnce([])
118+
119+
await getLMStudioModels(httpsBaseUrl)
120+
121+
expect(mockedAxios.get).toHaveBeenCalledWith(`${httpsBaseUrl}/v1/models`)
122+
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: wssLmsUrl })
123+
})
124+
125+
it("should return an empty object if lmsUrl is unparsable", async () => {
126+
const unparsableBaseUrl = "http://localhost:invalid:port" // Leads to ws://localhost:invalid:port
127+
128+
const result = await getLMStudioModels(unparsableBaseUrl)
129+
130+
expect(result).toEqual({})
131+
expect(mockedAxios.get).not.toHaveBeenCalled()
132+
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
133+
})
134+
135+
it("should return an empty object and log error if axios.get fails with a generic error", async () => {
136+
const consoleErrorSpy = jest.spyOn(console, "error").mockImplementation(() => {})
137+
const networkError = new Error("Network connection failed")
138+
mockedAxios.get.mockRejectedValueOnce(networkError)
139+
140+
const result = await getLMStudioModels(baseUrl)
141+
142+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
143+
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
144+
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
145+
expect(mockListDownloadedModels).not.toHaveBeenCalled()
146+
expect(consoleErrorSpy).toHaveBeenCalledWith(
147+
`Error fetching LMStudio models: ${JSON.stringify(networkError, Object.getOwnPropertyNames(networkError), 2)}`,
148+
)
149+
expect(result).toEqual({})
150+
consoleErrorSpy.mockRestore()
151+
})
152+
153+
it("should return an empty object and log info if axios.get fails with ECONNREFUSED", async () => {
154+
const consoleInfoSpy = jest.spyOn(console, "warn").mockImplementation(() => {})
155+
const econnrefusedError = new Error("Connection refused")
156+
;(econnrefusedError as any).code = "ECONNREFUSED"
157+
mockedAxios.get.mockRejectedValueOnce(econnrefusedError)
158+
159+
const result = await getLMStudioModels(baseUrl)
160+
161+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
162+
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/v1/models`)
163+
expect(MockedLMStudioClientConstructor).not.toHaveBeenCalled()
164+
expect(mockListDownloadedModels).not.toHaveBeenCalled()
165+
expect(consoleInfoSpy).toHaveBeenCalledWith(`Error connecting to LMStudio at ${baseUrl}`)
166+
expect(result).toEqual({})
167+
consoleInfoSpy.mockRestore()
168+
})
169+
170+
it("should return an empty object and log error if listDownloadedModels fails", async () => {
171+
const consoleErrorSpy = jest.spyOn(console, "error").mockImplementation(() => {})
172+
const listError = new Error("LMStudio SDK internal error")
173+
174+
mockedAxios.get.mockResolvedValueOnce({ data: {} })
175+
mockListDownloadedModels.mockRejectedValueOnce(listError)
176+
177+
const result = await getLMStudioModels(baseUrl)
178+
179+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
180+
expect(MockedLMStudioClientConstructor).toHaveBeenCalledTimes(1)
181+
expect(MockedLMStudioClientConstructor).toHaveBeenCalledWith({ baseUrl: lmsUrl })
182+
expect(mockListDownloadedModels).toHaveBeenCalledTimes(1)
183+
expect(consoleErrorSpy).toHaveBeenCalledWith(
184+
`Error fetching LMStudio models: ${JSON.stringify(listError, Object.getOwnPropertyNames(listError), 2)}`,
185+
)
186+
expect(result).toEqual({})
187+
consoleErrorSpy.mockRestore()
188+
})
189+
})
190+
})
Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
import axios from "axios"
2+
import path from "path"
3+
import { getOllamaModels, parseOllamaModel } from "../ollama"
4+
import * as ollamaModelsData from "./fixtures/ollama-model-details.json"
5+
6+
// Mock axios so tests never hit a real Ollama server.
// jest.mock() is hoisted above the imports, so the axios import above already
// receives the auto-mocked module; the cast exposes typed mock helpers.
jest.mock("axios")
const mockedAxios = axios as jest.Mocked<typeof axios>
9+
10+
describe("Ollama Fetcher", () => {
11+
beforeEach(() => {
12+
jest.clearAllMocks()
13+
})
14+
15+
describe("parseOllamaModel", () => {
16+
const ollamaModels = ollamaModelsData as Record<string, any>
17+
const parsedModel = parseOllamaModel(ollamaModels["qwen3-2to16:latest"])
18+
19+
expect(parsedModel).toEqual({
20+
maxTokens: 40960,
21+
contextWindow: 40960,
22+
supportsImages: false,
23+
supportsComputerUse: false,
24+
supportsPromptCache: true,
25+
inputPrice: 0,
26+
outputPrice: 0,
27+
cacheWritesPrice: 0,
28+
cacheReadsPrice: 0,
29+
description: "Family: qwen3, Context: 40960, Size: 32.8B",
30+
})
31+
})
32+
33+
describe("getOllamaModels", () => {
34+
it("should fetch model list from /api/tags and details for each model from /api/show", async () => {
35+
const baseUrl = "http://localhost:11434"
36+
const modelName = "devstral2to16:latest"
37+
38+
const mockApiTagsResponse = {
39+
models: [
40+
{
41+
name: modelName,
42+
model: modelName,
43+
modified_at: "2025-06-03T09:23:22.610222878-04:00",
44+
size: 14333928010,
45+
digest: "6a5f0c01d2c96c687d79e32fdd25b87087feb376bf9838f854d10be8cf3c10a5",
46+
details: {
47+
family: "llama",
48+
families: ["llama"],
49+
format: "gguf",
50+
parameter_size: "23.6B",
51+
parent_model: "",
52+
quantization_level: "Q4_K_M",
53+
},
54+
},
55+
],
56+
}
57+
const mockApiShowResponse = {
58+
license: "Mock License",
59+
modelfile: "FROM /path/to/blob\nTEMPLATE {{ .Prompt }}",
60+
parameters: "num_ctx 4096\nstop_token <eos>",
61+
template: "{{ .System }}USER: {{ .Prompt }}ASSISTANT:",
62+
modified_at: "2025-06-03T09:23:22.610222878-04:00",
63+
details: {
64+
parent_model: "",
65+
format: "gguf",
66+
family: "llama",
67+
families: ["llama"],
68+
parameter_size: "23.6B",
69+
quantization_level: "Q4_K_M",
70+
},
71+
model_info: {
72+
"ollama.context_length": 4096,
73+
"some.other.info": "value",
74+
},
75+
capabilities: ["completion"],
76+
}
77+
78+
mockedAxios.get.mockResolvedValueOnce({ data: mockApiTagsResponse })
79+
mockedAxios.post.mockResolvedValueOnce({ data: mockApiShowResponse })
80+
81+
const result = await getOllamaModels(baseUrl)
82+
83+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
84+
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`)
85+
86+
expect(mockedAxios.post).toHaveBeenCalledTimes(1)
87+
expect(mockedAxios.post).toHaveBeenCalledWith(`${baseUrl}/api/show`, { model: modelName })
88+
89+
expect(typeof result).toBe("object")
90+
expect(result).not.toBeInstanceOf(Array)
91+
expect(Object.keys(result).length).toBe(1)
92+
expect(result[modelName]).toBeDefined()
93+
94+
const expectedParsedDetails = parseOllamaModel(mockApiShowResponse as any)
95+
expect(result[modelName]).toEqual(expectedParsedDetails)
96+
})
97+
98+
it("should return an empty list if the initial /api/tags call fails", async () => {
99+
const baseUrl = "http://localhost:11434"
100+
mockedAxios.get.mockRejectedValueOnce(new Error("Network error"))
101+
const consoleInfoSpy = jest.spyOn(console, "error").mockImplementation(() => {}) // Spy and suppress output
102+
103+
const result = await getOllamaModels(baseUrl)
104+
105+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
106+
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`)
107+
expect(mockedAxios.post).not.toHaveBeenCalled()
108+
expect(result).toEqual({})
109+
})
110+
111+
it("should log an info message and return an empty object on ECONNREFUSED", async () => {
112+
const baseUrl = "http://localhost:11434"
113+
const consoleInfoSpy = jest.spyOn(console, "warn").mockImplementation(() => {}) // Spy and suppress output
114+
115+
const econnrefusedError = new Error("Connection refused") as any
116+
econnrefusedError.code = "ECONNREFUSED"
117+
mockedAxios.get.mockRejectedValueOnce(econnrefusedError)
118+
119+
const result = await getOllamaModels(baseUrl)
120+
121+
expect(mockedAxios.get).toHaveBeenCalledTimes(1)
122+
expect(mockedAxios.get).toHaveBeenCalledWith(`${baseUrl}/api/tags`)
123+
expect(mockedAxios.post).not.toHaveBeenCalled()
124+
expect(consoleInfoSpy).toHaveBeenCalledWith(`Failed connecting to Ollama at ${baseUrl}`)
125+
expect(result).toEqual({})
126+
127+
consoleInfoSpy.mockRestore() // Restore original console.info
128+
})
129+
})
130+
})

0 commit comments

Comments
 (0)