
Commit 3b92afa

AngleAngle authored and committed

Merge branch 'fix/issue-5075-lmstudio-context-length' of https://github.com/RooCodeInc/Roo-Code into fix/issue-5075-lmstudio-context-length

2 parents ac85aa6 + 258e82a

File tree

2 files changed (+37, -49 lines)


src/api/providers/__tests__/lmstudio.spec.ts

Lines changed: 12 additions & 23 deletions
```diff
@@ -58,20 +58,20 @@ vi.mock("openai", () => {
 	}
 })
 
-// Mock LM Studio fetcher
-vi.mock("../fetchers/lmstudio", () => ({
-	getLMStudioModels: vi.fn(),
+// Mock model cache
+vi.mock("../fetchers/modelCache", () => ({
+	getModels: vi.fn(),
 }))
 
 import type { Anthropic } from "@anthropic-ai/sdk"
 import type { ModelInfo } from "@roo-code/types"
 
 import { LmStudioHandler } from "../lm-studio"
 import type { ApiHandlerOptions } from "../../../shared/api"
-import { getLMStudioModels } from "../fetchers/lmstudio"
+import { getModels } from "../fetchers/modelCache"
 
 // Get the mocked function
-const mockGetLMStudioModels = vi.mocked(getLMStudioModels)
+const mockGetModels = vi.mocked(getModels)
 
 describe("LmStudioHandler", () => {
 	let handler: LmStudioHandler
```
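Condensed, the new test scaffolding reduces to the sketch below. Identifiers come from the diff, Vitest is assumed, and the relative import path only resolves inside the repo:

```ts
import { vi, beforeEach } from "vitest"

// vi.mock is hoisted above imports, so the handler module under test
// receives the mocked model cache rather than the real fetcher.
vi.mock("../fetchers/modelCache", () => ({
	getModels: vi.fn(),
}))

import { getModels } from "../fetchers/modelCache"

// vi.mocked() re-types the import as a mock, so tests can call
// mockResolvedValueOnce / mockRejectedValueOnce with type safety.
const mockGetModels = vi.mocked(getModels)

beforeEach(() => {
	mockGetModels.mockClear()
})
```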
```diff
@@ -98,7 +98,7 @@ describe("LmStudioHandler", () => {
 		}
 		handler = new LmStudioHandler(mockOptions)
 		mockCreate.mockClear()
-		mockGetLMStudioModels.mockClear()
+		mockGetModels.mockClear()
 	})
 
 	describe("constructor", () => {
@@ -190,7 +190,7 @@ describe("LmStudioHandler", () => {
 
 		it("should return fetched model info when available", async () => {
 			// Mock the fetched models
-			mockGetLMStudioModels.mockResolvedValueOnce({
+			mockGetModels.mockResolvedValueOnce({
 				"local-model": mockModelInfo,
 			})
 
@@ -204,7 +204,7 @@
 
 		it("should fallback to default when model not found in fetched models", async () => {
 			// Mock fetched models without our target model
-			mockGetLMStudioModels.mockResolvedValueOnce({
+			mockGetModels.mockResolvedValueOnce({
 				"other-model": mockModelInfo,
 			})
 
@@ -219,32 +219,21 @@
 
 	describe("fetchModel", () => {
 		it("should fetch models successfully", async () => {
-			mockGetLMStudioModels.mockResolvedValueOnce({
+			mockGetModels.mockResolvedValueOnce({
 				"local-model": mockModelInfo,
 			})
 
 			const result = await handler.fetchModel()
 
-			expect(mockGetLMStudioModels).toHaveBeenCalledWith(mockOptions.lmStudioBaseUrl)
+			expect(mockGetModels).toHaveBeenCalledWith({ provider: "lmstudio" })
 			expect(result.id).toBe(mockOptions.lmStudioModelId)
 			expect(result.info).toEqual(mockModelInfo)
 		})
 
 		it("should handle fetch errors gracefully", async () => {
-			const consoleSpy = vi.spyOn(console, "warn").mockImplementation(() => {})
-			mockGetLMStudioModels.mockRejectedValueOnce(new Error("Connection failed"))
+			mockGetModels.mockRejectedValueOnce(new Error("Connection failed"))
 
-			const result = await handler.fetchModel()
-
-			expect(consoleSpy).toHaveBeenCalledWith(
-				"Failed to fetch LM Studio models, using defaults:",
-				expect.any(Error),
-			)
-			expect(result.id).toBe(mockOptions.lmStudioModelId)
-			expect(result.info.maxTokens).toBe(-1)
-			expect(result.info.contextWindow).toBe(128_000)
-
-			consoleSpy.mockRestore()
+			await expect(handler.fetchModel()).rejects.toThrow("Connection failed")
 		})
 	})
 })
```
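The last hunk changes behavior, not just names: fetchModel() no longer logs a warning and falls back to defaults; it rethrows, and the test asserts the rejection. A self-contained sketch of that assertion pattern, with illustrative stand-ins (fetchFromCache and loadModel are not repo identifiers; Vitest assumed):

```ts
import { describe, expect, it, vi } from "vitest"

// Stand-in for the mocked getModels dependency.
const fetchFromCache = vi.fn()

// Stand-in for the new fetchModel() contract: no try/catch,
// so a cache failure propagates unchanged to the caller.
async function loadModel() {
	return fetchFromCache()
}

describe("rejection propagation", () => {
	it("surfaces the underlying error to the caller", async () => {
		fetchFromCache.mockRejectedValueOnce(new Error("Connection failed"))
		await expect(loadModel()).rejects.toThrow("Connection failed")
	})
})
```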

src/api/providers/lm-studio.ts

Lines changed: 25 additions & 26 deletions
```diff
@@ -1,10 +1,9 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import axios from "axios"
 
 import { type ModelInfo, openAiModelInfoSaneDefaults, LMSTUDIO_DEFAULT_TEMPERATURE } from "@roo-code/types"
 
-import type { ApiHandlerOptions } from "../../shared/api"
+import type { ApiHandlerOptions, ModelRecord } from "../../shared/api"
 
 import { XmlMatcher } from "../../utils/xml-matcher"
 
@@ -13,7 +12,7 @@ import { ApiStream } from "../transform/stream"
 
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
-import { getLMStudioModels } from "./fetchers/lmstudio"
+import { getModels } from "./fetchers/modelCache"
 
 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -75,8 +74,9 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		let assistantText = ""
 
 		try {
+			const modelInfo = await this.fetchModel()
 			const params: OpenAI.Chat.ChatCompletionCreateParamsStreaming & { draft_model?: string } = {
-				model: this.getModel().id,
+				model: modelInfo.id,
 				messages: openAiMessages,
 				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
 				stream: true,
```
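The ordering in that last hunk is the point: the model list is fetched (and cached) before the request params are assembled, so the resolved id reflects live data rather than whatever getModel() happened to hold. A minimal sketch of the flow, with simplified types that are not the handler's real signature (buildStreamingParams is illustrative, not a repo function):

```ts
async function buildStreamingParams(
	fetchModel: () => Promise<{ id: string }>,
	messages: Array<{ role: "user" | "assistant"; content: string }>,
) {
	// Resolve the model first; if the fetch rejects, no request is built.
	const modelInfo = await fetchModel()
	return {
		model: modelInfo.id, // previously this.getModel().id, which could predate any fetch
		messages,
		stream: true as const,
	}
}
```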
```diff
@@ -133,20 +133,32 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 	}
 
 	public async fetchModel() {
-		try {
-			this.models = await getLMStudioModels(this.options.lmStudioBaseUrl)
-		} catch (error) {
-			console.warn("Failed to fetch LM Studio models, using defaults:", error)
-			this.models = {}
-		}
+		this.models = await getModels({ provider: "lmstudio" })
 		return this.getModel()
 	}
 
 	override getModel(): { id: string; info: ModelInfo } {
 		const id = this.options.lmStudioModelId || ""
 
 		// Try to get the actual model info from fetched models
-		const info = this.models[id] || openAiModelInfoSaneDefaults
+		// The fetcher uses model.path or modelKey as keys, so try both
+		let info: ModelInfo | undefined = undefined
+
+		if (this.models && Object.keys(this.models).length > 0) {
+			info = this.models[id]
+
+			// If not found by exact ID, try to find by partial match (for model paths)
+			if (!info) {
+				const modelKeys = Object.keys(this.models)
+				const matchingKey = modelKeys.find((key) => key === id || key.includes(id) || id.includes(key))
+				if (matchingKey) {
+					info = this.models[matchingKey]
+				}
+			}
+		}
+
+		// Fall back to defaults if still not found
+		info = info || openAiModelInfoSaneDefaults
 
 		return {
 			id,
```
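The new lookup reads most clearly as a pure function: exact key first, then a substring match in either direction (the cache may key models by full path while settings hold a short name, or vice versa), then defaults. A standalone sketch; resolveModelInfo and the sample model path are illustrative, not repo code:

```ts
interface ModelInfoLite {
	contextWindow: number
}

function resolveModelInfo(
	models: Record<string, ModelInfoLite>,
	id: string,
	defaults: ModelInfoLite,
): ModelInfoLite {
	// 1. Exact match on the configured id.
	if (models[id]) return models[id]
	// 2. Bidirectional partial match, covering path-style cache keys.
	const matchingKey = Object.keys(models).find((key) => key.includes(id) || id.includes(key))
	if (matchingKey) return models[matchingKey]
	// 3. Fall back to sane defaults.
	return defaults
}

// A cache keyed by full path still resolves a short configured id:
const cached = { "lmstudio-community/llama-3-8b": { contextWindow: 32_768 } }
console.log(resolveModelInfo(cached, "llama-3-8b", { contextWindow: 128_000 }))
// → { contextWindow: 32768 }
```

One trade-off worth noting: matching substrings in both directions can select an unintended entry when ids overlap (say, "llama-3" against "llama-3-8b"); the diff accepts the first key that find() returns.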
```diff
@@ -157,8 +169,9 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 	async completePrompt(prompt: string): Promise<string> {
 		try {
 			// Create params object with optional draft model
+			const modelInfo = await this.fetchModel()
 			const params: any = {
-				model: this.getModel().id,
+				model: modelInfo.id,
 				messages: [{ role: "user", content: prompt }],
 				temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
 				stream: false,
@@ -178,17 +191,3 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		}
 	}
 }
-
-export async function getLmStudioModels(baseUrl = "http://localhost:1234") {
-	try {
-		if (!URL.canParse(baseUrl)) {
-			return []
-		}
-
-		const response = await axios.get(`${baseUrl}/v1/models`)
-		const modelsArray = response.data?.data?.map((model: any) => model.id) || []
-		return [...new Set<string>(modelsArray)]
-	} catch (error) {
-		return []
-	}
-}
```
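With the handler routed through the shared cache, the file-local axios helper above becomes dead code, hence its deletion. The repo's modelCache module is not shown in this commit, but a keyed memoized fetch is the usual shape of such a cache; the sketch below is a generic illustration under that assumption, not the actual implementation:

```ts
type ModelRecord = Record<string, { contextWindow: number }> // simplified stand-in

const cache = new Map<string, Promise<ModelRecord>>()

// One in-flight or completed fetch per provider key, shared by all callers,
// so per-provider helpers like the removed getLmStudioModels are unnecessary.
function getModelsOnce(provider: string, fetchFn: () => Promise<ModelRecord>): Promise<ModelRecord> {
	let entry = cache.get(provider)
	if (!entry) {
		entry = fetchFn().catch((error) => {
			cache.delete(provider) // don't memoize failures; the next call retries
			throw error
		})
		cache.set(provider, entry)
	}
	return entry
}
```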
