Commit 40d1708

QuinsZouls authored and mrubens committed
Bugfix/fix vscodellm model information (#2832)
* feat: initialize VS Code Language Model client in constructor
* feat: add VS Code LLM models and configuration
* feat: integrate VS Code LLM models into API configuration normalization
* Fix tests

Co-authored-by: Matt Rubens <[email protected]>
1 parent 92171fe commit 40d1708

File tree

4 files changed: +232 -15 lines changed

src/api/providers/__tests__/vscode-lm.test.ts

Lines changed: 15 additions & 9 deletions

@@ -134,6 +134,9 @@ describe("VsCodeLmHandler", () => {
 		const mockModel = { ...mockLanguageModelChat }
 		;(vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel])
 		mockLanguageModelChat.countTokens.mockResolvedValue(10)
+
+		// Override the default client with our test client
+		handler["client"] = mockLanguageModelChat
 	})

 	it("should stream text responses", async () => {
@@ -229,12 +232,7 @@ describe("VsCodeLmHandler", () => {

 		mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error("API Error"))

-		await expect(async () => {
-			const stream = handler.createMessage(systemPrompt, messages)
-			for await (const _ of stream) {
-				// consume stream
-			}
-		}).rejects.toThrow("API Error")
+		await expect(handler.createMessage(systemPrompt, messages).next()).rejects.toThrow("API Error")
 	})
 })

@@ -253,6 +251,8 @@ describe("VsCodeLmHandler", () => {
 	})

 	it("should return fallback model info when no client exists", () => {
+		// Clear the client first
+		handler["client"] = null
 		const model = handler.getModel()
 		expect(model.id).toBe("test-vendor/test-family")
 		expect(model.info).toBeDefined()
@@ -276,6 +276,10 @@ describe("VsCodeLmHandler", () => {
 			})(),
 		})

+		// Override the default client with our test client to ensure it uses
+		// the mock implementation rather than the default fallback
+		handler["client"] = mockLanguageModelChat
+
 		const result = await handler.completePrompt("Test prompt")
 		expect(result).toBe(responseText)
 		expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled()
@@ -287,9 +291,11 @@ describe("VsCodeLmHandler", () => {

 		mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error("Completion failed"))

-		await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
-			"VSCode LM completion error: Completion failed",
-		)
+		// Make sure we're using the mock client
+		handler["client"] = mockLanguageModelChat
+
+		const promise = handler.completePrompt("Test prompt")
+		await expect(promise).rejects.toThrow("VSCode LM completion error: Completion failed")
 	})
 })
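The rewritten assertion above works because an async generator's body does not begin executing until next() is first called, so a throw before the first yield rejects that first next() promise. A minimal self-contained sketch of the pattern (illustrative names, not code from this commit):

async function* failingStream(): AsyncGenerator<string> {
	// Rejects on the first next() call, before any value is yielded.
	throw new Error("API Error")
}

async function demo() {
	// Same shape as the test: assert the rejection via the first next().
	await failingStream()
		.next()
		.catch((error: Error) => console.log("caught:", error.message))
}

demo()

This avoids wrapping a whole for-await loop in an async closure just to surface the error.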

src/api/providers/vscode-lm.ts

Lines changed: 25 additions & 1 deletion

@@ -61,6 +61,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 				}
 			}
 		})
+		this.initializeClient()
 	} catch (error) {
 		// Ensure cleanup if constructor fails
 		this.dispose()
@@ -70,7 +71,30 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 			)
 		}
 	}
-
+	/**
+	 * Initializes the VS Code Language Model client.
+	 * This method is called during the constructor to set up the client,
+	 * which is useful when getModel() is called before the client has been created.
+	 * @returns Promise<void>
+	 * @throws Error when client initialization fails
+	 */
+	async initializeClient(): Promise<void> {
+		try {
+			// Check if the client is already initialized
+			if (this.client) {
+				console.debug("Roo Code <Language Model API>: Client already initialized")
+				return
+			}
+			// Create a new client instance
+			this.client = await this.createClient(this.options.vsCodeLmModelSelector || {})
+			console.debug("Roo Code <Language Model API>: Client initialized successfully")
+		} catch (error) {
+			// Handle errors during client initialization
+			const errorMessage = error instanceof Error ? error.message : "Unknown error"
+			console.error("Roo Code <Language Model API>: Client initialization failed:", errorMessage)
+			throw new Error(`Roo Code <Language Model API>: Failed to initialize client: ${errorMessage}`)
+		}
+	}
 	/**
 	 * Creates a language model chat client based on the provided selector.
 	 *
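Note that the constructor starts initializeClient() without awaiting it (constructors cannot be async), relying on the method's guard to keep it idempotent. A condensed sketch of that lazy-initialization pattern, using placeholder types rather than the extension's real VS Code API:

// Illustrative stand-ins; VsCodeLmClient and createClient are placeholders.
interface VsCodeLmClient {
	sendRequest(prompt: string): Promise<string>
}

class LazyClientHolder {
	private client: VsCodeLmClient | null = null

	constructor() {
		// Fire-and-forget kick-off; errors are logged rather than thrown here.
		void this.initializeClient().catch((error) => console.error(error))
	}

	async initializeClient(): Promise<void> {
		// Guard makes repeated calls safe: only one client is ever created.
		if (this.client) return
		this.client = await this.createClient()
	}

	private async createClient(): Promise<VsCodeLmClient> {
		// Stands in for vscode.lm.selectChatModels(selector) in the real handler.
		return { sendRequest: async (prompt) => `echo: ${prompt}` }
	}
}

The payoff is that getModel() can consult this.client as soon as initialization has completed, instead of always taking the fallback path.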

src/shared/api.ts

Lines changed: 183 additions & 0 deletions

@@ -1179,3 +1179,186 @@ export const xaiModels = {
 		description: "xAI's Grok Beta model (legacy) with 131K context window",
 	},
 } as const satisfies Record<string, ModelInfo>
+
+export type VscodeLlmModelId = keyof typeof vscodeLlmModels
+export const vscodeLlmDefaultModelId: VscodeLlmModelId = "claude-3.5-sonnet"
+export const vscodeLlmModels = {
+	"gpt-3.5-turbo": {
+		contextWindow: 12114,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-3.5-turbo",
+		version: "gpt-3.5-turbo-0613",
+		name: "GPT 3.5 Turbo",
+		supportsToolCalling: true,
+		maxInputTokens: 12114,
+	},
+	"gpt-4o-mini": {
+		contextWindow: 12115,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4o-mini",
+		version: "gpt-4o-mini-2024-07-18",
+		name: "GPT-4o mini",
+		supportsToolCalling: true,
+		maxInputTokens: 12115,
+	},
+	"gpt-4": {
+		contextWindow: 28501,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4",
+		version: "gpt-4-0613",
+		name: "GPT 4",
+		supportsToolCalling: true,
+		maxInputTokens: 28501,
+	},
+	"gpt-4-0125-preview": {
+		contextWindow: 63826,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4-turbo",
+		version: "gpt-4-0125-preview",
+		name: "GPT 4 Turbo",
+		supportsToolCalling: true,
+		maxInputTokens: 63826,
+	},
+	"gpt-4o": {
+		contextWindow: 63827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4o",
+		version: "gpt-4o-2024-11-20",
+		name: "GPT-4o",
+		supportsToolCalling: true,
+		maxInputTokens: 63827,
+	},
+	o1: {
+		contextWindow: 19827,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "o1-ga",
+		version: "o1-2024-12-17",
+		name: "o1 (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 19827,
+	},
+	"o3-mini": {
+		contextWindow: 63827,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "o3-mini",
+		version: "o3-mini-2025-01-31",
+		name: "o3-mini",
+		supportsToolCalling: true,
+		maxInputTokens: 63827,
+	},
+	"claude-3.5-sonnet": {
+		contextWindow: 81638,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "claude-3.5-sonnet",
+		version: "claude-3.5-sonnet",
+		name: "Claude 3.5 Sonnet",
+		supportsToolCalling: true,
+		maxInputTokens: 81638,
+	},
+	"claude-3.7-sonnet": {
+		contextWindow: 89827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "claude-3.7-sonnet",
+		version: "claude-3.7-sonnet",
+		name: "Claude 3.7 Sonnet",
+		supportsToolCalling: true,
+		maxInputTokens: 89827,
+	},
+	"claude-3.7-sonnet-thought": {
+		contextWindow: 89827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "claude-3.7-sonnet-thought",
+		version: "claude-3.7-sonnet-thought",
+		name: "Claude 3.7 Sonnet Thinking",
+		supportsToolCalling: false,
+		maxInputTokens: 89827,
+		thinking: true,
+	},
+	"gemini-2.0-flash-001": {
+		contextWindow: 127827,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gemini-2.0-flash",
+		version: "gemini-2.0-flash-001",
+		name: "Gemini 2.0 Flash",
+		supportsToolCalling: false,
+		maxInputTokens: 127827,
+	},
+	"gemini-2.5-pro": {
+		contextWindow: 63830,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gemini-2.5-pro",
+		version: "gemini-2.5-pro-preview-03-25",
+		name: "Gemini 2.5 Pro (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 63830,
+	},
+	"o4-mini": {
+		contextWindow: 111446,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "o4-mini",
+		version: "o4-mini-2025-04-16",
+		name: "o4-mini (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 111446,
+	},
+	"gpt-4.1": {
+		contextWindow: 111446,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		family: "gpt-4.1",
+		version: "gpt-4.1-2025-04-14",
+		name: "GPT-4.1 (Preview)",
+		supportsToolCalling: true,
+		maxInputTokens: 111446,
+	},
+} as const satisfies Record<
+	string,
+	ModelInfo & {
+		family: string
+		version: string
+		name: string
+		supportsToolCalling: boolean
+		maxInputTokens: number
+	}
+>
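One way a table like this is typically consumed is a keyed lookup with a fallback to the default model. The helper below is a hypothetical sketch, not part of this commit, though it imports the names the commit actually exports:

import { vscodeLlmModels, vscodeLlmDefaultModelId } from "@roo/shared/api"

// Resolve metadata for a model family, falling back to the default entry
// (claude-3.5-sonnet) when the family is missing or unknown.
function resolveVscodeLlmInfo(family?: string) {
	const key =
		family && family in vscodeLlmModels
			? (family as keyof typeof vscodeLlmModels)
			: vscodeLlmDefaultModelId
	return vscodeLlmModels[key]
}

console.log(resolveVscodeLlmInfo("gpt-4o").contextWindow) // 63827
console.log(resolveVscodeLlmInfo("unknown-family").name) // "Claude 3.5 Sonnet"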

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 9 additions & 5 deletions

@@ -38,6 +38,8 @@ import {
 	xaiDefaultModelId,
 	xaiModels,
 	ApiProvider,
+	vscodeLlmModels,
+	vscodeLlmDefaultModelId,
 } from "@roo/shared/api"
 import { ExtensionMessage } from "@roo/shared/ExtensionMessage"

@@ -1738,7 +1740,6 @@ const ApiOptions = ({
 export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 	const provider = apiConfiguration?.apiProvider || "anthropic"
 	const modelId = apiConfiguration?.apiModelId
-
 	const getProviderData = (models: Record<string, ModelInfo>, defaultId: string) => {
 		let selectedModelId: string
 		let selectedModelInfo: ModelInfo
@@ -1827,15 +1828,18 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) {
 				selectedModelInfo: openAiModelInfoSaneDefaults,
 			}
 		case "vscode-lm":
+			const modelFamily = apiConfiguration?.vsCodeLmModelSelector?.family ?? vscodeLlmDefaultModelId
+			const modelInfo = {
+				...openAiModelInfoSaneDefaults,
+				...vscodeLlmModels[modelFamily as keyof typeof vscodeLlmModels],
+				supportsImages: false, // VSCode LM API currently doesn't support images.
+			}
 			return {
 				selectedProvider: provider,
 				selectedModelId: apiConfiguration?.vsCodeLmModelSelector
 					? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}`
 					: "",
-				selectedModelInfo: {
-					...openAiModelInfoSaneDefaults,
-					supportsImages: false, // VSCode LM API currently doesn't support images.
-				},
+				selectedModelInfo: modelInfo,
 			}
 		default:
 			return getProviderData(anthropicModels, anthropicDefaultModelId)
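With this change, a vscode-lm configuration resolves real model metadata instead of only the generic OpenAI defaults. A hedged usage sketch (the "copilot" vendor value and the exact ApiConfiguration shape are assumptions for illustration):

import { normalizeApiConfiguration } from "./ApiOptions"

const { selectedModelId, selectedModelInfo } = normalizeApiConfiguration({
	apiProvider: "vscode-lm",
	// "copilot" is an assumed vendor value, shown only for illustration.
	vsCodeLmModelSelector: { vendor: "copilot", family: "claude-3.5-sonnet" },
})

console.log(selectedModelId) // "copilot/claude-3.5-sonnet"
console.log(selectedModelInfo.contextWindow) // 81638, from vscodeLlmModels
console.log(selectedModelInfo.supportsImages) // false (forced off for vscode-lm)

Because spreading undefined is a no-op in JavaScript, an unrecognized family simply leaves the openAiModelInfoSaneDefaults values in place rather than throwing.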

0 commit comments