Commit 2da41b4

fix: resolve VS Code LM API model selection issue
- Updated UI to use model.id instead of vendor/family for model selection
- Modified VsCodeLmHandler to select models by ID when available
- Added proper error handling for model approval failures
- Improved error messages for better user experience
- Added test coverage for ID-based model selection

Fixes #8991
1 parent 5b753f5 commit 2da41b4

3 files changed, +89 -17 lines changed

src/api/providers/__tests__/vscode-lm.spec.ts

Lines changed: 19 additions & 0 deletions
@@ -120,6 +120,25 @@ describe("VsCodeLmHandler", () => {
 		})
 	})

+	it("should select model by ID when ID is provided", async () => {
+		const mockModel = { ...mockLanguageModelChat, id: "specific-model-id" }
+		// Mock returning multiple models when fetching all
+		;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([
+			{ ...mockLanguageModelChat, id: "other-model" },
+			mockModel,
+			{ ...mockLanguageModelChat, id: "another-model" },
+		])
+
+		const client = await handler["createClient"]({
+			id: "specific-model-id",
+		})
+
+		expect(client).toBeDefined()
+		expect(client.id).toBe("specific-model-id")
+		// When selecting by ID, we fetch all models first
+		expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({})
+	})
+
 	it("should return default client when no models available", async () => {
 		;(vscode.lm.selectChatModels as Mock).mockResolvedValueOnce([])
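
The new test spreads a shared mockLanguageModelChat fixture that is defined earlier in the spec file and is not part of this diff. A minimal sketch of what such a fixture could look like, assuming vitest mocks and the fields the vscode.LanguageModelChat interface requires; every value below is a placeholder, not code from this commit:

    import { vi } from "vitest"
    import type * as vscode from "vscode"

    // Placeholder fixture; the real one lives elsewhere in vscode-lm.spec.ts.
    const mockLanguageModelChat = {
    	id: "mock-model",
    	name: "Mock Model",
    	vendor: "mock-vendor",
    	family: "mock-family",
    	version: "1.0.0",
    	maxInputTokens: 4096,
    	sendRequest: vi.fn(),
    	countTokens: vi.fn(),
    } as unknown as vscode.LanguageModelChat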

src/api/providers/vscode-lm.ts

Lines changed: 58 additions & 7 deletions
@@ -111,14 +111,35 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 	 */
 	async createClient(selector: vscode.LanguageModelChatSelector): Promise<vscode.LanguageModelChat> {
 		try {
+			// If we have an ID, try to find the specific model by ID first
+			if (selector.id) {
+				// Get all available models
+				const allModels = await vscode.lm.selectChatModels({})
+
+				// Find the model with the matching ID
+				const modelById = allModels.find((model) => model.id === selector.id)
+
+				if (modelById) {
+					console.debug(`Roo Code <Language Model API>: Found model by ID: ${modelById.id}`)
+					return modelById
+				} else {
+					console.warn(
+						`Roo Code <Language Model API>: Model with ID '${selector.id}' not found, falling back to selector`,
+					)
+				}
+			}
+
+			// Fallback to selector-based selection
 			const models = await vscode.lm.selectChatModels(selector)

 			// Use first available model or create a minimal model object
 			if (models && Array.isArray(models) && models.length > 0) {
+				console.debug(`Roo Code <Language Model API>: Selected model: ${models[0].id}`)
 				return models[0]
 			}

 			// Create a minimal model if no models are available
+			console.warn(`Roo Code <Language Model API>: No models available, creating fallback model`)
 			return {
 				id: "default-lm",
 				name: "Default Language Model",
@@ -363,17 +384,38 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan
 		try {
 			// Create the response stream with minimal required options
 			const requestOptions: vscode.LanguageModelChatRequestOptions = {
-				justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`,
+				justification: `Roo Code would like to use '${client.name || client.id}' from '${client.vendor}'. Click 'Allow' to proceed.`,
 			}

 			// Note: Tool support is currently provided by the VSCode Language Model API directly
 			// Extensions can register tools using vscode.lm.registerTool()

-			const response: vscode.LanguageModelChatResponse = await client.sendRequest(
-				vsCodeLmMessages,
-				requestOptions,
-				this.currentRequestCancellation.token,
-			)
+			let response: vscode.LanguageModelChatResponse
+			try {
+				response = await client.sendRequest(
+					vsCodeLmMessages,
+					requestOptions,
+					this.currentRequestCancellation.token,
+				)
+			} catch (error) {
+				// Check if this is a model approval error
+				if (error instanceof Error) {
+					if (
+						error.message.includes("model_not_supported") ||
+						error.message.includes("Model is not supported")
+					) {
+						throw new Error(
+							"Model not approved. Please select the model in settings, then click 'Allow' when prompted by VS Code to approve access to the language model.",
+						)
+					} else if (error.message.includes("cancelled") || error.message.includes("Cancelled")) {
+						throw new Error(
+							"Model access was cancelled. Please approve access to use the VS Code Language Model API.",
+						)
+					}
+				}
+				// Re-throw the original error if it's not a known approval issue
+				throw error
+			}

 			// Consume the stream and handle both text and tool call chunks
 			for await (const chunk of response.stream) {
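
The catch block above maps two known sendRequest failure modes (model not approved, request cancelled) onto actionable messages and re-throws everything else unchanged. The same idea as a standalone helper, purely for illustration; the commit keeps this logic inline and does not extract a function like this:

    // Illustrative helper only; not present in the commit.
    function toFriendlyLmError(error: unknown): Error {
    	if (error instanceof Error) {
    		if (error.message.includes("model_not_supported") || error.message.includes("Model is not supported")) {
    			return new Error(
    				"Model not approved. Please select the model in settings, then click 'Allow' when prompted by VS Code to approve access to the language model.",
    			)
    		}
    		if (error.message.includes("cancelled") || error.message.includes("Cancelled")) {
    			return new Error("Model access was cancelled. Please approve access to use the VS Code Language Model API.")
    		}
    		// Unknown failures pass through unchanged.
    		return error
    	}
    	return new Error(String(error))
    }
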
@@ -566,7 +608,16 @@ const VSCODE_LM_STATIC_BLACKLIST: string[] = ["claude-3.7-sonnet", "claude-3.7-s
 export async function getVsCodeLmModels() {
 	try {
 		const models = (await vscode.lm.selectChatModels({})) || []
-		return models.filter((model) => !VSCODE_LM_STATIC_BLACKLIST.includes(model.id))
+		// Filter blacklisted models and ensure all required fields are present
+		return models
+			.filter((model) => !VSCODE_LM_STATIC_BLACKLIST.includes(model.id))
+			.map((model) => ({
+				id: model.id,
+				vendor: model.vendor,
+				family: model.family,
+				name: model.name,
+				version: model.version,
+			}))
 	} catch (error) {
 		console.error(
 			`Error fetching VS Code LM models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
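
With the added map step, getVsCodeLmModels hands the webview plain, serializable objects instead of live LanguageModelChat instances (which carry methods like sendRequest). A sample of the resulting shape; the id, vendor, and version values are illustrative and vary by VS Code installation:

    // Example entry only; actual models depend on the installed providers.
    const exampleVsCodeLmModel = {
    	id: "copilot-gpt-4o",
    	vendor: "copilot",
    	family: "gpt-4o",
    	name: "GPT-4o",
    	version: "gpt-4o-2024-05-13",
    }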

webview-ui/src/components/settings/providers/VSCodeLM.tsx

Lines changed: 12 additions & 10 deletions
@@ -53,23 +53,25 @@ export const VSCodeLM = ({ apiConfiguration, setApiConfigurationField }: VSCodeL
 			<label className="block font-medium mb-1">{t("settings:providers.vscodeLmModel")}</label>
 			{vsCodeLmModels.length > 0 ? (
 				<Select
-					value={
-						apiConfiguration?.vsCodeLmModelSelector
-							? `${apiConfiguration.vsCodeLmModelSelector.vendor ?? ""}/${apiConfiguration.vsCodeLmModelSelector.family ?? ""}`
-							: ""
-					}
+					value={apiConfiguration?.vsCodeLmModelSelector?.id || ""}
 					onValueChange={handleInputChange("vsCodeLmModelSelector", (value) => {
-						const [vendor, family] = value.split("/")
-						return { vendor, family }
+						// Find the selected model to get all its properties
+						const selectedModel = vsCodeLmModels.find((model) => model.id === value)
+						if (selectedModel) {
+							return {
+								id: selectedModel.id,
+								vendor: selectedModel.vendor,
+								family: selectedModel.family,
+							}
+						}
+						return { id: value }
 					})}>
 					<SelectTrigger className="w-full">
 						<SelectValue placeholder={t("settings:common.select")} />
 					</SelectTrigger>
 					<SelectContent>
 						{vsCodeLmModels.map((model) => (
-							<SelectItem
-								key={`${model.vendor}/${model.family}`}
-								value={`${model.vendor}/${model.family}`}>
+							<SelectItem key={model.id || `${model.vendor}/${model.family}`} value={model.id || ""}>
 								{`${model.vendor} - ${model.family}`}
 							</SelectItem>
 						))}
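
The net effect on the persisted vsCodeLmModelSelector, with illustrative values (a vendor/family pair can match more than one model, which is why the exact id is now stored as the primary key):

    // Illustrative values only.
    // Before this commit: only vendor/family were persisted.
    const oldSelector = { vendor: "copilot", family: "gpt-4o" }

    // After this commit: the exact model id is persisted, with vendor/family kept
    // so selector-based matching still works as a fallback.
    const newSelector = { id: "copilot-gpt-4o", vendor: "copilot", family: "gpt-4o" }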
