Skip to content

Commit 26178ec

Browse files
committed
feat: implement specific handlers for Ollama and LM Studio models in webviewMessageHandler
1 parent 96b4660 commit 26178ec

File tree

2 files changed: +53 −5 lines changed

src/core/webview/webviewMessageHandler.ts

Lines changed: 51 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -354,10 +354,11 @@ export const webviewMessageHandler = async (
354354
{ key: "requesty", options: { provider: "requesty", apiKey: apiConfiguration.requestyApiKey } },
355355
{ key: "glama", options: { provider: "glama" } },
356356
{ key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } },
357-
{ key: "ollama", options: { provider: "ollama", baseUrl: apiConfiguration.ollamaBaseUrl } },
358-
{ key: "lmstudio", options: { provider: "lmstudio", baseUrl: apiConfiguration.lmStudioBaseUrl } },
359357
]
360358

359+
// Don't fetch Ollama and LM Studio models by default anymore
360+
// They have their own specific handlers: requestOllamaModels and requestLmStudioModels
361+
361362
const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey
362363
const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl
363364
if (litellmApiKey && litellmBaseUrl) {
@@ -374,7 +375,12 @@ export const webviewMessageHandler = async (
374375
}),
375376
)
376377

377-
const fetchedRouterModels: Partial<Record<RouterName, ModelRecord>> = { ...routerModels }
378+
const fetchedRouterModels: Partial<Record<RouterName, ModelRecord>> = {
379+
...routerModels,
380+
// Initialize ollama and lmstudio with empty objects since they use separate handlers
381+
ollama: {},
382+
lmstudio: {},
383+
}
378384

379385
results.forEach((result, index) => {
380386
const routerName = modelFetchPromises[index].key // Get RouterName using index
@@ -416,6 +422,48 @@ export const webviewMessageHandler = async (
416422
})
417423

418424
break
425+
case "requestOllamaModels": {
426+
// Specific handler for Ollama models only
427+
const { apiConfiguration: ollamaApiConfig } = await provider.getState()
428+
try {
429+
const ollamaModels = await getModels({
430+
provider: "ollama",
431+
baseUrl: ollamaApiConfig.ollamaBaseUrl,
432+
})
433+
434+
if (Object.keys(ollamaModels).length > 0) {
435+
provider.postMessageToWebview({
436+
type: "ollamaModels",
437+
ollamaModels: Object.keys(ollamaModels),
438+
})
439+
}
440+
} catch (error) {
441+
// Silently fail - user hasn't configured Ollama yet
442+
console.debug("Ollama models fetch failed:", error)
443+
}
444+
break
445+
}
446+
case "requestLmStudioModels": {
447+
// Specific handler for LM Studio models only
448+
const { apiConfiguration: lmStudioApiConfig } = await provider.getState()
449+
try {
450+
const lmStudioModels = await getModels({
451+
provider: "lmstudio",
452+
baseUrl: lmStudioApiConfig.lmStudioBaseUrl,
453+
})
454+
455+
if (Object.keys(lmStudioModels).length > 0) {
456+
provider.postMessageToWebview({
457+
type: "lmStudioModels",
458+
lmStudioModels: Object.keys(lmStudioModels),
459+
})
460+
}
461+
} catch (error) {
462+
// Silently fail - user hasn't configured LM Studio yet
463+
console.debug("LM Studio models fetch failed:", error)
464+
}
465+
break
466+
}
419467
case "requestOpenAiModels":
420468
if (message?.values?.baseUrl && message?.values?.apiKey) {
421469
const openAiModels = await getOpenAiModels(

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -162,9 +162,9 @@ const ApiOptions = ({
162162
},
163163
})
164164
} else if (selectedProvider === "ollama") {
165-
vscode.postMessage({ type: "requestRouterModels" })
165+
vscode.postMessage({ type: "requestOllamaModels" })
166166
} else if (selectedProvider === "lmstudio") {
167-
vscode.postMessage({ type: "requestRouterModels" })
167+
vscode.postMessage({ type: "requestLmStudioModels" })
168168
} else if (selectedProvider === "vscode-lm") {
169169
vscode.postMessage({ type: "requestVsCodeLmModels" })
170170
} else if (selectedProvider === "litellm") {

0 commit comments

Comments (0)