Skip to content

Commit b5ae4d3

Browse files
committed
fix: streamline model refresh by automatically flushing cache in handlers for LMStudio and Ollama components
1 parent 1747b29 commit b5ae4d3

File tree

3 files changed

+10
-18
lines changed

3 files changed

+10
-18
lines changed

src/core/webview/webviewMessageHandler.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -448,6 +448,9 @@ export const webviewMessageHandler = async (
448448
// Specific handler for Ollama models only
449449
const { apiConfiguration: ollamaApiConfig } = await provider.getState()
450450
try {
451+
// Flush cache first to ensure fresh models
452+
await flushModels("ollama")
453+
451454
const ollamaModels = await getModels({
452455
provider: "ollama",
453456
baseUrl: ollamaApiConfig.ollamaBaseUrl,
@@ -469,6 +472,9 @@ export const webviewMessageHandler = async (
469472
// Specific handler for LM Studio models only
470473
const { apiConfiguration: lmStudioApiConfig } = await provider.getState()
471474
try {
475+
// Flush cache first to ensure fresh models
476+
await flushModels("lmstudio")
477+
472478
const lmStudioModels = await getModels({
473479
provider: "lmstudio",
474480
baseUrl: lmStudioApiConfig.lmStudioBaseUrl,

webview-ui/src/components/settings/providers/LMStudio.tsx

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,8 @@ export const LMStudio = ({ apiConfiguration, setApiConfigurationField }: LMStudi
5252

5353
// Refresh models on mount
5454
useEffect(() => {
55-
// Flush cache first to ensure we get fresh models
56-
vscode.postMessage({ type: "flushRouterModels", text: "lmstudio" })
57-
58-
// Request fresh LM Studio models after a small delay to ensure cache is flushed
59-
const timer = setTimeout(() => {
60-
vscode.postMessage({ type: "requestLmStudioModels" })
61-
}, 100)
62-
63-
return () => clearTimeout(timer)
55+
// Request fresh models - the handler now flushes cache automatically
56+
vscode.postMessage({ type: "requestLmStudioModels" })
6457
}, [])
6558

6659
// Check if the selected model exists in the fetched models

webview-ui/src/components/settings/providers/Ollama.tsx

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -51,15 +51,8 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
5151

5252
// Refresh models on mount
5353
useEffect(() => {
54-
// Flush cache first to ensure we get fresh models
55-
vscode.postMessage({ type: "flushRouterModels", text: "ollama" })
56-
57-
// Request fresh Ollama models after a small delay to ensure cache is flushed
58-
const timer = setTimeout(() => {
59-
vscode.postMessage({ type: "requestOllamaModels" })
60-
}, 100)
61-
62-
return () => clearTimeout(timer)
54+
// Request fresh models - the handler now flushes cache automatically
55+
vscode.postMessage({ type: "requestOllamaModels" })
6356
}, [])
6457

6558
// Check if the selected model exists in the fetched models

0 commit comments

Comments (0)