Skip to content

Commit 1747b29

Browse files
committed
fix: update model refresh logic to flush cache before requesting fresh models for LMStudio and Ollama components
1 parent d3b6a93 commit 1747b29

File tree

2 files changed

+10
-14
lines changed

2 files changed

+10
-14
lines changed

webview-ui/src/components/settings/providers/LMStudio.tsx

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,13 @@ export const LMStudio = ({ apiConfiguration, setApiConfigurationField }: LMStudi
5252

5353
// Refresh models on mount
5454
useEffect(() => {
55-
// Request fresh models without flushing first
56-
// This ensures cached models remain visible while new ones load
57-
vscode.postMessage({ type: "requestRouterModels" })
55+
// Flush cache first to ensure we get fresh models
56+
vscode.postMessage({ type: "flushRouterModels", text: "lmstudio" })
5857

59-
// Optionally flush cache after a delay to ensure fresh data on next load
60-
// This won't affect the current session since models are already being fetched
58+
// Request fresh LM Studio models after a small delay to ensure cache is flushed
6159
const timer = setTimeout(() => {
62-
vscode.postMessage({ type: "flushRouterModels", text: "lmstudio" })
63-
}, 1000)
60+
vscode.postMessage({ type: "requestLmStudioModels" })
61+
}, 100)
6462

6563
return () => clearTimeout(timer)
6664
}, [])

webview-ui/src/components/settings/providers/Ollama.tsx

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -51,15 +51,13 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
5151

5252
// Refresh models on mount
5353
useEffect(() => {
54-
// Request fresh models without flushing first
55-
// This ensures cached models remain visible while new ones load
56-
vscode.postMessage({ type: "requestRouterModels" })
54+
// Flush cache first to ensure we get fresh models
55+
vscode.postMessage({ type: "flushRouterModels", text: "ollama" })
5756

58-
// Optionally flush cache after a delay to ensure fresh data on next load
59-
// This won't affect the current session since models are already being fetched
57+
// Request fresh Ollama models after a small delay to ensure cache is flushed
6058
const timer = setTimeout(() => {
61-
vscode.postMessage({ type: "flushRouterModels", text: "ollama" })
62-
}, 1000)
59+
vscode.postMessage({ type: "requestOllamaModels" })
60+
}, 100)
6361

6462
return () => clearTimeout(timer)
6563
}, [])

0 commit comments

Comments (0)