Skip to content

Commit d3b6a93

Browse files
committed
feat: add model refresh functionality on mount for LMStudio and Ollama components
1 parent d6b8b4b commit d3b6a93

File tree

2 files changed

+34
-2
lines changed

2 files changed

+34
-2
lines changed

webview-ui/src/components/settings/providers/LMStudio.tsx

Lines changed: 17 additions & 1 deletion
Original file line number | Diff line number | Diff line change

@@ -1,4 +1,4 @@
-import { useCallback, useState, useMemo } from "react"
+import { useCallback, useState, useMemo, useEffect } from "react"
 import { useEvent } from "react-use"
 import { Trans } from "react-i18next"
 import { Checkbox } from "vscrui"
@@ -9,6 +9,7 @@ import type { ProviderSettings } from "@roo-code/types"
 import { useAppTranslation } from "@src/i18n/TranslationContext"
 import { ExtensionMessage } from "@roo/ExtensionMessage"
 import { useRouterModels } from "@src/components/ui/hooks/useRouterModels"
+import { vscode } from "@src/utils/vscode"

 import { inputEventTransform } from "../transforms"

@@ -49,6 +50,21 @@ export const LMStudio = ({ apiConfiguration, setApiConfigurationField }: LMStudi

 	useEvent("message", onMessage)

+	// Refresh models on mount
+	useEffect(() => {
+		// Request fresh models without flushing first
+		// This ensures cached models remain visible while new ones load
+		vscode.postMessage({ type: "requestRouterModels" })
+
+		// Optionally flush cache after a delay to ensure fresh data on next load
+		// This won't affect the current session since models are already being fetched
+		const timer = setTimeout(() => {
+			vscode.postMessage({ type: "flushRouterModels", text: "lmstudio" })
+		}, 1000)
+
+		return () => clearTimeout(timer)
+	}, [])
+
 	// Check if the selected model exists in the fetched models
 	const modelNotAvailable = useMemo(() => {
 		const selectedModel = apiConfiguration?.lmStudioModelId

webview-ui/src/components/settings/providers/Ollama.tsx

Lines changed: 17 additions & 1 deletion
Original file line number | Diff line number | Diff line change

@@ -1,4 +1,4 @@
-import { useState, useCallback, useMemo } from "react"
+import { useState, useCallback, useMemo, useEffect } from "react"
 import { useEvent } from "react-use"
 import { VSCodeTextField, VSCodeRadioGroup, VSCodeRadio } from "@vscode/webview-ui-toolkit/react"

@@ -8,6 +8,7 @@ import { ExtensionMessage } from "@roo/ExtensionMessage"

 import { useAppTranslation } from "@src/i18n/TranslationContext"
 import { useRouterModels } from "@src/components/ui/hooks/useRouterModels"
+import { vscode } from "@src/utils/vscode"

 import { inputEventTransform } from "../transforms"

@@ -48,6 +49,21 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro

 	useEvent("message", onMessage)

+	// Refresh models on mount
+	useEffect(() => {
+		// Request fresh models without flushing first
+		// This ensures cached models remain visible while new ones load
+		vscode.postMessage({ type: "requestRouterModels" })
+
+		// Optionally flush cache after a delay to ensure fresh data on next load
+		// This won't affect the current session since models are already being fetched
+		const timer = setTimeout(() => {
+			vscode.postMessage({ type: "flushRouterModels", text: "ollama" })
+		}, 1000)
+
+		return () => clearTimeout(timer)
+	}, [])
+
 	// Check if the selected model exists in the fetched models
 	const modelNotAvailable = useMemo(() => {
 		const selectedModel = apiConfiguration?.ollamaModelId

0 commit comments

Comments (0)