diff --git a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
index e49944a9975c..c8afe3a46272 100644
--- a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
+++ b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts
@@ -291,7 +291,7 @@ describe("useSelectedModel", () => {
 	})
 
 	describe("loading and error states", () => {
-		it("should return loading state when router models are loading", () => {
+		it("should NOT set loading when router models are loading but provider is static (anthropic)", () => {
 			mockUseRouterModels.mockReturnValue({
 				data: undefined,
 				isLoading: true,
@@ -307,10 +307,11 @@
 			const wrapper = createWrapper()
 			const { result } = renderHook(() => useSelectedModel(), { wrapper })
 
-			expect(result.current.isLoading).toBe(true)
+			// With static provider default (anthropic), useSelectedModel gates router fetches, so loading should be false
+			expect(result.current.isLoading).toBe(false)
 		})
 
-		it("should return loading state when open router model providers are loading", () => {
+		it("should NOT set loading when openrouter provider metadata is loading but provider is static (anthropic)", () => {
 			mockUseRouterModels.mockReturnValue({
 				data: { openrouter: {}, requesty: {}, glama: {}, unbound: {}, litellm: {}, "io-intelligence": {} },
 				isLoading: false,
@@ -326,10 +327,11 @@
 			const wrapper = createWrapper()
 			const { result } = renderHook(() => useSelectedModel(), { wrapper })
 
-			expect(result.current.isLoading).toBe(true)
+			// With static provider default (anthropic), openrouter providers are irrelevant, so loading should be false
+			expect(result.current.isLoading).toBe(false)
 		})
 
-		it("should return error state when either hook has an error", () => {
+		it("should NOT set error when hooks error but provider is static (anthropic)", () => {
 			mockUseRouterModels.mockReturnValue({
 				data: undefined,
 				isLoading: false,
@@ -345,7 +347,8 @@
 			const wrapper = createWrapper()
 			const { result } = renderHook(() => useSelectedModel(), { wrapper })
 
-			expect(result.current.isError).toBe(true)
+			// Error from gated routerModels should not bubble for static provider default
+			expect(result.current.isError).toBe(false)
 		})
 	})
 
diff --git a/webview-ui/src/components/ui/hooks/useRouterModels.ts b/webview-ui/src/components/ui/hooks/useRouterModels.ts
index 0ca68cc27a6e..5a3a761ecbf3 100644
--- a/webview-ui/src/components/ui/hooks/useRouterModels.ts
+++ b/webview-ui/src/components/ui/hooks/useRouterModels.ts
@@ -5,8 +5,16 @@ import { ExtensionMessage } from "@roo/ExtensionMessage"
 
 import { vscode } from "@src/utils/vscode"
 
-const getRouterModels = async () =>
+type UseRouterModelsOptions = {
+	providers?: string[] // subset filter (e.g. ["roo"])
+	enabled?: boolean // gate fetching entirely
+}
+
+let __routerModelsRequestCount = 0
+
+const getRouterModels = async (providers?: string[]) =>
 	new Promise<RouterModels>((resolve, reject) => {
+		const requestId = ++__routerModelsRequestCount
 		const cleanup = () => {
 			window.removeEventListener("message", handler)
 		}
@@ -20,10 +28,26 @@ const getRouterModels = async () =>
 			const message: ExtensionMessage = event.data
 
 			if (message.type === "routerModels") {
+				const msgProviders = message?.values?.providers as string[] | undefined
+				const requestedKey = providers && providers.length > 0 ? providers.slice().sort().join(",") : "all"
+				const responseKey =
+					Array.isArray(msgProviders) && msgProviders.length > 0
+						? msgProviders.slice().sort().join(",")
+						: "all"
+
+				if (requestedKey !== responseKey) {
+					// Not our response; ignore and wait for the matching one
+					return
+				}
+
 				clearTimeout(timeout)
 				cleanup()
 
 				if (message.routerModels) {
+					const keys = Object.keys(message.routerModels || {})
+					console.debug(
+						`[useRouterModels] response #${requestId} providers=${JSON.stringify(providers || "all")} keys=${keys.join(",")}`,
+					)
 					resolve(message.routerModels)
 				} else {
 					reject(new Error("No router models in response"))
@@ -32,7 +56,21 @@
 		}
 
 		window.addEventListener("message", handler)
-		vscode.postMessage({ type: "requestRouterModels" })
+		console.debug(
+			`[useRouterModels] request #${requestId} providers=${JSON.stringify(providers && providers.length ? providers : "all")}`,
+		)
+		if (providers && providers.length > 0) {
+			vscode.postMessage({ type: "requestRouterModels", values: { providers } })
+		} else {
+			vscode.postMessage({ type: "requestRouterModels" })
+		}
 	})
 
-export const useRouterModels = () => useQuery({ queryKey: ["routerModels"], queryFn: getRouterModels })
+export const useRouterModels = (opts: UseRouterModelsOptions = {}) => {
+	const providers = opts.providers && opts.providers.length ? [...opts.providers] : undefined
+	return useQuery({
+		queryKey: ["routerModels", providers?.slice().sort().join(",") || "all"],
+		queryFn: () => getRouterModels(providers),
+		enabled: opts.enabled !== false,
+	})
+}
diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
index 55fdd120bd3c..99f7c944b472 100644
--- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts
+++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
@@ -67,30 +67,56 @@ import { useOpenRouterModelProviders } from "./useOpenRouterModelProviders"
 import { useLmStudioModels } from "./useLmStudioModels"
 import { useOllamaModels } from "./useOllamaModels"
 
+const DYNAMIC_ROUTER_PROVIDERS = new Set<ProviderName>([
+	"openrouter",
+	"vercel-ai-gateway",
+	"litellm",
+	"deepinfra",
+	"io-intelligence",
+	"requesty",
+	"unbound",
+	"glama",
+	"roo",
+])
+
 export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 	const provider = apiConfiguration?.apiProvider || "anthropic"
 	const openRouterModelId = provider === "openrouter" ? apiConfiguration?.openRouterModelId : undefined
 	const lmStudioModelId = provider === "lmstudio" ? apiConfiguration?.lmStudioModelId : undefined
 	const ollamaModelId = provider === "ollama" ? apiConfiguration?.ollamaModelId : undefined
 
-	const routerModels = useRouterModels()
+	// Only fetch router models for dynamic router providers we actually need
+	const shouldFetchRouterModels = DYNAMIC_ROUTER_PROVIDERS.has(provider as ProviderName)
+	const routerModels = useRouterModels({
+		providers: shouldFetchRouterModels ? [provider] : undefined,
+		enabled: shouldFetchRouterModels, // disable entirely for static providers
+	})
+
 	const openRouterModelProviders = useOpenRouterModelProviders(openRouterModelId)
 	const lmStudioModels = useLmStudioModels(lmStudioModelId)
 	const ollamaModels = useOllamaModels(ollamaModelId)
 
+	// Compute readiness only for the data actually needed for the selected provider
+	const needRouterModels = shouldFetchRouterModels
+	const needOpenRouterProviders = provider === "openrouter"
+	const needLmStudio = typeof lmStudioModelId !== "undefined"
+	const needOllama = typeof ollamaModelId !== "undefined"
+
+	const isReady =
+		(!needLmStudio || typeof lmStudioModels.data !== "undefined") &&
+		(!needOllama || typeof ollamaModels.data !== "undefined") &&
+		(!needRouterModels || typeof routerModels.data !== "undefined") &&
+		(!needOpenRouterProviders || typeof openRouterModelProviders.data !== "undefined")
+
 	const { id, info } =
-		apiConfiguration &&
-		(typeof lmStudioModelId === "undefined" || typeof lmStudioModels.data !== "undefined") &&
-		(typeof ollamaModelId === "undefined" || typeof ollamaModels.data !== "undefined") &&
-		typeof routerModels.data !== "undefined" &&
-		typeof openRouterModelProviders.data !== "undefined"
+		apiConfiguration && isReady
 			? getSelectedModel({
 					provider,
 					apiConfiguration,
-					routerModels: routerModels.data,
-					openRouterModelProviders: openRouterModelProviders.data,
-					lmStudioModels: lmStudioModels.data,
-					ollamaModels: ollamaModels.data,
+					routerModels: (routerModels.data || ({} as RouterModels)) as RouterModels,
+					openRouterModelProviders: (openRouterModelProviders.data || {}) as Record<string, ModelInfo>,
+					lmStudioModels: (lmStudioModels.data || undefined) as ModelRecord | undefined,
+					ollamaModels: (ollamaModels.data || undefined) as ModelRecord | undefined,
 				})
 			: { id: anthropicDefaultModelId, info: undefined }
 
@@ -99,13 +125,15 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => {
 		id,
 		info,
 		isLoading:
-			routerModels.isLoading ||
-			openRouterModelProviders.isLoading ||
-			(apiConfiguration?.lmStudioModelId && lmStudioModels!.isLoading),
+			(needRouterModels && routerModels.isLoading) ||
+			(needOpenRouterProviders && openRouterModelProviders.isLoading) ||
+			(needLmStudio && lmStudioModels!.isLoading) ||
+			(needOllama && ollamaModels!.isLoading),
 		isError:
-			routerModels.isError ||
-			openRouterModelProviders.isError ||
-			(apiConfiguration?.lmStudioModelId && lmStudioModels!.isError),
+			(needRouterModels && routerModels.isError) ||
+			(needOpenRouterProviders && openRouterModelProviders.isError) ||
+			(needLmStudio && lmStudioModels!.isError) ||
+			(needOllama && ollamaModels!.isError),
 	}
 }