Skip to content

Commit f5cc227

Browse files
committed
Extract router models fetching logic into dedicated service
Refactor to improve separation of concerns:
- Create src/services/router-models/index.ts to handle provider model fetching
- Extract buildProviderFetchList() function for fetch options construction
- Extract fetchRouterModels() function for coordinated model fetching
- Move 150+ lines of provider-specific logic out of webviewMessageHandler
- Add comprehensive tests in router-models-service.spec.ts (11 test cases)

Benefits:
- Cleaner webviewMessageHandler with less business logic
- Reusable service for router model operations
- Better testability with isolated unit tests
- Clear separation between UI message handling and data fetching

Files changed:
- New: src/services/router-models/index.ts
- New: src/services/router-models/__tests__/router-models-service.spec.ts
- Modified: src/core/webview/webviewMessageHandler.ts (simplified)
1 parent 97beb13 commit f5cc227

File tree

3 files changed

+486
-259
lines changed

3 files changed

+486
-259
lines changed

src/core/webview/webviewMessageHandler.ts

Lines changed: 49 additions & 259 deletions
Original file line numberDiff line numberDiff line change
@@ -54,9 +54,9 @@ import { openMention } from "../mentions"
5454
import { getWorkspacePath } from "../../utils/path"
5555
import { Mode, defaultModeSlug } from "../../shared/modes"
5656
import { getModels, flushModels } from "../../api/providers/fetchers/modelCache"
57-
import { GetModelsOptions } from "../../shared/api"
5857
import { generateSystemPrompt } from "./generateSystemPrompt"
5958
import { getCommand } from "../../utils/commands"
59+
import { fetchRouterModels } from "../../services/router-models"
6060

6161
const ALLOWED_VSCODE_SETTINGS = new Set(["terminal.integrated.inheritEnv"])
6262

@@ -768,152 +768,27 @@ export const webviewMessageHandler = async (
768768
case "requestRouterModels": {
769769
// Phase 2: Scope to active provider during chat/task flows
770770
const { apiConfiguration } = await provider.getState()
771-
const providerStr = apiConfiguration.apiProvider
772-
const activeProvider: RouterName | undefined =
773-
providerStr && isRouterName(providerStr) ? providerStr : undefined
774-
775-
const routerModels: Partial<Record<RouterName, ModelRecord>> = {
776-
openrouter: {},
777-
"vercel-ai-gateway": {},
778-
huggingface: {},
779-
litellm: {},
780-
deepinfra: {},
781-
"io-intelligence": {},
782-
requesty: {},
783-
unbound: {},
784-
glama: {},
785-
ollama: {},
786-
lmstudio: {},
787-
roo: {},
788-
}
789-
790-
const safeGetModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
791-
try {
792-
return await getModels(options)
793-
} catch (error) {
794-
provider.log(
795-
`Failed to fetch models in webviewMessageHandler requestRouterModels for ${options.provider}: ${error instanceof Error ? error.message : String(error)}`,
796-
)
797-
throw error
798-
}
799-
}
800771

801-
// Build full list then filter to active provider
802-
const allFetches: { key: RouterName; options: GetModelsOptions }[] = [
803-
{ key: "openrouter", options: { provider: "openrouter" } },
804-
{
805-
key: "requesty",
806-
options: {
807-
provider: "requesty",
808-
apiKey: apiConfiguration.requestyApiKey,
809-
baseUrl: apiConfiguration.requestyBaseUrl,
810-
},
811-
},
812-
{ key: "glama", options: { provider: "glama" } },
813-
{ key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } },
814-
{ key: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } },
815-
{
816-
key: "deepinfra",
817-
options: {
818-
provider: "deepinfra",
819-
apiKey: apiConfiguration.deepInfraApiKey,
820-
baseUrl: apiConfiguration.deepInfraBaseUrl,
821-
},
822-
},
823-
{
824-
key: "roo",
825-
options: {
826-
provider: "roo",
827-
baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy",
828-
apiKey: CloudService.hasInstance()
829-
? CloudService.instance.authService?.getSessionToken()
830-
: undefined,
831-
},
832-
},
833-
]
834-
835-
// Include local providers (ollama, lmstudio, huggingface) when they are the active provider
836-
if (activeProvider === "ollama") {
837-
allFetches.push({
838-
key: "ollama",
839-
options: {
840-
provider: "ollama",
841-
baseUrl: apiConfiguration.ollamaBaseUrl,
842-
apiKey: apiConfiguration.ollamaApiKey,
843-
},
844-
})
845-
}
846-
if (activeProvider === "lmstudio") {
847-
allFetches.push({
848-
key: "lmstudio",
849-
options: {
850-
provider: "lmstudio",
851-
baseUrl: apiConfiguration.lmStudioBaseUrl,
852-
},
853-
})
854-
}
855-
if (activeProvider === "huggingface") {
856-
allFetches.push({
857-
key: "huggingface",
858-
options: {
859-
provider: "huggingface",
860-
},
861-
})
862-
}
863-
864-
// IO Intelligence (optional)
865-
if (apiConfiguration.ioIntelligenceApiKey) {
866-
allFetches.push({
867-
key: "io-intelligence",
868-
options: { provider: "io-intelligence", apiKey: apiConfiguration.ioIntelligenceApiKey },
869-
})
870-
}
871-
872-
// LiteLLM (optional)
873-
const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey
874-
const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl
875-
if (litellmApiKey && litellmBaseUrl) {
876-
allFetches.push({
877-
key: "litellm",
878-
options: { provider: "litellm", apiKey: litellmApiKey, baseUrl: litellmBaseUrl },
879-
})
880-
}
881-
882-
const modelFetchPromises = activeProvider
883-
? allFetches.filter(({ key }) => key === activeProvider)
884-
: allFetches
772+
const { routerModels, errors } = await fetchRouterModels({
773+
apiConfiguration,
774+
activeProviderOnly: true,
775+
litellmOverrides: message?.values
776+
? {
777+
apiKey: message.values.litellmApiKey,
778+
baseUrl: message.values.litellmBaseUrl,
779+
}
780+
: undefined,
781+
})
885782

886-
// If nothing matched (edge case), still post empty structure for stability
887-
if (modelFetchPromises.length === 0) {
888-
await provider.postMessageToWebview({
889-
type: "routerModels",
890-
routerModels: routerModels as RouterModels,
783+
// Send error notifications for failed providers
784+
errors.forEach((err) => {
785+
provider.log(`Error fetching models for ${err.provider}: ${err.error}`)
786+
provider.postMessageToWebview({
787+
type: "singleRouterModelFetchResponse",
788+
success: false,
789+
error: err.error,
790+
values: { provider: err.provider },
891791
})
892-
break
893-
}
894-
895-
const results = await Promise.allSettled(
896-
modelFetchPromises.map(async ({ key, options }) => {
897-
const models = await safeGetModels(options)
898-
return { key, models }
899-
}),
900-
)
901-
902-
results.forEach((result, index) => {
903-
const routerName = modelFetchPromises[index].key
904-
if (result.status === "fulfilled") {
905-
routerModels[routerName] = result.value.models
906-
} else {
907-
const errorMessage = result.reason instanceof Error ? result.reason.message : String(result.reason)
908-
provider.log(`Error fetching models for ${routerName}: ${errorMessage}`)
909-
routerModels[routerName] = {}
910-
provider.postMessageToWebview({
911-
type: "singleRouterModelFetchResponse",
912-
success: false,
913-
error: errorMessage,
914-
values: { provider: routerName },
915-
})
916-
}
917792
})
918793

919794
provider.postMessageToWebview({ type: "routerModels", routerModels: routerModels as RouterModels })
@@ -923,126 +798,41 @@ export const webviewMessageHandler = async (
923798
// Settings and activation: fetch all providers (legacy behavior)
924799
const { apiConfiguration } = await provider.getState()
925800

926-
const routerModels: Partial<Record<RouterName, ModelRecord>> = {
927-
openrouter: {},
928-
"vercel-ai-gateway": {},
929-
huggingface: {},
930-
litellm: {},
931-
deepinfra: {},
932-
"io-intelligence": {},
933-
requesty: {},
934-
unbound: {},
935-
glama: {},
936-
ollama: {},
937-
lmstudio: {},
938-
roo: {},
939-
}
940-
941-
const safeGetModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
942-
try {
943-
return await getModels(options)
944-
} catch (error) {
945-
provider.log(
946-
`Failed to fetch models in webviewMessageHandler requestRouterModelsAll for ${options.provider}: ${error instanceof Error ? error.message : String(error)}`,
947-
)
948-
throw error
949-
}
950-
}
801+
const { routerModels, errors } = await fetchRouterModels({
802+
apiConfiguration,
803+
activeProviderOnly: false,
804+
litellmOverrides: message?.values
805+
? {
806+
apiKey: message.values.litellmApiKey,
807+
baseUrl: message.values.litellmBaseUrl,
808+
}
809+
: undefined,
810+
})
951811

952-
const modelFetchPromises: { key: RouterName; options: GetModelsOptions }[] = [
953-
{ key: "openrouter", options: { provider: "openrouter" } },
954-
{
955-
key: "requesty",
956-
options: {
957-
provider: "requesty",
958-
apiKey: apiConfiguration.requestyApiKey,
959-
baseUrl: apiConfiguration.requestyBaseUrl,
960-
},
961-
},
962-
{ key: "glama", options: { provider: "glama" } },
963-
{ key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } },
964-
{ key: "vercel-ai-gateway", options: { provider: "vercel-ai-gateway" } },
965-
{
966-
key: "deepinfra",
967-
options: {
968-
provider: "deepinfra",
969-
apiKey: apiConfiguration.deepInfraApiKey,
970-
baseUrl: apiConfiguration.deepInfraBaseUrl,
971-
},
972-
},
973-
{
974-
key: "roo",
975-
options: {
976-
provider: "roo",
977-
baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy",
978-
apiKey: CloudService.hasInstance()
979-
? CloudService.instance.authService?.getSessionToken()
980-
: undefined,
981-
},
982-
},
983-
]
984-
985-
// Add IO Intelligence if API key is provided.
986-
const ioIntelligenceApiKey = apiConfiguration.ioIntelligenceApiKey
987-
if (ioIntelligenceApiKey) {
988-
modelFetchPromises.push({
989-
key: "io-intelligence",
990-
options: { provider: "io-intelligence", apiKey: ioIntelligenceApiKey },
812+
// Send error notifications for failed providers
813+
errors.forEach((err) => {
814+
provider.log(`Error fetching models for ${err.provider}: ${err.error}`)
815+
provider.postMessageToWebview({
816+
type: "singleRouterModelFetchResponse",
817+
success: false,
818+
error: err.error,
819+
values: { provider: err.provider },
991820
})
992-
}
993-
994-
// Don't fetch Ollama and LM Studio models by default anymore.
995-
// They have their own specific handlers: requestOllamaModels and requestLmStudioModels.
821+
})
996822

997-
const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey
998-
const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl
999-
if (litellmApiKey && litellmBaseUrl) {
1000-
modelFetchPromises.push({
1001-
key: "litellm",
1002-
options: { provider: "litellm", apiKey: litellmApiKey, baseUrl: litellmBaseUrl },
823+
// Send ollama/lmstudio-specific messages if models were fetched
824+
if (routerModels.ollama && Object.keys(routerModels.ollama).length > 0) {
825+
provider.postMessageToWebview({
826+
type: "ollamaModels",
827+
ollamaModels: routerModels.ollama,
828+
})
829+
}
830+
if (routerModels.lmstudio && Object.keys(routerModels.lmstudio).length > 0) {
831+
provider.postMessageToWebview({
832+
type: "lmStudioModels",
833+
lmStudioModels: routerModels.lmstudio,
1003834
})
1004835
}
1005-
1006-
const results = await Promise.allSettled(
1007-
modelFetchPromises.map(async ({ key, options }) => {
1008-
const models = await safeGetModels(options)
1009-
return { key, models }
1010-
}),
1011-
)
1012-
1013-
results.forEach((result, index) => {
1014-
const routerName = modelFetchPromises[index].key
1015-
1016-
if (result.status === "fulfilled") {
1017-
routerModels[routerName] = result.value.models
1018-
1019-
// Ollama and LM Studio settings pages still need these events.
1020-
if (routerName === "ollama" && Object.keys(result.value.models).length > 0) {
1021-
provider.postMessageToWebview({
1022-
type: "ollamaModels",
1023-
ollamaModels: result.value.models,
1024-
})
1025-
} else if (routerName === "lmstudio" && Object.keys(result.value.models).length > 0) {
1026-
provider.postMessageToWebview({
1027-
type: "lmStudioModels",
1028-
lmStudioModels: result.value.models,
1029-
})
1030-
}
1031-
} else {
1032-
// Handle rejection: Post a specific error message for this provider.
1033-
const errorMessage = result.reason instanceof Error ? result.reason.message : String(result.reason)
1034-
provider.log(`Error fetching models for ${routerName}: ${errorMessage}`)
1035-
1036-
routerModels[routerName] = {}
1037-
1038-
provider.postMessageToWebview({
1039-
type: "singleRouterModelFetchResponse",
1040-
success: false,
1041-
error: errorMessage,
1042-
values: { provider: routerName },
1043-
})
1044-
}
1045-
})
1046836

1047837
provider.postMessageToWebview({ type: "routerModels", routerModels: routerModels as RouterModels })
1048838
break

0 commit comments

Comments (0)