Skip to content

Commit 86e05ab

Browse files
committed
refactor: remove requestHuggingFaceModels and use requestRouterModels instead
- Removed requestHuggingFaceModels message type from WebviewMessage.ts
- Removed requestHuggingFaceModels handler from webviewMessageHandler.ts
- Removed huggingFaceModels from ExtensionMessage.ts
- Updated HuggingFace.tsx component to use requestRouterModels and extract HuggingFace models from the routerModels response

This simplifies the codebase by using a single unified message type for all router-based model providers.
1 parent ef28685 commit 86e05ab

File tree

4 files changed

+42
-83
lines changed

4 files changed

+42
-83
lines changed

src/core/webview/webviewMessageHandler.ts

Lines changed: 1 addition & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -522,7 +522,6 @@ export const webviewMessageHandler = async (
522522
litellm: {},
523523
ollama: {},
524524
lmstudio: {},
525-
huggingface: {},
526525
}
527526

528527
const safeGetModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
@@ -565,10 +564,9 @@ export const webviewMessageHandler = async (
565564

566565
const fetchedRouterModels: Partial<Record<RouterName, ModelRecord>> = {
567566
...routerModels,
568-
// Initialize ollama, lmstudio, and huggingface with empty objects since they use separate handlers
567+
// Initialize ollama and lmstudio with empty objects since they use separate handlers
569568
ollama: {},
570569
lmstudio: {},
571-
huggingface: {},
572570
}
573571

574572
results.forEach((result, index) => {
@@ -676,59 +674,6 @@ export const webviewMessageHandler = async (
676674
// TODO: Cache like we do for OpenRouter, etc?
677675
provider.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
678676
break
679-
case "requestHuggingFaceModels":
680-
try {
681-
// Flush cache first to ensure fresh models
682-
await flushModels("huggingface")
683-
684-
const huggingFaceModels = await getModels({
685-
provider: "huggingface",
686-
})
687-
688-
// Convert the model record to an array format expected by the webview
689-
const modelArray = Object.entries(huggingFaceModels).map(([id, info]) => ({
690-
id,
691-
_id: id,
692-
inferenceProviderMapping: [
693-
{
694-
provider: "huggingface",
695-
providerId: id,
696-
status: "live" as const,
697-
task: "conversational" as const,
698-
},
699-
],
700-
trendingScore: 0,
701-
config: {
702-
architectures: [],
703-
model_type:
704-
info.description
705-
?.split(", ")
706-
.find((part) => part.startsWith("Type: "))
707-
?.replace("Type: ", "") || "",
708-
tokenizer_config: {
709-
model_max_length: info.contextWindow,
710-
},
711-
},
712-
tags: [],
713-
pipeline_tag: info.supportsImages ? ("image-text-to-text" as const) : ("text-generation" as const),
714-
library_name: info.description
715-
?.split(", ")
716-
.find((part) => part.startsWith("Library: "))
717-
?.replace("Library: ", ""),
718-
}))
719-
720-
provider.postMessageToWebview({
721-
type: "huggingFaceModels",
722-
huggingFaceModels: modelArray,
723-
})
724-
} catch (error) {
725-
console.error("Failed to fetch Hugging Face models:", error)
726-
provider.postMessageToWebview({
727-
type: "huggingFaceModels",
728-
huggingFaceModels: [],
729-
})
730-
}
731-
break
732677
case "openImage":
733678
openImage(message.text!, { values: message.values })
734679
break

src/shared/ExtensionMessage.ts

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@ export interface ExtensionMessage {
6767
| "ollamaModels"
6868
| "lmStudioModels"
6969
| "vsCodeLmModels"
70-
| "huggingFaceModels"
7170
| "vsCodeLmApiAvailable"
7271
| "updatePrompt"
7372
| "systemPrompt"
@@ -137,28 +136,6 @@ export interface ExtensionMessage {
137136
ollamaModels?: string[]
138137
lmStudioModels?: string[]
139138
vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[]
140-
huggingFaceModels?: Array<{
141-
_id: string
142-
id: string
143-
inferenceProviderMapping: Array<{
144-
provider: string
145-
providerId: string
146-
status: "live" | "staging" | "error"
147-
task: "conversational"
148-
}>
149-
trendingScore: number
150-
config: {
151-
architectures: string[]
152-
model_type: string
153-
tokenizer_config?: {
154-
chat_template?: string | Array<{ name: string; template: string }>
155-
model_max_length?: number
156-
}
157-
}
158-
tags: string[]
159-
pipeline_tag: "text-generation" | "image-text-to-text"
160-
library_name?: string
161-
}>
162139
mcpServers?: McpServer[]
163140
commits?: GitCommit[]
164141
listApiConfig?: ProviderSettingsEntry[]

src/shared/WebviewMessage.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@ export interface WebviewMessage {
6767
| "requestOllamaModels"
6868
| "requestLmStudioModels"
6969
| "requestVsCodeLmModels"
70-
| "requestHuggingFaceModels"
7170
| "openImage"
7271
| "saveImage"
7372
| "openFile"

webview-ui/src/components/settings/providers/HuggingFace.tsx

Lines changed: 41 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -62,16 +62,54 @@ export const HuggingFace = ({ apiConfiguration, setApiConfigurationField }: Hugg
6262
// Fetch models when component mounts
6363
useEffect(() => {
6464
setLoading(true)
65-
vscode.postMessage({ type: "requestHuggingFaceModels" })
65+
vscode.postMessage({ type: "requestRouterModels" })
6666
}, [])
6767

6868
// Handle messages from extension
6969
const onMessage = useCallback((event: MessageEvent) => {
7070
const message: ExtensionMessage = event.data
7171

7272
switch (message.type) {
73-
case "huggingFaceModels":
74-
setModels(message.huggingFaceModels || [])
73+
case "routerModels":
74+
// Extract HuggingFace models from routerModels
75+
if (message.routerModels?.huggingface) {
76+
// Convert from ModelRecord format to HuggingFaceModel array format
77+
const modelArray = Object.entries(message.routerModels.huggingface).map(([id, info]) => ({
78+
id,
79+
_id: id,
80+
inferenceProviderMapping: [
81+
{
82+
provider: "huggingface",
83+
providerId: id,
84+
status: "live" as const,
85+
task: "conversational" as const,
86+
},
87+
],
88+
trendingScore: 0,
89+
config: {
90+
architectures: [],
91+
model_type:
92+
info.description
93+
?.split(", ")
94+
.find((part: string) => part.startsWith("Type: "))
95+
?.replace("Type: ", "") || "",
96+
tokenizer_config: {
97+
model_max_length: info.contextWindow,
98+
},
99+
},
100+
tags: [],
101+
pipeline_tag: info.supportsImages
102+
? ("image-text-to-text" as const)
103+
: ("text-generation" as const),
104+
library_name: info.description
105+
?.split(", ")
106+
.find((part: string) => part.startsWith("Library: "))
107+
?.replace("Library: ", ""),
108+
}))
109+
setModels(modelArray)
110+
} else {
111+
setModels([])
112+
}
75113
setLoading(false)
76114
break
77115
}

0 commit comments

Comments (0)