export interface HuggingFaceModel {
  _id: string
  id: string
  inferenceProviderMapping: InferenceProviderMapping[]
  trendingScore: number
  config: ModelConfig
  tags: string[]
  pipeline_tag: "text-generation" | "image-text-to-text"
  library_name?: string
}

export interface InferenceProviderMapping {
  provider: string
  providerId: string
  status: "live" | "staging" | "error"
  task: "conversational"
}

export interface ModelConfig {
  architectures: string[]
  model_type: string
  tokenizer_config?: {
    chat_template?: string | Array<{ name: string; template: string }>
    model_max_length?: number
  }
}
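
// Illustrative shape of a single API result (all values below are hypothetical;
// field names follow the interfaces above):
//
//   {
//     _id: "...",
//     id: "example-org/example-model",
//     trendingScore: 42,
//     pipeline_tag: "text-generation",
//     library_name: "transformers",
//     tags: ["conversational"],
//     config: { architectures: ["LlamaForCausalLM"], model_type: "llama" },
//     inferenceProviderMapping: [
//       { provider: "example-provider", providerId: "org/model", status: "live", task: "conversational" },
//     ],
//   }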

interface HuggingFaceApiParams {
  pipeline_tag?: "text-generation" | "image-text-to-text"
  filter: string
  inference_provider: string
  limit: number
  expand: string[]
}

const DEFAULT_PARAMS: HuggingFaceApiParams = {
  filter: "conversational",
  inference_provider: "all",
  limit: 100,
  expand: [
    "inferenceProviderMapping",
    "config",
    "library_name",
    "pipeline_tag",
    "tags",
    "mask_token",
    "trendingScore",
  ],
}

const BASE_URL = "https://huggingface.co/api/models"
const CACHE_DURATION = 1000 * 60 * 60 // 1 hour

interface CacheEntry {
  data: HuggingFaceModel[]
  timestamp: number
  status: "success" | "partial" | "error"
}

// Module-level in-memory cache; it lives only for the lifetime of the process
let cache: CacheEntry | null = null

function buildApiUrl(params: HuggingFaceApiParams): string {
  const url = new URL(BASE_URL)

  // Add scalar params; skip arrays (handled below) and unset optional fields,
  // which would otherwise be serialized as the literal string "undefined"
  Object.entries(params).forEach(([key, value]) => {
    if (value !== undefined && !Array.isArray(value)) {
      url.searchParams.append(key, String(value))
    }
  })

  // Array params use the bracket syntax the API expects: expand[]=...
  params.expand.forEach((item) => {
    url.searchParams.append("expand[]", item)
  })

  return url.toString()
}
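
// For reference, buildApiUrl({ ...DEFAULT_PARAMS, pipeline_tag: "text-generation" })
// yields a URL of roughly this form (parameter order shown is illustrative):
//
//   https://huggingface.co/api/models?filter=conversational&inference_provider=all
//     &limit=100&pipeline_tag=text-generation&expand[]=inferenceProviderMapping&expand[]=config&...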

// Browser-style navigation headers. Note that browsers treat `Sec-*` as
// forbidden request headers and set them automatically; assigning them here
// only takes effect in runtimes that allow it (e.g. Node-based fetch).
const headers: HeadersInit = {
  "Upgrade-Insecure-Requests": "1",
  "Sec-Fetch-Dest": "document",
  "Sec-Fetch-Mode": "navigate",
  "Sec-Fetch-Site": "none",
  "Sec-Fetch-User": "?1",
  Priority: "u=0, i",
  Pragma: "no-cache",
  "Cache-Control": "no-cache",
}

const requestInit: RequestInit = {
  credentials: "include",
  headers,
  method: "GET",
  mode: "cors",
}

export async function fetchHuggingFaceModels(): Promise<HuggingFaceModel[]> {
  const now = Date.now()

  // Check cache
  if (cache && now - cache.timestamp < CACHE_DURATION) {
    console.log("Using cached Hugging Face models")
    return cache.data
  }

  try {
    console.log("Fetching Hugging Face models from API...")

    // Fetch both text-generation and image-text-to-text models in parallel
    const [textGenResponse, imgTextResponse] = await Promise.allSettled([
      fetch(buildApiUrl({ ...DEFAULT_PARAMS, pipeline_tag: "text-generation" }), requestInit),
      fetch(buildApiUrl({ ...DEFAULT_PARAMS, pipeline_tag: "image-text-to-text" }), requestInit),
    ])

    let textGenModels: HuggingFaceModel[] = []
    let imgTextModels: HuggingFaceModel[] = []
    let hasErrors = false

    // Process text-generation models
    if (textGenResponse.status === "fulfilled" && textGenResponse.value.ok) {
      textGenModels = await textGenResponse.value.json()
    } else {
      console.error("Failed to fetch text-generation models:", textGenResponse)
      hasErrors = true
    }

    // Process image-text-to-text models
    if (imgTextResponse.status === "fulfilled" && imgTextResponse.value.ok) {
      imgTextModels = await imgTextResponse.value.json()
    } else {
      console.error("Failed to fetch image-text-to-text models:", imgTextResponse)
      hasErrors = true
    }

    // If both requests failed, fall back to stale cached data (when present)
    // rather than overwriting a previously good cache with an empty list
    if (hasErrors && textGenModels.length === 0 && imgTextModels.length === 0 && cache) {
      console.log("Both requests failed; using stale cached data")
      cache.status = "error"
      return cache.data
    }

    // Combine models that have at least one inference provider, sorted by id
    const allModels = [...textGenModels, ...imgTextModels]
      .filter((model) => model.inferenceProviderMapping.length > 0)
      .sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()))

    // Update cache
    cache = {
      data: allModels,
      timestamp: now,
      status: hasErrors ? "partial" : "success",
    }

    console.log(`Fetched ${allModels.length} Hugging Face models (status: ${cache.status})`)
    return allModels
  } catch (error) {
    console.error("Error fetching Hugging Face models:", error)

    // Return stale cached data if available
    if (cache) {
      console.log("Using stale cached data due to fetch error")
      cache.status = "error"
      return cache.data
    }

    // No cache available, return empty array
    return []
  }
}
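
// Example consumer-side helper (a sketch, not part of the original API):
// pick the first provider that currently serves a given model live.
export function firstLiveProvider(
  model: HuggingFaceModel,
): InferenceProviderMapping | undefined {
  return model.inferenceProviderMapping.find((p) => p.status === "live")
}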

export function getCachedModels(): HuggingFaceModel[] | null {
  return cache?.data || null
}

export function clearCache(): void {
  cache = null
}
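
// Example usage (illustrative; the import path is hypothetical):
//
//   import { fetchHuggingFaceModels } from "./huggingface-models"
//
//   const models = await fetchHuggingFaceModels()
//   const live = models.filter((m) =>
//     m.inferenceProviderMapping.some((p) => p.status === "live"),
//   )
//   console.log(`${live.length} models with at least one live provider`)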