@@ -6,6 +6,7 @@ import NodeCache from "node-cache"
66import { ContextProxy } from "../../../core/config/ContextProxy"
77import { getCacheDirectoryPath } from "../../../shared/storagePathManager"
88import { RouterName , ModelRecord } from "../../../shared/api"
9+ import { fileExistsAtPath } from "../../../utils/fs"
910
1011import { getOpenRouterModels } from "./openrouter"
1112import { getRequestyModels } from "./requesty"
@@ -21,6 +22,14 @@ async function writeModels(router: RouterName, data: ModelRecord) {
2122 await fs . writeFile ( path . join ( cacheDir , filename ) , JSON . stringify ( data ) )
2223}
2324
25+ async function readModels ( router : RouterName ) : Promise < ModelRecord | undefined > {
26+ const filename = `${ router } _models.json`
27+ const cacheDir = await getCacheDirectoryPath ( ContextProxy . instance . globalStorageUri . fsPath )
28+ const filePath = path . join ( cacheDir , filename )
29+ const exists = await fileExistsAtPath ( filePath )
30+ return exists ? JSON . parse ( await fs . readFile ( filePath , "utf8" ) ) : undefined
31+ }
32+
2433/**
2534 * Get models from the cache or fetch them from the provider and cache them.
2635 * There are two caches:
@@ -37,37 +46,34 @@ export const getModels = async (
3746 apiKey : string | undefined = undefined ,
3847 baseUrl : string | undefined = undefined ,
3948) : Promise < ModelRecord > => {
40- // If this call is meant for a refresh (indicated by apiKey/baseUrl for specific routers),
41- // the memory cache should have been flushed by the caller (e.g., webviewMessageHandler).
42- // Otherwise, for general calls, check memory cache first.
43- const modelsFromMemory = memoryCache . get < ModelRecord > ( router )
44- if ( modelsFromMemory ) {
45- return modelsFromMemory
49+ let models = memoryCache . get < ModelRecord > ( router )
50+ if ( models ) {
51+ return models
4652 }
4753
48- let fetchedModels : ModelRecord
4954 try {
5055 switch ( router ) {
5156 case "openrouter" :
52- fetchedModels = await getOpenRouterModels ( )
57+ models = await getOpenRouterModels ( )
5358 break
5459 case "requesty" :
55- // Assuming getRequestyModels will throw if apiKey is needed and not provided or invalid.
56- fetchedModels = await getRequestyModels ( apiKey )
60+ // Requesty models endpoint requires an API key for per-user custom policies
61+ models = await getRequestyModels ( apiKey )
5762 break
5863 case "glama" :
59- fetchedModels = await getGlamaModels ( )
64+ models = await getGlamaModels ( )
6065 break
6166 case "unbound" :
62- fetchedModels = await getUnboundModels ( )
67+ // Unbound models endpoint requires an API key to fetch application specific models
68+ models = await getUnboundModels ( apiKey )
6369 break
6470 case "litellm" :
6571 if ( ! baseUrl || ! apiKey ) {
6672 // This case should ideally be handled by the caller if baseUrl is strictly required.
6773 // However, for robustness, if called without baseUrl for litellm, it would fail in getLiteLLMModels or here.
6874 throw new Error ( "Base URL and api key are required for LiteLLM models." )
6975 }
70- fetchedModels = await getLiteLLMModels ( apiKey || "" , baseUrl )
76+ models = await getLiteLLMModels ( apiKey || "" , baseUrl )
7177 break
7278 default :
7379 // Ensures router is exhaustively checked if RouterName is a strict union
@@ -76,11 +82,18 @@ export const getModels = async (
7682 }
7783
7884 // Cache the fetched models (even if empty, to signify a successful fetch with no models)
79- memoryCache . set ( router , fetchedModels )
80- await writeModels ( router , fetchedModels ) . catch ( ( err ) =>
85+ memoryCache . set ( router , models )
86+ await writeModels ( router , models ) . catch ( ( err ) =>
8187 console . error ( `[getModels] Error writing ${ router } models to file cache:` , err ) ,
8288 )
83- return fetchedModels
89+
90+ try {
91+ models = await readModels ( router )
92+ // console.log(`[getModels] read ${router} models from file cache`)
93+ } catch ( error ) {
94+ console . error ( `[getModels] error reading ${ router } models from file cache` , error )
95+ }
96+ return models || { }
8497 } catch ( error ) {
8598 // Log the error and re-throw it so the caller can handle it (e.g., show a UI message).
8699 console . error ( `[getModels] Failed to fetch models for ${ router } :` , error )
@@ -89,10 +102,6 @@ export const getModels = async (
89102 }
90103}
91104
/**
 * Flush models memory cache for a specific router.
 *
 * Note: only the in-memory cache entry is removed; the on-disk cache file
 * written by `writeModels` is left intact.
 *
 * @param router - The router to flush models for.
 */
export const flushModels = async (router: RouterName) => {
	memoryCache.del(router)
}
0 commit comments