@@ -26,9 +26,6 @@ import { getRooModels } from "./roo"

const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })

-// Coalesce concurrent fetches per provider within this extension host
-const inFlightModelFetches = new Map<RouterName, Promise<ModelRecord>>()
-
async function writeModels(router: RouterName, data: ModelRecord) {
	const filename = `${router}_models.json`
	const cacheDir = await getCacheDirectoryPath(ContextProxy.instance.globalStorageUri.fsPath)
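For context, the `inFlightModelFetches` map removed above implemented per-key promise coalescing: concurrent callers for the same provider shared one in-flight promise rather than each issuing its own network request. A minimal generic sketch of that pattern, with illustrative names (`coalesce` and `fetchFn` are not part of this codebase):

```typescript
// Per-key promise coalescing (illustrative sketch, not the repo's API).
const inFlight = new Map<string, Promise<unknown>>()

async function coalesce<T>(key: string, fetchFn: () => Promise<T>): Promise<T> {
	// Reuse the in-flight promise if one already exists for this key.
	const existing = inFlight.get(key)
	if (existing) {
		return existing as Promise<T>
	}

	// Otherwise start the fetch and clear the slot once it settles.
	const promise = fetchFn().finally(() => inFlight.delete(key))
	inFlight.set(key, promise)
	return promise
}
```

The trade-off this diff makes is to drop that machinery: duplicate concurrent fetches become possible again, in exchange for a simpler, fully awaited code path.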
@@ -63,160 +60,75 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
		return cached
	}

-	// 2) Try file cache snapshot (Option A), then kick off background refresh
+	// 2) Try file cache snapshot
	try {
		const file = await readModels(provider)
		if (file && Object.keys(file).length > 0) {
			memoryCache.set(provider, file)
-
-			// Start background refresh if not already in-flight (do not await)
-			if (!inFlightModelFetches.has(provider)) {
-				const signal = AbortSignal.timeout(30_000)
-				const bgPromise = (async (): Promise<ModelRecord> => {
-					let models: ModelRecord = {}
-					switch (provider) {
-						case "openrouter":
-							models = await getOpenRouterModels(undefined, signal)
-							break
-						case "requesty":
-							models = await getRequestyModels(options.baseUrl, options.apiKey, signal)
-							break
-						case "glama":
-							models = await getGlamaModels(signal)
-							break
-						case "unbound":
-							models = await getUnboundModels(options.apiKey, signal)
-							break
-						case "litellm":
-							models = await getLiteLLMModels(options.apiKey as string, options.baseUrl as string, signal)
-							break
-						case "ollama":
-							models = await getOllamaModels(options.baseUrl, options.apiKey, signal)
-							break
-						case "lmstudio":
-							models = await getLMStudioModels(options.baseUrl, signal)
-							break
-						case "deepinfra":
-							models = await getDeepInfraModels(options.apiKey, options.baseUrl, signal)
-							break
-						case "io-intelligence":
-							models = await getIOIntelligenceModels(options.apiKey, signal)
-							break
-						case "vercel-ai-gateway":
-							models = await getVercelAiGatewayModels(undefined, signal)
-							break
-						case "huggingface":
-							models = await getHuggingFaceModels(signal)
-							break
-						case "roo": {
-							const rooBaseUrl =
-								options.baseUrl ?? process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy"
-							models = await getRooModels(rooBaseUrl, options.apiKey, signal)
-							break
-						}
-						default:
-							throw new Error(`Unknown provider: ${provider}`)
-					}
-
-					memoryCache.set(provider, models)
-					await writeModels(provider, models).catch((err) => {
-						console.error(
-							`[modelCache] Error writing ${provider} to file cache during background refresh:`,
-							err instanceof Error ? err.message : String(err),
-						)
-					})
-					return models || {}
-				})()
-
-				inFlightModelFetches.set(provider, bgPromise)
-				Promise.resolve(bgPromise)
-					.catch((err) => {
-						console.error(
-							`[modelCache] Background refresh failed for ${provider}:`,
-							err instanceof Error ? err.message : String(err),
-						)
-					})
-					.finally(() => inFlightModelFetches.delete(provider))
-			}
-
			return file
		}
	} catch {
-		// ignore file read errors; fall through to network/coalesce path
-	}
-
-	// 3) Coalesce concurrent fetches
-	const existing = inFlightModelFetches.get(provider)
-	if (existing) {
-		return existing
+		// ignore file read errors; fall through to network fetch
	}

-	// 4) Network fetch wrapped as a single in-flight promise for this provider
+	// 3) Network fetch
	const signal = AbortSignal.timeout(30_000)
-	const fetchPromise = (async (): Promise<ModelRecord> => {
-		let models: ModelRecord = {}
-		switch (provider) {
-			case "openrouter":
-				models = await getOpenRouterModels(undefined, signal)
-				break
-			case "requesty":
-				models = await getRequestyModels(options.baseUrl, options.apiKey, signal)
-				break
-			case "glama":
-				models = await getGlamaModels(signal)
-				break
-			case "unbound":
-				models = await getUnboundModels(options.apiKey, signal)
-				break
-			case "litellm":
-				models = await getLiteLLMModels(options.apiKey as string, options.baseUrl as string, signal)
-				break
-			case "ollama":
-				models = await getOllamaModels(options.baseUrl, options.apiKey, signal)
-				break
-			case "lmstudio":
-				models = await getLMStudioModels(options.baseUrl, signal)
-				break
-			case "deepinfra":
-				models = await getDeepInfraModels(options.apiKey, options.baseUrl, signal)
-				break
-			case "io-intelligence":
-				models = await getIOIntelligenceModels(options.apiKey, signal)
-				break
-			case "vercel-ai-gateway":
-				models = await getVercelAiGatewayModels(undefined, signal)
-				break
-			case "huggingface":
-				models = await getHuggingFaceModels(signal)
-				break
-			case "roo": {
-				const rooBaseUrl =
-					options.baseUrl ?? process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy"
-				models = await getRooModels(rooBaseUrl, options.apiKey, signal)
-				break
-			}
-			default: {
-				throw new Error(`Unknown provider: ${provider}`)
-			}
+	let models: ModelRecord = {}
+
+	switch (provider) {
+		case "openrouter":
+			models = await getOpenRouterModels(undefined, signal)
+			break
+		case "requesty":
+			models = await getRequestyModels(options.baseUrl, options.apiKey, signal)
+			break
+		case "glama":
+			models = await getGlamaModels(signal)
+			break
+		case "unbound":
+			models = await getUnboundModels(options.apiKey, signal)
+			break
+		case "litellm":
+			models = await getLiteLLMModels(options.apiKey as string, options.baseUrl as string, signal)
+			break
+		case "ollama":
+			models = await getOllamaModels(options.baseUrl, options.apiKey, signal)
+			break
+		case "lmstudio":
+			models = await getLMStudioModels(options.baseUrl, signal)
+			break
+		case "deepinfra":
+			models = await getDeepInfraModels(options.apiKey, options.baseUrl, signal)
+			break
+		case "io-intelligence":
+			models = await getIOIntelligenceModels(options.apiKey, signal)
+			break
+		case "vercel-ai-gateway":
+			models = await getVercelAiGatewayModels(undefined, signal)
+			break
+		case "huggingface":
+			models = await getHuggingFaceModels(signal)
+			break
+		case "roo": {
+			const rooBaseUrl = options.baseUrl ?? process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy"
+			models = await getRooModels(rooBaseUrl, options.apiKey, signal)
+			break
		}
-		memoryCache.set(provider, models)
+		default: {
+			throw new Error(`Unknown provider: ${provider}`)
+		}
+	}

-		await writeModels(provider, models).catch((err) => {
-			console.error(
-				`[modelCache] Error writing ${provider} to file cache after network fetch:`,
-				err instanceof Error ? err.message : String(err),
-			)
-		})
+	memoryCache.set(provider, models)

-		return models || {}
-	})()
+	await writeModels(provider, models).catch((err) => {
+		console.error(
+			`[modelCache] Error writing ${provider} to file cache after network fetch:`,
+			err instanceof Error ? err.message : String(err),
+		)
+	})

-	inFlightModelFetches.set(provider, fetchPromise)
-	try {
-		return await fetchPromise
-	} finally {
-		inFlightModelFetches.delete(provider)
-	}
+	return models || {}
}

/**
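After this change, `getModels` reduces to a three-tier lookup: memory cache, then file-cache snapshot, then a single awaited network fetch bounded by `AbortSignal.timeout(30_000)`. As an illustration of how that timeout signal behaves (the URL, function name, and error handling below are placeholders, not the extension's actual fetcher code):

```typescript
// Illustrative sketch: bounding a fetch with AbortSignal.timeout.
async function fetchWithTimeout(url: string): Promise<unknown> {
	// AbortSignal.timeout(ms) yields a signal that aborts automatically after ms.
	const signal = AbortSignal.timeout(30_000)
	try {
		const response = await fetch(url, { signal })
		return await response.json()
	} catch (err) {
		// A timed-out request rejects with a "TimeoutError" DOMException.
		if (err instanceof DOMException && err.name === "TimeoutError") {
			throw new Error(`Request to ${url} timed out after 30s`)
		}
		throw err
	}
}
```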