Commit bd1f93a

Refactor getModels function to use options parameter for improved type safety
- Updated the getModels function to accept a single options parameter instead of multiple individual parameters.
- Introduced a discriminated union type, GetModelsOptions, to enforce required properties based on the router type.
1 parent: cbed162

1 file changed (+19, −16)
src/api/providers/fetchers/modelCache.ts

Lines changed: 19 additions & 16 deletions
@@ -30,22 +30,29 @@ async function readModels(router: RouterName): Promise<ModelRecord | undefined>
 	return exists ? JSON.parse(await fs.readFile(filePath, "utf8")) : undefined
 }
 
+/**
+ * Options for fetching models from different routers.
+ * This is a discriminated union type where the router property determines
+ * which other properties are required.
+ */
+export type GetModelsOptions =
+	| { router: "openrouter" }
+	| { router: "glama" }
+	| { router: "requesty"; apiKey?: string }
+	| { router: "unbound"; apiKey?: string }
+	| { router: "litellm"; apiKey: string; baseUrl: string }
+
 /**
  * Get models from the cache or fetch them from the provider and cache them.
  * There are two caches:
  * 1. Memory cache - This is a simple in-memory cache that is used to store models for a short period of time.
  * 2. File cache - This is a file-based cache that is used to store models for a longer period of time.
  *
- * @param router - The router to fetch models from.
- * @param apiKey - Optional API key for the provider.
- * @param baseUrl - Optional base URL for the provider (currently used only for LiteLLM).
+ * @param options - Options for fetching models, including the router and any required parameters.
 * @returns The models from the cache or the fetched models.
 */
-export const getModels = async (
-	router: RouterName,
-	apiKey: string | undefined = undefined,
-	baseUrl: string | undefined = undefined,
-): Promise<ModelRecord> => {
+export const getModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
+	const { router } = options
 	let models = memoryCache.get<ModelRecord>(router)
 	if (models) {
 		return models
@@ -58,22 +65,18 @@ export const getModels = async (
 			break
 		case "requesty":
 			// Requesty models endpoint requires an API key for per-user custom policies
-			models = await getRequestyModels(apiKey)
+			models = await getRequestyModels(options.apiKey)
 			break
 		case "glama":
 			models = await getGlamaModels()
 			break
 		case "unbound":
 			// Unbound models endpoint requires an API key to fetch application specific models
-			models = await getUnboundModels(apiKey)
+			models = await getUnboundModels(options.apiKey)
 			break
 		case "litellm":
-			if (!baseUrl || !apiKey) {
-				// This case should ideally be handled by the caller if baseUrl is strictly required.
-				// However, for robustness, if called without baseUrl for litellm, it would fail in getLiteLLMModels or here.
-				throw new Error("Base URL and api key are required for LiteLLM models.")
-			}
-			models = await getLiteLLMModels(apiKey, baseUrl)
+			// Type safety ensures apiKey and baseUrl are always provided for litellm
+			models = await getLiteLLMModels(options.apiKey, options.baseUrl)
 			break
 		default:
 			// Ensures router is exhaustively checked if RouterName is a strict union
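
For context, a minimal sketch of what call sites look like under the new signature. This is not part of the commit: it assumes the GetModelsOptions union above, a hypothetical relative import path for modelCache.ts, and placeholder credential values.

```ts
// Usage sketch (not from the repository). Assumes the GetModelsOptions union
// introduced in this commit and a hypothetical import path.
import { getModels } from "./modelCache"

async function demo() {
	// Routers with no extra requirements only need the discriminant.
	const openRouterModels = await getModels({ router: "openrouter" })

	// Requesty and Unbound accept an optional apiKey.
	const requestyModels = await getModels({ router: "requesty", apiKey: "key-placeholder" })

	// LiteLLM requires both apiKey and baseUrl; the union makes omitting either a compile-time error.
	const liteLlmModels = await getModels({
		router: "litellm",
		apiKey: "key-placeholder", // placeholder value
		baseUrl: "http://localhost:4000", // placeholder base URL
	})

	// @ts-expect-error baseUrl is missing, so this no longer type-checks (previously it threw at runtime).
	await getModels({ router: "litellm", apiKey: "key-placeholder" })

	return { openRouterModels, requestyModels, liteLlmModels }
}
```

The design effect is that the old runtime guard for LiteLLM moves into the type system; and if RouterName is a strict union, the default branch can assign router to a never-typed variable for compile-time exhaustiveness, which is what the comment in the default case appears to hint at.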
