Commit 17f0a5d

Merge pull request #1192 from RooVetGit/cte/move-model-fetchers
Move & clean up model fetchers
2 parents 199b6ea + 50ce955 commit 17f0a5d

17 files changed (+601, -618 lines)

src/api/providers/glama.ts

Lines changed: 44 additions & 1 deletion
@@ -1,10 +1,12 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import axios from "axios"
 import OpenAI from "openai"
-import { ApiHandler, SingleCompletionHandler } from "../"
+
 import { ApiHandlerOptions, ModelInfo, glamaDefaultModelId, glamaDefaultModelInfo } from "../../shared/api"
+import { parseApiPrice } from "../../utils/cost"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
+import { ApiHandler, SingleCompletionHandler } from "../"
 
 const GLAMA_DEFAULT_TEMPERATURE = 0
 
@@ -190,3 +192,44 @@ export class GlamaHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getGlamaModels() {
+    const models: Record<string, ModelInfo> = {}
+
+    try {
+        const response = await axios.get("https://glama.ai/api/gateway/v1/models")
+        const rawModels = response.data
+
+        for (const rawModel of rawModels) {
+            const modelInfo: ModelInfo = {
+                maxTokens: rawModel.maxTokensOutput,
+                contextWindow: rawModel.maxTokensInput,
+                supportsImages: rawModel.capabilities?.includes("input:image"),
+                supportsComputerUse: rawModel.capabilities?.includes("computer_use"),
+                supportsPromptCache: rawModel.capabilities?.includes("caching"),
+                inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
+                outputPrice: parseApiPrice(rawModel.pricePerToken?.output),
+                description: undefined,
+                cacheWritesPrice: parseApiPrice(rawModel.pricePerToken?.cacheWrite),
+                cacheReadsPrice: parseApiPrice(rawModel.pricePerToken?.cacheRead),
+            }
+
+            switch (true) {
+                case rawModel.id.startsWith("anthropic/claude-3-7-sonnet"):
+                    modelInfo.maxTokens = 16384
+                    break
+                case rawModel.id.startsWith("anthropic/"):
+                    modelInfo.maxTokens = 8192
+                    break
+                default:
+                    break
+            }
+
+            models[rawModel.id] = modelInfo
+        }
+    } catch (error) {
+        console.error(`Error fetching Glama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
+    }
+
+    return models
+}
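All of the price fields above funnel through `parseApiPrice` from `../../utils/cost`, whose implementation is not part of this diff. A minimal sketch of the contract the fetchers appear to rely on: tolerate missing or string-typed values, and scale a per-token price up to the per-million-token figure `ModelInfo` stores (the scale factor is an assumption, not something this diff confirms):

```typescript
// Hypothetical sketch of parseApiPrice (the real helper lives in
// src/utils/cost.ts and is not shown in this diff).
// Assumption: providers report a price per single token, while
// ModelInfo stores prices per million tokens.
export function parseApiPrice(price: any): number | undefined {
    if (price === undefined || price === null || price === "") {
        return undefined
    }

    const parsed = typeof price === "number" ? price : parseFloat(String(price))
    return isNaN(parsed) ? undefined : parsed * 1_000_000
}
```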

src/api/providers/lmstudio.ts

Lines changed: 16 additions & 0 deletions
@@ -1,5 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import axios from "axios"
+
 import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
@@ -72,3 +74,17 @@ export class LmStudioHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getLmStudioModels(baseUrl = "http://localhost:1234") {
+    try {
+        if (!URL.canParse(baseUrl)) {
+            return []
+        }
+
+        const response = await axios.get(`${baseUrl}/v1/models`)
+        const modelsArray = response.data?.data?.map((model: any) => model.id) || []
+        return [...new Set<string>(modelsArray)]
+    } catch (error) {
+        return []
+    }
+}
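`URL.canParse` ships in Node.js 18.17/19.9 and later, so older runtimes would need a `new URL()` in a try/catch instead. Because the function swallows its own errors and resolves to `[]` on any failure, callers need no defensive code of their own. A quick usage sketch against a default local LM Studio server:

```typescript
import { getLmStudioModels } from "./src/api/providers/lmstudio"

// Resolves to a deduplicated list of model IDs, or [] when the URL is
// invalid or no LM Studio server is listening on the port.
const models = await getLmStudioModels("http://localhost:1234")
console.log(models) // e.g. ["qwen2.5-7b-instruct", ...]; IDs vary per install
```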

src/api/providers/ollama.ts

Lines changed: 16 additions & 0 deletions
@@ -1,5 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import axios from "axios"
+
 import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
@@ -88,3 +90,17 @@ export class OllamaHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getOllamaModels(baseUrl = "http://localhost:11434") {
+    try {
+        if (!URL.canParse(baseUrl)) {
+            return []
+        }
+
+        const response = await axios.get(`${baseUrl}/api/tags`)
+        const modelsArray = response.data?.models?.map((model: any) => model.name) || []
+        return [...new Set<string>(modelsArray)]
+    } catch (error) {
+        return []
+    }
+}
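Unlike the LM Studio fetcher, this one talks to Ollama's native `/api/tags` endpoint rather than an OpenAI-compatible `/v1/models` route, which is why it reads `model.name` instead of `model.id`. The response shape the code depends on, written out as a type (Ollama returns more metadata per model; only `name` is consumed here):

```typescript
// Minimal shape of Ollama's GET /api/tags response as consumed above;
// fields other than `name` (size, digest, modified_at, ...) are ignored.
interface OllamaTagsResponse {
    models: Array<{ name: string }>
}
```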

src/api/providers/openai.ts

Lines changed: 25 additions & 0 deletions
@@ -1,5 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI, { AzureOpenAI } from "openai"
+import axios from "axios"
 
 import {
     ApiHandlerOptions,
@@ -166,3 +167,27 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getOpenAiModels(baseUrl?: string, apiKey?: string) {
+    try {
+        if (!baseUrl) {
+            return []
+        }
+
+        if (!URL.canParse(baseUrl)) {
+            return []
+        }
+
+        const config: Record<string, any> = {}
+
+        if (apiKey) {
+            config["headers"] = { Authorization: `Bearer ${apiKey}` }
+        }
+
+        const response = await axios.get(`${baseUrl}/models`, config)
+        const modelsArray = response.data?.data?.map((model: any) => model.id) || []
+        return [...new Set<string>(modelsArray)]
+    } catch (error) {
+        return []
+    }
+}
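This generic fetcher is the only one where both the base URL and the key are optional: it short-circuits to `[]` without a `baseUrl`, and attaches the bearer header only when a key is supplied, so it works against authenticated and local OpenAI-compatible endpoints alike. A usage sketch (endpoint and key are placeholders):

```typescript
import { getOpenAiModels } from "./src/api/providers/openai"

// Placeholder endpoint and key; any OpenAI-compatible GET /models route works.
const models = await getOpenAiModels("https://api.openai.com/v1", process.env.OPENAI_API_KEY)

// Unauthenticated local servers work too; the auth header is simply omitted.
const local = await getOpenAiModels("http://localhost:8000/v1")
```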

src/api/providers/openrouter.ts

Lines changed: 79 additions & 7 deletions
@@ -1,29 +1,29 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import axios from "axios"
 import OpenAI from "openai"
-import { ApiHandler } from "../"
+import delay from "delay"
+
 import { ApiHandlerOptions, ModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo } from "../../shared/api"
+import { parseApiPrice } from "../../utils/cost"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
-import delay from "delay"
+import { convertToR1Format } from "../transform/r1-format"
 import { DEEP_SEEK_DEFAULT_TEMPERATURE } from "./openai"
+import { ApiHandler, SingleCompletionHandler } from ".."
 
 const OPENROUTER_DEFAULT_TEMPERATURE = 0
 
-// Add custom interface for OpenRouter params
+// Add custom interface for OpenRouter params.
 type OpenRouterChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & {
     transforms?: string[]
     include_reasoning?: boolean
 }
 
-// Add custom interface for OpenRouter usage chunk
+// Add custom interface for OpenRouter usage chunk.
 interface OpenRouterApiStreamUsageChunk extends ApiStreamUsageChunk {
     fullResponseText: string
 }
 
-import { SingleCompletionHandler } from ".."
-import { convertToR1Format } from "../transform/r1-format"
-
 export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
     private options: ApiHandlerOptions
     private client: OpenAI
@@ -222,3 +222,75 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getOpenRouterModels() {
+    const models: Record<string, ModelInfo> = {}
+
+    try {
+        const response = await axios.get("https://openrouter.ai/api/v1/models")
+        const rawModels = response.data.data
+
+        for (const rawModel of rawModels) {
+            const modelInfo: ModelInfo = {
+                maxTokens: rawModel.top_provider?.max_completion_tokens,
+                contextWindow: rawModel.context_length,
+                supportsImages: rawModel.architecture?.modality?.includes("image"),
+                supportsPromptCache: false,
+                inputPrice: parseApiPrice(rawModel.pricing?.prompt),
+                outputPrice: parseApiPrice(rawModel.pricing?.completion),
+                description: rawModel.description,
+            }
+
+            // NOTE: this needs to be synced with api.ts/openrouter default model info.
+            switch (true) {
+                case rawModel.id.startsWith("anthropic/claude-3.7-sonnet"):
+                    modelInfo.supportsComputerUse = true
+                    modelInfo.supportsPromptCache = true
+                    modelInfo.cacheWritesPrice = 3.75
+                    modelInfo.cacheReadsPrice = 0.3
+                    modelInfo.maxTokens = 16384
+                    break
+                case rawModel.id.startsWith("anthropic/claude-3.5-sonnet-20240620"):
+                    modelInfo.supportsPromptCache = true
+                    modelInfo.cacheWritesPrice = 3.75
+                    modelInfo.cacheReadsPrice = 0.3
+                    modelInfo.maxTokens = 8192
+                    break
+                case rawModel.id.startsWith("anthropic/claude-3.5-sonnet"):
+                    modelInfo.supportsComputerUse = true
+                    modelInfo.supportsPromptCache = true
+                    modelInfo.cacheWritesPrice = 3.75
+                    modelInfo.cacheReadsPrice = 0.3
+                    modelInfo.maxTokens = 8192
+                    break
+                case rawModel.id.startsWith("anthropic/claude-3-5-haiku"):
+                    modelInfo.supportsPromptCache = true
+                    modelInfo.cacheWritesPrice = 1.25
+                    modelInfo.cacheReadsPrice = 0.1
+                    modelInfo.maxTokens = 8192
+                    break
+                case rawModel.id.startsWith("anthropic/claude-3-opus"):
+                    modelInfo.supportsPromptCache = true
+                    modelInfo.cacheWritesPrice = 18.75
+                    modelInfo.cacheReadsPrice = 1.5
+                    modelInfo.maxTokens = 8192
+                    break
+                case rawModel.id.startsWith("anthropic/claude-3-haiku"):
+                default:
+                    modelInfo.supportsPromptCache = true
+                    modelInfo.cacheWritesPrice = 0.3
+                    modelInfo.cacheReadsPrice = 0.03
+                    modelInfo.maxTokens = 8192
+                    break
+            }
+
+            models[rawModel.id] = modelInfo
+        }
+    } catch (error) {
+        console.error(
+            `Error fetching OpenRouter models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
+        )
+    }
+
+    return models
+}
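The `switch (true)` idiom evaluates each boolean `startsWith()` guard in order and runs the first match, which is why the dated `claude-3.5-sonnet-20240620` case must precede the generic `claude-3.5-sonnet` one. Per the NOTE, the hard-coded overrides mirror the static defaults in `shared/api`; a caller might combine the live list with that fallback roughly like this (a sketch, not code from this PR):

```typescript
import { openRouterDefaultModelId, openRouterDefaultModelInfo } from "./src/shared/api"
import { getOpenRouterModels } from "./src/api/providers/openrouter"

const models = await getOpenRouterModels()

// On network failure the fetcher logs and returns {}, so fall back to
// the statically defined default model info instead of crashing.
const info = models[openRouterDefaultModelId] ?? openRouterDefaultModelInfo
console.log(info.maxTokens, info.cacheReadsPrice)
```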

src/api/providers/requesty.ts

Lines changed: 53 additions & 2 deletions
@@ -1,6 +1,9 @@
-import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
+import axios from "axios"
+
 import { ModelInfo, requestyModelInfoSaneDefaults, requestyDefaultModelId } from "../../shared/api"
-import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+import { parseApiPrice } from "../../utils/cost"
+import { ApiStreamUsageChunk } from "../transform/stream"
+import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
 
 export class RequestyHandler extends OpenAiHandler {
     constructor(options: OpenAiHandlerOptions) {
@@ -38,3 +41,51 @@ export class RequestyHandler extends OpenAiHandler {
         }
     }
 }
+
+export async function getRequestyModels({ apiKey }: { apiKey?: string }) {
+    const models: Record<string, ModelInfo> = {}
+
+    if (!apiKey) {
+        return models
+    }
+
+    try {
+        const config: Record<string, any> = {}
+        config["headers"] = { Authorization: `Bearer ${apiKey}` }
+
+        const response = await axios.get("https://router.requesty.ai/v1/models", config)
+        const rawModels = response.data.data
+
+        for (const rawModel of rawModels) {
+            const modelInfo: ModelInfo = {
+                maxTokens: rawModel.max_output_tokens,
+                contextWindow: rawModel.context_window,
+                supportsImages: rawModel.support_image,
+                supportsComputerUse: rawModel.support_computer_use,
+                supportsPromptCache: rawModel.supports_caching,
+                inputPrice: parseApiPrice(rawModel.input_price),
+                outputPrice: parseApiPrice(rawModel.output_price),
+                description: rawModel.description,
+                cacheWritesPrice: parseApiPrice(rawModel.caching_price),
+                cacheReadsPrice: parseApiPrice(rawModel.cached_price),
+            }
+
+            switch (true) {
+                case rawModel.id.startsWith("anthropic/claude-3-7-sonnet"):
+                    modelInfo.maxTokens = 16384
+                    break
+                case rawModel.id.startsWith("anthropic/"):
+                    modelInfo.maxTokens = 8192
+                    break
+                default:
+                    break
+            }
+
+            models[rawModel.id] = modelInfo
+        }
+    } catch (error) {
+        console.error(`Error fetching Requesty models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
+    }
+
+    return models
+}
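Requesty's fetcher differs from the positional-argument ones above in two ways: it takes a destructured options object, and without an API key it bails out with the empty record rather than throwing. In use:

```typescript
import { getRequestyModels } from "./src/api/providers/requesty"

// Without a key it resolves to {} and never issues a request.
const empty = await getRequestyModels({})

// With a key it returns a Record<string, ModelInfo> keyed by model ID.
const models = await getRequestyModels({ apiKey: process.env.REQUESTY_API_KEY })
```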

src/api/providers/unbound.ts

Lines changed: 46 additions & 1 deletion
@@ -1,9 +1,11 @@
 import { Anthropic } from "@anthropic-ai/sdk"
+import axios from "axios"
 import OpenAI from "openai"
-import { ApiHandler, SingleCompletionHandler } from "../"
+
 import { ApiHandlerOptions, ModelInfo, unboundDefaultModelId, unboundDefaultModelInfo } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+import { ApiHandler, SingleCompletionHandler } from "../"
 
 interface UnboundUsage extends OpenAI.CompletionUsage {
     cache_creation_input_tokens?: number
@@ -163,3 +165,46 @@ export class UnboundHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getUnboundModels() {
+    const models: Record<string, ModelInfo> = {}
+
+    try {
+        const response = await axios.get("https://api.getunbound.ai/models")
+
+        if (response.data) {
+            const rawModels: Record<string, any> = response.data
+
+            for (const [modelId, model] of Object.entries(rawModels)) {
+                const modelInfo: ModelInfo = {
+                    maxTokens: model?.maxTokens ? parseInt(model.maxTokens) : undefined,
+                    contextWindow: model?.contextWindow ? parseInt(model.contextWindow) : 0,
+                    supportsImages: model?.supportsImages ?? false,
+                    supportsPromptCache: model?.supportsPromptCaching ?? false,
+                    supportsComputerUse: model?.supportsComputerUse ?? false,
+                    inputPrice: model?.inputTokenPrice ? parseFloat(model.inputTokenPrice) : undefined,
+                    outputPrice: model?.outputTokenPrice ? parseFloat(model.outputTokenPrice) : undefined,
+                    cacheWritesPrice: model?.cacheWritePrice ? parseFloat(model.cacheWritePrice) : undefined,
+                    cacheReadsPrice: model?.cacheReadPrice ? parseFloat(model.cacheReadPrice) : undefined,
+                }
+
+                switch (true) {
+                    case modelId.startsWith("anthropic/claude-3-7-sonnet"):
+                        modelInfo.maxTokens = 16384
+                        break
+                    case modelId.startsWith("anthropic/"):
+                        modelInfo.maxTokens = 8192
+                        break
+                    default:
+                        break
+                }
+
+                models[modelId] = modelInfo
+            }
+        }
+    } catch (error) {
+        console.error(`Error fetching Unbound models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
+    }
+
+    return models
+}
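Taken together, the moved fetchers share one contract: they never throw, resolving to an empty array or record on any failure. That makes them safe to fan out concurrently; a sketch of a hypothetical aggregator (`refreshModels` is illustrative, not part of this PR):

```typescript
import { getGlamaModels } from "./src/api/providers/glama"
import { getOpenRouterModels } from "./src/api/providers/openrouter"
import { getRequestyModels } from "./src/api/providers/requesty"
import { getUnboundModels } from "./src/api/providers/unbound"

// Hypothetical aggregator: every fetcher catches its own errors,
// so this Promise.all can never reject.
async function refreshModels(requestyApiKey?: string) {
    const [glama, openRouter, requesty, unbound] = await Promise.all([
        getGlamaModels(),
        getOpenRouterModels(),
        getRequestyModels({ apiKey: requestyApiKey }),
        getUnboundModels(),
    ])

    return { glama, openRouter, requesty, unbound }
}
```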
