
Commit 73d1623

Added a hardcoded list of computer use models for litellm as a fallback for older litellm versions (#4052)
1 parent 343f29b commit 73d1623
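
In effect, the resolution order this commit introduces (shown in full in the src/api/providers/fetchers/litellm.ts diff below) is: trust an explicit supports_computer_use field from the proxy's /model/info response, and only when that field is missing fall back to suffix-matching the model name against a hardcoded list. A minimal TypeScript sketch of that rule; the parameter names here are illustrative, the real logic lives inside getLiteLLMModels:

// Sketch of the commit's resolution rule (illustrative names).
function resolveComputerUse(
	supportsComputerUseField: boolean | undefined, // modelInfo.supports_computer_use
	litellmModelName: string, // litellm_params.model, possibly route-prefixed (e.g. "openrouter/...")
	fallbackList: Set<string>, // LITELLM_COMPUTER_USE_MODELS
): boolean {
	// An explicit field from a newer LiteLLM wins, whether true or false.
	if (supportsComputerUseField !== undefined) {
		return Boolean(supportsComputerUseField)
	}
	// Older LiteLLM versions omit the field: consult the hardcoded list.
	return Array.from(fallbackList).some((m) => litellmModelName.endsWith(m))
}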

File tree

3 files changed (+247, -2 lines)

src/api/providers/fetchers/__tests__/litellm.test.ts

Lines changed: 199 additions & 0 deletions
@@ -248,4 +248,203 @@ describe("getLiteLLMModels", () => {
 
 		expect(result).toEqual({})
 	})
+
+	it("uses fallback computer use detection when supports_computer_use is not available", async () => {
+		const mockResponse = {
+			data: {
+				data: [
+					{
+						model_name: "claude-3-5-sonnet-latest",
+						model_info: {
+							max_tokens: 4096,
+							max_input_tokens: 200000,
+							supports_vision: true,
+							supports_prompt_caching: false,
+							// Note: no supports_computer_use field
+						},
+						litellm_params: {
+							model: "anthropic/claude-3-5-sonnet-latest", // This should match the fallback list
+						},
+					},
+					{
+						model_name: "gpt-4-turbo",
+						model_info: {
+							max_tokens: 8192,
+							max_input_tokens: 128000,
+							supports_vision: false,
+							supports_prompt_caching: false,
+							// Note: no supports_computer_use field
+						},
+						litellm_params: {
+							model: "openai/gpt-4-turbo", // This should NOT match the fallback list
+						},
+					},
+				],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		const result = await getLiteLLMModels("test-api-key", "http://localhost:4000")
+
+		expect(result["claude-3-5-sonnet-latest"]).toEqual({
+			maxTokens: 4096,
+			contextWindow: 200000,
+			supportsImages: true,
+			supportsComputerUse: true, // Should be true due to fallback
+			supportsPromptCache: false,
+			inputPrice: undefined,
+			outputPrice: undefined,
+			description: "claude-3-5-sonnet-latest via LiteLLM proxy",
+		})
+
+		expect(result["gpt-4-turbo"]).toEqual({
+			maxTokens: 8192,
+			contextWindow: 128000,
+			supportsImages: false,
+			supportsComputerUse: false, // Should be false as it's not in fallback list
+			supportsPromptCache: false,
+			inputPrice: undefined,
+			outputPrice: undefined,
+			description: "gpt-4-turbo via LiteLLM proxy",
+		})
+	})
+
+	it("prioritizes explicit supports_computer_use over fallback detection", async () => {
+		const mockResponse = {
+			data: {
+				data: [
+					{
+						model_name: "claude-3-5-sonnet-latest",
+						model_info: {
+							max_tokens: 4096,
+							max_input_tokens: 200000,
+							supports_vision: true,
+							supports_prompt_caching: false,
+							supports_computer_use: false, // Explicitly set to false
+						},
+						litellm_params: {
+							model: "anthropic/claude-3-5-sonnet-latest", // This matches fallback list but should be ignored
+						},
+					},
+					{
+						model_name: "custom-model",
+						model_info: {
+							max_tokens: 8192,
+							max_input_tokens: 128000,
+							supports_vision: false,
+							supports_prompt_caching: false,
+							supports_computer_use: true, // Explicitly set to true
+						},
+						litellm_params: {
+							model: "custom/custom-model", // This would NOT match fallback list
+						},
+					},
+					{
+						model_name: "another-custom-model",
+						model_info: {
+							max_tokens: 8192,
+							max_input_tokens: 128000,
+							supports_vision: false,
+							supports_prompt_caching: false,
+							supports_computer_use: false, // Explicitly set to false
+						},
+						litellm_params: {
+							model: "custom/another-custom-model", // This would NOT match fallback list
+						},
+					},
+				],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		const result = await getLiteLLMModels("test-api-key", "http://localhost:4000")
+
+		expect(result["claude-3-5-sonnet-latest"]).toEqual({
+			maxTokens: 4096,
+			contextWindow: 200000,
+			supportsImages: true,
+			supportsComputerUse: false, // False because explicitly set to false (fallback ignored)
+			supportsPromptCache: false,
+			inputPrice: undefined,
+			outputPrice: undefined,
+			description: "claude-3-5-sonnet-latest via LiteLLM proxy",
+		})
+
+		expect(result["custom-model"]).toEqual({
+			maxTokens: 8192,
+			contextWindow: 128000,
+			supportsImages: false,
+			supportsComputerUse: true, // True because explicitly set to true
+			supportsPromptCache: false,
+			inputPrice: undefined,
+			outputPrice: undefined,
+			description: "custom-model via LiteLLM proxy",
+		})
+
+		expect(result["another-custom-model"]).toEqual({
+			maxTokens: 8192,
+			contextWindow: 128000,
+			supportsImages: false,
+			supportsComputerUse: false, // False because explicitly set to false
+			supportsPromptCache: false,
+			inputPrice: undefined,
+			outputPrice: undefined,
+			description: "another-custom-model via LiteLLM proxy",
+		})
+	})
+
+	it("handles fallback detection with various model name formats", async () => {
+		const mockResponse = {
+			data: {
+				data: [
+					{
+						model_name: "vertex-claude",
+						model_info: {
+							max_tokens: 4096,
+							max_input_tokens: 200000,
+							supports_vision: true,
+							supports_prompt_caching: false,
+						},
+						litellm_params: {
+							model: "vertex_ai/claude-3-5-sonnet", // Should match fallback list
+						},
+					},
+					{
+						model_name: "openrouter-claude",
+						model_info: {
+							max_tokens: 4096,
+							max_input_tokens: 200000,
+							supports_vision: true,
+							supports_prompt_caching: false,
+						},
+						litellm_params: {
+							model: "openrouter/anthropic/claude-3.5-sonnet", // Should match fallback list
+						},
+					},
+					{
+						model_name: "bedrock-claude",
+						model_info: {
+							max_tokens: 4096,
+							max_input_tokens: 200000,
+							supports_vision: true,
+							supports_prompt_caching: false,
+						},
+						litellm_params: {
+							model: "anthropic.claude-3-5-sonnet-20241022-v2:0", // Should match fallback list
+						},
+					},
+				],
+			},
+		}
+
+		mockedAxios.get.mockResolvedValue(mockResponse)
+
+		const result = await getLiteLLMModels("test-api-key", "http://localhost:4000")
+
+		expect(result["vertex-claude"].supportsComputerUse).toBe(true)
+		expect(result["openrouter-claude"].supportsComputerUse).toBe(true)
+		expect(result["bedrock-claude"].supportsComputerUse).toBe(true)
+	})
 })

src/api/providers/fetchers/litellm.ts

Lines changed: 15 additions & 2 deletions
@@ -1,6 +1,6 @@
 import axios from "axios"
 
-import { ModelRecord } from "../../../shared/api"
+import { LITELLM_COMPUTER_USE_MODELS, ModelRecord } from "../../../shared/api"
 
 /**
  * Fetches available models from a LiteLLM server
@@ -23,6 +23,8 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 	const response = await axios.get(`${baseUrl}/v1/model/info`, { headers, timeout: 5000 })
 	const models: ModelRecord = {}
 
+	const computerModels = Array.from(LITELLM_COMPUTER_USE_MODELS)
+
 	// Process the model info from the response
 	if (response.data && response.data.data && Array.isArray(response.data.data)) {
 		for (const model of response.data.data) {
@@ -32,12 +34,23 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
 
 			if (!modelName || !modelInfo || !litellmModelName) continue
 
+			// Use explicit supports_computer_use if available, otherwise fall back to hardcoded list
+			let supportsComputerUse: boolean
+			if (modelInfo.supports_computer_use !== undefined) {
+				supportsComputerUse = Boolean(modelInfo.supports_computer_use)
+			} else {
+				// Fallback for older LiteLLM versions that don't have supports_computer_use field
+				supportsComputerUse = computerModels.some((computer_model) =>
+					litellmModelName.endsWith(computer_model),
+				)
+			}
+
 			models[modelName] = {
 				maxTokens: modelInfo.max_tokens || 8192,
 				contextWindow: modelInfo.max_input_tokens || 200000,
 				supportsImages: Boolean(modelInfo.supports_vision),
 				// litellm_params.model may have a prefix like openrouter/
-				supportsComputerUse: Boolean(modelInfo.supports_computer_use),
+				supportsComputerUse,
 				supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
 				inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
 				outputPrice: modelInfo.output_cost_per_token

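Note that the fallback matches with endsWith rather than strict equality: litellm_params.model often carries a route prefix (the inline comment in the diff calls out openrouter/), so a prefixed name like anthropic/claude-3-5-sonnet-latest still hits the bare claude-3-5-sonnet-latest entry. A runnable sketch against a hypothetical two-entry subset of the list:

// Hypothetical two-entry subset of LITELLM_COMPUTER_USE_MODELS,
// just to demonstrate the suffix semantics.
const FALLBACK = new Set(["claude-3-5-sonnet-latest", "anthropic.claude-3-5-sonnet-20241022-v2:0"])

const matchesFallback = (litellmModelName: string): boolean =>
	Array.from(FALLBACK).some((m) => litellmModelName.endsWith(m))

console.log(matchesFallback("anthropic/claude-3-5-sonnet-latest")) // true: suffix match past the provider prefix
console.log(matchesFallback("openai/gpt-4-turbo")) // false: not in the list
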
src/shared/api.ts

Lines changed: 33 additions & 0 deletions
@@ -1260,6 +1260,39 @@ export const litellmDefaultModelInfo: ModelInfo = {
 	cacheWritesPrice: 3.75,
 	cacheReadsPrice: 0.3,
 }
+
+export const LITELLM_COMPUTER_USE_MODELS = new Set([
+	"claude-3-5-sonnet-latest",
+	"claude-opus-4-20250514",
+	"claude-sonnet-4-20250514",
+	"claude-3-7-sonnet-latest",
+	"claude-3-7-sonnet-20250219",
+	"claude-3-5-sonnet-20241022",
+	"vertex_ai/claude-3-5-sonnet",
+	"vertex_ai/claude-3-5-sonnet-v2",
+	"vertex_ai/claude-3-5-sonnet-v2@20241022",
+	"vertex_ai/claude-3-7-sonnet@20250219",
+	"vertex_ai/claude-opus-4@20250514",
+	"vertex_ai/claude-sonnet-4@20250514",
+	"openrouter/anthropic/claude-3.5-sonnet",
+	"openrouter/anthropic/claude-3.5-sonnet:beta",
+	"openrouter/anthropic/claude-3.7-sonnet",
+	"openrouter/anthropic/claude-3.7-sonnet:beta",
+	"anthropic.claude-opus-4-20250514-v1:0",
+	"anthropic.claude-sonnet-4-20250514-v1:0",
+	"anthropic.claude-3-7-sonnet-20250219-v1:0",
+	"anthropic.claude-3-5-sonnet-20241022-v2:0",
+	"us.anthropic.claude-3-5-sonnet-20241022-v2:0",
+	"us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+	"us.anthropic.claude-opus-4-20250514-v1:0",
+	"us.anthropic.claude-sonnet-4-20250514-v1:0",
+	"eu.anthropic.claude-3-5-sonnet-20241022-v2:0",
+	"eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
+	"eu.anthropic.claude-opus-4-20250514-v1:0",
+	"eu.anthropic.claude-sonnet-4-20250514-v1:0",
+	"snowflake/claude-3-5-sonnet",
+])
+
 // xAI
 // https://docs.x.ai/docs/api-reference
 export type XAIModelId = keyof typeof xaiModels
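
The entries above span the model-name formats LiteLLM forwards from its different routes: bare Anthropic model IDs, vertex_ai/- and openrouter/-prefixed names, Bedrock IDs including the us. and eu. cross-region variants, and snowflake/. Because the fetcher matches by suffix, a Bedrock-style litellm_params.model such as anthropic.claude-3-5-sonnet-20241022-v2:0 resolves directly, which is exactly what the bedrock-claude case in the new tests asserts.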
