Skip to content

Commit 9d4b4eb

Browse files
authored
Added support for dynamic litellm supports_computer_use (#4027)
1 parent 82f9e9e commit 9d4b4eb

File tree

2 files changed

+29
-9
lines changed

2 files changed

+29
-9
lines changed

src/api/providers/fetchers/__tests__/litellm.test.ts

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import axios from "axios"
22
import { getLiteLLMModels } from "../litellm"
3-
import { OPEN_ROUTER_COMPUTER_USE_MODELS } from "../../../../shared/api"
43

54
// Mock axios
65
jest.mock("axios")
@@ -26,6 +25,7 @@ describe("getLiteLLMModels", () => {
2625
supports_prompt_caching: false,
2726
input_cost_per_token: 0.000003,
2827
output_cost_per_token: 0.000015,
28+
supports_computer_use: true,
2929
},
3030
litellm_params: {
3131
model: "anthropic/claude-3.5-sonnet",
@@ -40,6 +40,7 @@ describe("getLiteLLMModels", () => {
4040
supports_prompt_caching: false,
4141
input_cost_per_token: 0.00001,
4242
output_cost_per_token: 0.00003,
43+
supports_computer_use: false,
4344
},
4445
litellm_params: {
4546
model: "openai/gpt-4-turbo",
@@ -105,7 +106,6 @@ describe("getLiteLLMModels", () => {
105106
})
106107

107108
it("handles computer use models correctly", async () => {
108-
const computerUseModel = Array.from(OPEN_ROUTER_COMPUTER_USE_MODELS)[0]
109109
const mockResponse = {
110110
data: {
111111
data: [
@@ -115,9 +115,22 @@ describe("getLiteLLMModels", () => {
115115
max_tokens: 4096,
116116
max_input_tokens: 200000,
117117
supports_vision: true,
118+
supports_computer_use: true,
118119
},
119120
litellm_params: {
120-
model: `anthropic/${computerUseModel}`,
121+
model: `anthropic/test-computer-model`,
122+
},
123+
},
124+
{
125+
model_name: "test-non-computer-model",
126+
model_info: {
127+
max_tokens: 4096,
128+
max_input_tokens: 200000,
129+
supports_vision: false,
130+
supports_computer_use: false,
131+
},
132+
litellm_params: {
133+
model: `anthropic/test-non-computer-model`,
121134
},
122135
},
123136
],
@@ -138,6 +151,17 @@ describe("getLiteLLMModels", () => {
138151
outputPrice: undefined,
139152
description: "test-computer-model via LiteLLM proxy",
140153
})
154+
155+
expect(result["test-non-computer-model"]).toEqual({
156+
maxTokens: 4096,
157+
contextWindow: 200000,
158+
supportsImages: false,
159+
supportsComputerUse: false,
160+
supportsPromptCache: false,
161+
inputPrice: undefined,
162+
outputPrice: undefined,
163+
description: "test-non-computer-model via LiteLLM proxy",
164+
})
141165
})
142166

143167
it("throws error for unexpected response format", async () => {

src/api/providers/fetchers/litellm.ts

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import axios from "axios"
22

3-
import { OPEN_ROUTER_COMPUTER_USE_MODELS, ModelRecord } from "../../../shared/api"
3+
import { ModelRecord } from "../../../shared/api"
44

55
/**
66
* Fetches available models from a LiteLLM server
@@ -23,8 +23,6 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
2323
const response = await axios.get(`${baseUrl}/v1/model/info`, { headers, timeout: 5000 })
2424
const models: ModelRecord = {}
2525

26-
const computerModels = Array.from(OPEN_ROUTER_COMPUTER_USE_MODELS)
27-
2826
// Process the model info from the response
2927
if (response.data && response.data.data && Array.isArray(response.data.data)) {
3028
for (const model of response.data.data) {
@@ -39,9 +37,7 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
3937
contextWindow: modelInfo.max_input_tokens || 200000,
4038
supportsImages: Boolean(modelInfo.supports_vision),
4139
// litellm_params.model may have a prefix like openrouter/
42-
supportsComputerUse: computerModels.some((computer_model) =>
43-
litellmModelName.endsWith(computer_model),
44-
),
40+
supportsComputerUse: Boolean(modelInfo.supports_computer_use),
4541
supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
4642
inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
4743
outputPrice: modelInfo.output_cost_per_token

0 commit comments

Comments (0)