Skip to content

Commit ad28e12

Browse files
authored
feat: add native tool support for LiteLLM provider (#9719)
1 parent be76594 commit ad28e12

File tree

4 files changed

+61
-1
lines changed

4 files changed

+61
-1
lines changed

packages/types/src/providers/lite-llm.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ export const litellmDefaultModelInfo: ModelInfo = {
88
contextWindow: 200_000,
99
supportsImages: true,
1010
supportsPromptCache: true,
11+
supportsNativeTools: true,
1112
inputPrice: 3.0,
1213
outputPrice: 15.0,
1314
cacheWritesPrice: 3.75,

src/api/providers/fetchers/__tests__/litellm.spec.ts

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -222,17 +222,23 @@ describe("getLiteLLMModels", () => {
222222
contextWindow: 200000,
223223
supportsImages: true,
224224
supportsPromptCache: false,
225+
supportsNativeTools: false,
225226
inputPrice: 3,
226227
outputPrice: 15,
228+
cacheWritesPrice: undefined,
229+
cacheReadsPrice: undefined,
227230
description: "claude-3-5-sonnet via LiteLLM proxy",
228231
},
229232
"gpt-4-turbo": {
230233
maxTokens: 8192,
231234
contextWindow: 128000,
232235
supportsImages: false,
233236
supportsPromptCache: false,
237+
supportsNativeTools: false,
234238
inputPrice: 10,
235239
outputPrice: 30,
240+
cacheWritesPrice: undefined,
241+
cacheReadsPrice: undefined,
236242
description: "gpt-4-turbo via LiteLLM proxy",
237243
},
238244
})
@@ -299,8 +305,11 @@ describe("getLiteLLMModels", () => {
299305
contextWindow: 200000,
300306
supportsImages: true,
301307
supportsPromptCache: false,
308+
supportsNativeTools: false,
302309
inputPrice: undefined,
303310
outputPrice: undefined,
311+
cacheWritesPrice: undefined,
312+
cacheReadsPrice: undefined,
304313
description: "test-computer-model via LiteLLM proxy",
305314
})
306315

@@ -309,8 +318,11 @@ describe("getLiteLLMModels", () => {
309318
contextWindow: 200000,
310319
supportsImages: false,
311320
supportsPromptCache: false,
321+
supportsNativeTools: false,
312322
inputPrice: undefined,
313323
outputPrice: undefined,
324+
cacheWritesPrice: undefined,
325+
cacheReadsPrice: undefined,
314326
description: "test-non-computer-model via LiteLLM proxy",
315327
})
316328
})
@@ -443,8 +455,11 @@ describe("getLiteLLMModels", () => {
443455
contextWindow: 200000,
444456
supportsImages: true,
445457
supportsPromptCache: false,
458+
supportsNativeTools: false,
446459
inputPrice: undefined,
447460
outputPrice: undefined,
461+
cacheWritesPrice: undefined,
462+
cacheReadsPrice: undefined,
448463
description: "claude-3-5-sonnet-latest via LiteLLM proxy",
449464
})
450465

@@ -453,8 +468,11 @@ describe("getLiteLLMModels", () => {
453468
contextWindow: 128000,
454469
supportsImages: false,
455470
supportsPromptCache: false,
471+
supportsNativeTools: false,
456472
inputPrice: undefined,
457473
outputPrice: undefined,
474+
cacheWritesPrice: undefined,
475+
cacheReadsPrice: undefined,
458476
description: "gpt-4-turbo via LiteLLM proxy",
459477
})
460478
})
@@ -515,8 +533,11 @@ describe("getLiteLLMModels", () => {
515533
contextWindow: 200000,
516534
supportsImages: true,
517535
supportsPromptCache: false,
536+
supportsNativeTools: false,
518537
inputPrice: undefined,
519538
outputPrice: undefined,
539+
cacheWritesPrice: undefined,
540+
cacheReadsPrice: undefined,
520541
description: "claude-3-5-sonnet-latest via LiteLLM proxy",
521542
})
522543

@@ -525,8 +546,11 @@ describe("getLiteLLMModels", () => {
525546
contextWindow: 128000,
526547
supportsImages: false,
527548
supportsPromptCache: false,
549+
supportsNativeTools: false,
528550
inputPrice: undefined,
529551
outputPrice: undefined,
552+
cacheWritesPrice: undefined,
553+
cacheReadsPrice: undefined,
530554
description: "custom-model via LiteLLM proxy",
531555
})
532556

@@ -535,8 +559,11 @@ describe("getLiteLLMModels", () => {
535559
contextWindow: 128000,
536560
supportsImages: false,
537561
supportsPromptCache: false,
562+
supportsNativeTools: false,
538563
inputPrice: undefined,
539564
outputPrice: undefined,
565+
cacheWritesPrice: undefined,
566+
cacheReadsPrice: undefined,
540567
description: "another-custom-model via LiteLLM proxy",
541568
})
542569
})
@@ -646,6 +673,7 @@ describe("getLiteLLMModels", () => {
646673
contextWindow: 200000,
647674
supportsImages: true,
648675
supportsPromptCache: false,
676+
supportsNativeTools: false,
649677
inputPrice: undefined,
650678
outputPrice: undefined,
651679
cacheWritesPrice: undefined,
@@ -659,6 +687,7 @@ describe("getLiteLLMModels", () => {
659687
contextWindow: 128000,
660688
supportsImages: false,
661689
supportsPromptCache: false,
690+
supportsNativeTools: false,
662691
inputPrice: undefined,
663692
outputPrice: undefined,
664693
cacheWritesPrice: undefined,
@@ -672,6 +701,7 @@ describe("getLiteLLMModels", () => {
672701
contextWindow: 100000,
673702
supportsImages: false,
674703
supportsPromptCache: false,
704+
supportsNativeTools: false,
675705
inputPrice: undefined,
676706
outputPrice: undefined,
677707
cacheWritesPrice: undefined,

src/api/providers/fetchers/litellm.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,11 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise
4545
contextWindow: modelInfo.max_input_tokens || 200000,
4646
supportsImages: Boolean(modelInfo.supports_vision),
4747
supportsPromptCache: Boolean(modelInfo.supports_prompt_caching),
48+
supportsNativeTools: Boolean(
49+
modelInfo.supports_function_calling ||
50+
modelInfo.supports_tool_choice ||
51+
modelInfo.supports_tool_use,
52+
),
4853
inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined,
4954
outputPrice: modelInfo.output_cost_per_token
5055
? modelInfo.output_cost_per_token * 1000000

src/api/providers/lite-llm.ts

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import OpenAI from "openai"
22
import { Anthropic } from "@anthropic-ai/sdk" // Keep for type usage only
33

4-
import { litellmDefaultModelId, litellmDefaultModelInfo } from "@roo-code/types"
4+
import { litellmDefaultModelId, litellmDefaultModelInfo, TOOL_PROTOCOL } from "@roo-code/types"
55

66
import { calculateApiCostOpenAI } from "../../shared/cost"
77

@@ -116,13 +116,24 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
116116
// Check if this is a GPT-5 model that requires max_completion_tokens instead of max_tokens
117117
const isGPT5Model = this.isGpt5(modelId)
118118

119+
// Check if model supports native tools and tools are provided with native protocol
120+
const supportsNativeTools = info.supportsNativeTools ?? false
121+
const useNativeTools =
122+
supportsNativeTools &&
123+
metadata?.tools &&
124+
metadata.tools.length > 0 &&
125+
metadata?.toolProtocol === TOOL_PROTOCOL.NATIVE
126+
119127
const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
120128
model: modelId,
121129
messages: [systemMessage, ...enhancedMessages],
122130
stream: true,
123131
stream_options: {
124132
include_usage: true,
125133
},
134+
...(useNativeTools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
135+
...(useNativeTools && metadata.tool_choice && { tool_choice: metadata.tool_choice }),
136+
...(useNativeTools && { parallel_tool_calls: metadata?.parallelToolCalls ?? false }),
126137
}
127138

128139
// GPT-5 models require max_completion_tokens instead of the deprecated max_tokens parameter
@@ -149,6 +160,19 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
149160
yield { type: "text", text: delta.content }
150161
}
151162

163+
// Handle tool calls in stream - emit partial chunks for NativeToolCallParser
164+
if (delta?.tool_calls) {
165+
for (const toolCall of delta.tool_calls) {
166+
yield {
167+
type: "tool_call_partial",
168+
index: toolCall.index,
169+
id: toolCall.id,
170+
name: toolCall.function?.name,
171+
arguments: toolCall.function?.arguments,
172+
}
173+
}
174+
}
175+
152176
if (usage) {
153177
lastUsage = usage
154178
}

0 commit comments

Comments (0)