Skip to content

Commit b5acebc

Browse files
mrubens and roomote authored
Add native tool support for vercel ai gateway (#9697)
Co-authored-by: Roo Code <[email protected]>
1 parent eb12054 commit b5acebc

File tree

4 files changed

+232
-0
lines changed

4 files changed

+232
-0
lines changed

src/api/providers/__tests__/vercel-ai-gateway.spec.ts

Lines changed: 211 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -274,6 +274,217 @@ describe("VercelAiGatewayHandler", () => {
274274
totalCost: 0.005,
275275
})
276276
})
277+
278+
// Tests for the native (OpenAI-style) tool-calling path of VercelAiGatewayHandler.
// Relies on the file's shared `mockCreate` (the mocked chat.completions.create)
// and `mockOptions` fixtures — presumably defined earlier in this spec file;
// TODO(review): confirm both are in scope at this point.
describe("native tool calling", () => {
	// Minimal OpenAI-format function tool definition reused by every test below.
	const testTools = [
		{
			type: "function" as const,
			function: {
				name: "test_tool",
				description: "A test tool",
				parameters: {
					type: "object",
					properties: {
						arg1: { type: "string" },
					},
					required: ["arg1"],
				},
			},
		},
	]

	beforeEach(() => {
		// Default mock: a stream that emits a single empty delta, so tests that
		// only inspect the request arguments can drain the generator quickly.
		mockCreate.mockImplementation(async () => ({
			[Symbol.asyncIterator]: async function* () {
				yield {
					choices: [
						{
							delta: {},
							index: 0,
						},
					],
				}
			},
		}))
	})

	it("should include tools when provided", async () => {
		const handler = new VercelAiGatewayHandler(mockOptions)

		const messageGenerator = handler.createMessage("test prompt", [], {
			taskId: "test-task-id",
			tools: testTools,
			toolProtocol: "native",
		})
		// One next() is enough to trigger the underlying create() call.
		await messageGenerator.next()

		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				tools: expect.arrayContaining([
					expect.objectContaining({
						type: "function",
						function: expect.objectContaining({
							name: "test_tool",
						}),
					}),
				]),
			}),
		)
	})

	it("should include tool_choice when provided", async () => {
		const handler = new VercelAiGatewayHandler(mockOptions)

		const messageGenerator = handler.createMessage("test prompt", [], {
			taskId: "test-task-id",
			tools: testTools,
			toolProtocol: "native",
			tool_choice: "auto",
		})
		await messageGenerator.next()

		// tool_choice should be forwarded verbatim to the API request body.
		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				tool_choice: "auto",
			}),
		)
	})

	it("should set parallel_tool_calls when toolProtocol is native", async () => {
		const handler = new VercelAiGatewayHandler(mockOptions)

		const messageGenerator = handler.createMessage("test prompt", [], {
			taskId: "test-task-id",
			tools: testTools,
			toolProtocol: "native",
			parallelToolCalls: true,
		})
		await messageGenerator.next()

		// camelCase metadata option maps to the snake_case API field.
		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				parallel_tool_calls: true,
			}),
		)
	})

	it("should default parallel_tool_calls to false", async () => {
		const handler = new VercelAiGatewayHandler(mockOptions)

		const messageGenerator = handler.createMessage("test prompt", [], {
			taskId: "test-task-id",
			tools: testTools,
			toolProtocol: "native",
		})
		await messageGenerator.next()

		// When parallelToolCalls is omitted the handler must send an explicit false.
		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				parallel_tool_calls: false,
			}),
		)
	})

	it("should yield tool_call_partial chunks when streaming tool calls", async () => {
		// Override the default mock with a three-chunk stream: two tool-call
		// argument fragments followed by a terminal chunk carrying usage.
		mockCreate.mockImplementation(async () => ({
			[Symbol.asyncIterator]: async function* () {
				yield {
					choices: [
						{
							delta: {
								tool_calls: [
									{
										index: 0,
										id: "call_123",
										function: {
											name: "test_tool",
											arguments: '{"arg1":',
										},
									},
								],
							},
							index: 0,
						},
					],
				}
				yield {
					choices: [
						{
							delta: {
								tool_calls: [
									{
										index: 0,
										function: {
											arguments: '"value"}',
										},
									},
								],
							},
							index: 0,
						},
					],
				}
				yield {
					choices: [
						{
							delta: {},
							index: 0,
						},
					],
					usage: {
						prompt_tokens: 10,
						completion_tokens: 5,
					},
				}
			},
		}))

		const handler = new VercelAiGatewayHandler(mockOptions)

		const stream = handler.createMessage("test prompt", [], {
			taskId: "test-task-id",
			tools: testTools,
			toolProtocol: "native",
		})

		const chunks = []
		for await (const chunk of stream) {
			chunks.push(chunk)
		}

		// The handler re-emits raw deltas; continuation chunks carry undefined
		// id/name because only the first fragment of a call includes them.
		const toolCallChunks = chunks.filter((chunk) => chunk.type === "tool_call_partial")
		expect(toolCallChunks).toHaveLength(2)
		expect(toolCallChunks[0]).toEqual({
			type: "tool_call_partial",
			index: 0,
			id: "call_123",
			name: "test_tool",
			arguments: '{"arg1":',
		})
		expect(toolCallChunks[1]).toEqual({
			type: "tool_call_partial",
			index: 0,
			id: undefined,
			name: undefined,
			arguments: '"value"}',
		})
	})

	it("should include stream_options with include_usage", async () => {
		const handler = new VercelAiGatewayHandler(mockOptions)

		// No tools here: stream_options must be present on every request.
		const messageGenerator = handler.createMessage("test prompt", [], {
			taskId: "test-task-id",
		})
		await messageGenerator.next()

		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({
				stream_options: { include_usage: true },
			}),
		)
	})
})
277488
})
278489

279490
describe("completePrompt", () => {

src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,7 @@ describe("Vercel AI Gateway Fetchers", () => {
176176
maxTokens: 8000,
177177
contextWindow: 100000,
178178
supportsImages: false,
179+
supportsNativeTools: true,
179180
supportsPromptCache: false,
180181
inputPrice: 2500000,
181182
outputPrice: 10000000,

src/api/providers/fetchers/vercel-ai-gateway.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,7 @@ export const parseVercelAiGatewayModel = ({ id, model }: { id: string; model: Ve
108108
contextWindow: model.context_window,
109109
supportsImages,
110110
supportsPromptCache,
111+
supportsNativeTools: true,
111112
inputPrice: parseApiPrice(model.pricing?.input),
112113
outputPrice: parseApiPrice(model.pricing?.output),
113114
cacheWritesPrice,

src/api/providers/vercel-ai-gateway.ts

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,12 @@ export class VercelAiGatewayHandler extends RouterProvider implements SingleComp
6060
: undefined,
6161
max_completion_tokens: info.maxTokens,
6262
stream: true,
63+
stream_options: { include_usage: true },
64+
...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }),
65+
...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }),
66+
...(metadata?.toolProtocol === "native" && {
67+
parallel_tool_calls: metadata.parallelToolCalls ?? false,
68+
}),
6369
}
6470

6571
const completion = await this.client.chat.completions.create(body)
@@ -73,6 +79,19 @@ export class VercelAiGatewayHandler extends RouterProvider implements SingleComp
7379
}
7480
}
7581

82+
// Emit raw tool call chunks - NativeToolCallParser handles state management
83+
if (delta?.tool_calls) {
84+
for (const toolCall of delta.tool_calls) {
85+
yield {
86+
type: "tool_call_partial",
87+
index: toolCall.index,
88+
id: toolCall.id,
89+
name: toolCall.function?.name,
90+
arguments: toolCall.function?.arguments,
91+
}
92+
}
93+
}
94+
7695
if (chunk.usage) {
7796
const usage = chunk.usage as VercelAiGatewayUsage
7897
yield {

0 commit comments

Comments
 (0)