Commit 2f41ec5

feat: add tool call functionality
- Introduced a new ToolCallSettingsControl component for enabling/disabling tool calls in the settings UI.
- Updated API options to include tool call settings and integrated them into the existing settings view.
- Enhanced localization files to support the new tool call settings in multiple languages.
- Added logic to handle the tool-call-enabled state across components and tools.
1 parent 0c481a3 commit 2f41ec5

73 files changed: +3202, -141 lines


packages/types/src/provider-settings.ts

Lines changed: 1 addition & 0 deletions
@@ -95,6 +95,7 @@ const baseProviderSettingsSchema = z.object({
 	includeMaxTokens: z.boolean().optional(),
 	diffEnabled: z.boolean().optional(),
 	todoListEnabled: z.boolean().optional(),
+	toolCallEnabled: z.boolean().optional(),
 	fuzzyMatchThreshold: z.number().optional(),
 	modelTemperature: z.number().nullish(),
 	rateLimitSeconds: z.number().optional(),
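The new flag follows the pattern of its neighbors: optional, so an absent value parses to undefined and callers choose the default. A minimal sketch of that behavior, using an illustrative two-field subset of baseProviderSettingsSchema:

import { z } from "zod"

// Illustrative subset; the real baseProviderSettingsSchema has many more fields.
const subsetSchema = z.object({
	diffEnabled: z.boolean().optional(),
	toolCallEnabled: z.boolean().optional(),
})

const parsed = subsetSchema.parse({ toolCallEnabled: true })
// parsed.toolCallEnabled === true, parsed.diffEnabled === undefined
console.log(parsed.toolCallEnabled ?? false) // fall back to false when unset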

src/api/index.ts

Lines changed: 10 additions & 1 deletion
@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 
-import type { ProviderSettings, ModelInfo } from "@roo-code/types"
+import type { ProviderSettings, ModelInfo, ToolName } from "@roo-code/types"
 
 import { ApiStream } from "./transform/stream"
 
@@ -39,6 +39,7 @@ import {
 	FeatherlessHandler,
 } from "./providers"
 import { NativeOllamaHandler } from "./providers/native-ollama"
+import { ToolArgs } from "../core/prompts/tools/types"
 
 export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
@@ -54,6 +55,14 @@ export interface ApiHandlerCreateMessageMetadata {
 	 * Used to enforce "skip once" after a condense operation.
 	 */
 	suppressPreviousResponseId?: boolean
+	/**
+	 * Tool names to expose to the model as callable functions.
+	 */
+	tools?: ToolName[]
+	/**
+	 * Arguments used when generating the tool schemas.
+	 */
+	toolArgs?: ToolArgs
 }
 
 export interface ApiHandler {
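With these two fields, a caller opts a request into native tool calling by listing tool names plus the arguments the schema generator needs. A sketch of constructing the metadata; the local type stand-ins and the ToolArgs fields (cwd, supportsComputerUse) are assumptions for illustration, not the repo's exact definitions:

// Local stand-ins so the sketch type-checks outside the repo.
type ToolName = string
interface ToolArgs {
	cwd: string
	supportsComputerUse: boolean
}
interface ApiHandlerCreateMessageMetadata {
	suppressPreviousResponseId?: boolean
	tools?: ToolName[]
	toolArgs?: ToolArgs
}

// Hypothetical values: expose two tools and describe the workspace.
const metadata: ApiHandlerCreateMessageMetadata = {
	tools: ["read_file", "execute_command"],
	toolArgs: { cwd: "/workspace", supportsComputerUse: false },
}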

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 6 additions & 0 deletions
@@ -10,6 +10,7 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
+import { ToolRegistry } from "../../core/prompts/tools/schemas/tool-registry"
 
 type BaseOpenAiCompatibleProviderOptions<ModelName extends string> = ApiHandlerOptions & {
 	providerName: string
@@ -81,6 +82,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 			stream_options: { include_usage: true },
 		}
 
+		if (metadata?.tools && metadata.tools.length > 0) {
+			params.tools = ToolRegistry.getInstance().generateFunctionCallSchemas(metadata.tools, metadata.toolArgs)
+			params.tool_choice = "auto"
+		}
+
 		// Only include temperature if explicitly set
 		if (this.options.modelTemperature !== undefined) {
 			params.temperature = this.options.modelTemperature
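The registry output is assigned directly to the OpenAI tools parameter, so each entry presumably follows the Chat Completions function-tool shape. A hand-written equivalent for a single hypothetical tool (the schema body is illustrative, not the registry's actual output):

import OpenAI from "openai"

// What one generated entry plausibly looks like on the wire.
const tools: OpenAI.Chat.ChatCompletionTool[] = [
	{
		type: "function",
		function: {
			name: "read_file",
			description: "Read the contents of a file in the workspace.",
			parameters: {
				type: "object",
				properties: {
					path: { type: "string", description: "Path relative to the workspace root" },
				},
				required: ["path"],
			},
		},
	},
]

// With tool_choice: "auto", the model decides per turn whether to call a tool.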

src/api/providers/base-provider.ts

Lines changed: 30 additions & 0 deletions
@@ -32,4 +32,34 @@ export abstract class BaseProvider implements ApiHandler {
 
 		return countTokens(content, { useWorker: true })
 	}
+
+	/**
+	 * Convert tool schemas to text format for token counting
+	 */
+	protected convertToolSchemasToText(toolSchemas: Anthropic.ToolUnion[]): string {
+		if (toolSchemas.length === 0) {
+			return ""
+		}
+
+		const toolsDescription = toolSchemas
+			.map((tool) => {
+				// Handle different tool types by accessing properties safely
+				const toolName = tool.name
+				let toolText = `Tool: ${toolName}\n`
+
+				// Try to access description and input_schema properties
+				if ("description" in tool) {
+					toolText += `Description: ${tool.description}\n`
+				}
+
+				if ("input_schema" in tool && tool.input_schema && typeof tool.input_schema === "object") {
+					toolText += `Parameters:\n${JSON.stringify(tool.input_schema, null, 2)}\n`
+				}
+
+				return toolText
+			})
+			.join("\n---\n")
+
+		return `Available Tools:\n${toolsDescription}`
+	}
 }
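The helper flattens each schema into labeled text so providers without native tool-token accounting can still estimate prompt size. A sketch of its output for one hypothetical Anthropic-style tool:

import { Anthropic } from "@anthropic-ai/sdk"

// Hypothetical tool definition for illustration.
const tool: Anthropic.Tool = {
	name: "read_file",
	description: "Read a file from the workspace.",
	input_schema: {
		type: "object",
		properties: { path: { type: "string" } },
		required: ["path"],
	},
}

// Inside a BaseProvider subclass:
//   this.convertToolSchemasToText([tool])
// yields roughly:
//   Available Tools:
//   Tool: read_file
//   Description: Read a file from the workspace.
//   Parameters:
//   { "type": "object", ... }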

src/api/providers/featherless.ts

Lines changed: 6 additions & 1 deletion
@@ -1,4 +1,9 @@
-import { DEEP_SEEK_DEFAULT_TEMPERATURE, type FeatherlessModelId, featherlessDefaultModelId, featherlessModels } from "@roo-code/types"
+import {
+	DEEP_SEEK_DEFAULT_TEMPERATURE,
+	type FeatherlessModelId,
+	featherlessDefaultModelId,
+	featherlessModels,
+} from "@roo-code/types"
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
src/api/providers/lm-studio.ts

Lines changed: 21 additions & 1 deletion
@@ -15,6 +15,7 @@ import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getModels, getModelsFromCache } from "./fetchers/modelCache"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { getToolRegistry } from "../../core/prompts/tools/schemas/tool-registry"
 
 export class LmStudioHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -40,6 +41,8 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			{ role: "system", content: systemPrompt },
 			...convertToOpenAiMessages(messages),
 		]
+		const toolCallEnabled = metadata?.tools && metadata.tools.length > 0
+		const toolRegistry = getToolRegistry()
 
 		// -------------------------
 		// Track token usage
@@ -68,7 +71,17 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 
 		let inputTokens = 0
 		try {
-			inputTokens = await this.countTokens([{ type: "text", text: systemPrompt }, ...toContentBlocks(messages)])
+			const inputMessages: Anthropic.Messages.ContentBlockParam[] = [{ type: "text", text: systemPrompt }]
+			if (toolCallEnabled) {
+				const toolSchemas: Anthropic.ToolUnion[] = toolRegistry.generateAnthropicToolSchemas(
+					metadata.tools!,
+					metadata.toolArgs,
+				)
+				const toolsText = this.convertToolSchemasToText(toolSchemas)
+				inputMessages.push({ type: "text", text: toolsText })
+			}
+			inputMessages.push(...toContentBlocks(messages))
+			inputTokens = await this.countTokens(inputMessages)
 		} catch (err) {
 			console.error("[LmStudio] Failed to count input tokens:", err)
 			inputTokens = 0
@@ -83,6 +96,10 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 			temperature: this.options.modelTemperature ?? LMSTUDIO_DEFAULT_TEMPERATURE,
 			stream: true,
 		}
+		if (toolCallEnabled) {
+			params.tools = toolRegistry.generateFunctionCallSchemas(metadata.tools!, metadata.toolArgs)
+			params.tool_choice = "auto"
+		}
 
 		if (this.options.lmStudioSpeculativeDecodingEnabled && this.options.lmStudioDraftModelId) {
 			params.draft_model = this.options.lmStudioDraftModelId
@@ -108,6 +125,9 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 					yield processedChunk
 				}
 			}
+			if (delta?.tool_calls) {
+				yield { type: "tool_call", toolCalls: delta.tool_calls, toolCallType: "openai" }
+			}
 		}
 
 		for (const processedChunk of matcher.final()) {

src/api/providers/openai.ts

Lines changed: 11 additions & 0 deletions
@@ -24,6 +24,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { getToolRegistry } from "../../core/prompts/tools/schemas/tool-registry"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -92,6 +93,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const deepseekReasoner = modelId.includes("deepseek-reasoner") || enabledR1Format
 		const ark = modelUrl.includes(".volces.com")
 
+		const toolCallEnabled = metadata?.tools && metadata.tools.length > 0
+		const toolRegistry = getToolRegistry()
+
 		if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {
 			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
 			return
@@ -162,6 +166,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 			...(reasoning && reasoning),
 		}
+		if (toolCallEnabled) {
+			requestOptions.tools = toolRegistry.generateFunctionCallSchemas(metadata.tools!, metadata.toolArgs)
+			requestOptions.tool_choice = "auto"
+		}
 
 		// Only include temperature if explicitly set
 		if (this.options.modelTemperature !== undefined) {
@@ -205,6 +213,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 					text: (delta.reasoning_content as string | undefined) || "",
 				}
 			}
+			if (delta?.tool_calls) {
+				yield { type: "tool_call", toolCalls: delta.tool_calls, toolCallType: "openai" }
+			}
 			if (chunk.usage) {
 				lastUsage = chunk.usage
 			}
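The handlers forward delta.tool_calls fragments as-is: OpenAI-compatible servers stream a tool call's id, name, and JSON arguments in pieces keyed by index, so whichever consumer receives these chunks has to stitch them back together. That consumer is not shown in this diff; here is a sketch of the accumulation it would need:

import OpenAI from "openai"

type ToolCallDelta = OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall

// Accumulate streamed fragments per index; `args` only becomes valid JSON
// once the stream for that index has finished.
function mergeToolCallDeltas(
	acc: Map<number, { id: string; name: string; args: string }>,
	deltas: ToolCallDelta[],
): void {
	for (const d of deltas) {
		const entry = acc.get(d.index) ?? { id: "", name: "", args: "" }
		if (d.id) entry.id = d.id
		entry.name += d.function?.name ?? ""
		entry.args += d.function?.arguments ?? ""
		acc.set(d.index, entry)
	}
}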

src/api/providers/openrouter.ts

Lines changed: 13 additions & 1 deletion
@@ -24,7 +24,8 @@ import { getModelEndpoints } from "./fetchers/modelEndpointCache"
 
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
-import type { SingleCompletionHandler } from "../index"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { getToolRegistry } from "../../core/prompts/tools/schemas/tool-registry"
 
 // Add custom interface for OpenRouter params.
 type OpenRouterChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & {
@@ -72,10 +73,13 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 	override async *createMessage(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
 	): AsyncGenerator<ApiStreamChunk> {
 		const model = await this.fetchModel()
 
 		let { id: modelId, maxTokens, temperature, topP, reasoning } = model
+		const toolCallEnabled = metadata?.tools && metadata.tools.length > 0
+		const toolRegistry = getToolRegistry()
 
 		// OpenRouter sends reasoning tokens by default for Gemini 2.5 Pro
 		// Preview even if you don't request them. This is not the default for
@@ -133,6 +137,10 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			...(transforms && { transforms }),
 			...(reasoning && { reasoning }),
 		}
+		if (toolCallEnabled) {
+			completionParams.tools = toolRegistry.generateFunctionCallSchemas(metadata.tools!, metadata.toolArgs!)
+			completionParams.tool_choice = "auto"
+		}
 
 		const stream = await this.client.chat.completions.create(completionParams)
 
@@ -156,6 +164,10 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 				yield { type: "text", text: delta.content }
 			}
 
+			if (delta?.tool_calls) {
+				yield { type: "tool_call", toolCalls: delta.tool_calls, toolCallType: "openai" }
+			}
+
 			if (chunk.usage) {
 				lastUsage = chunk.usage
 			}

src/api/providers/roo.ts

Lines changed: 4 additions & 0 deletions
@@ -54,6 +54,10 @@ export class RooHandler extends BaseOpenAiCompatibleProvider<RooModelId> {
 			}
 		}
 
+		if (delta?.tool_calls) {
+			yield { type: "tool_call", toolCalls: delta.tool_calls, toolCallType: "openai" }
+		}
+
 		if ("reasoning_content" in delta && typeof delta.reasoning_content === "string") {
 			yield {
 				type: "reasoning",

src/api/transform/stream.ts

Lines changed: 14 additions & 1 deletion
@@ -1,6 +1,13 @@
+import { ToolCallProviderType } from "../../shared/tools"
+
 export type ApiStream = AsyncGenerator<ApiStreamChunk>
 
-export type ApiStreamChunk = ApiStreamTextChunk | ApiStreamUsageChunk | ApiStreamReasoningChunk | ApiStreamError
+export type ApiStreamChunk =
+	| ApiStreamTextChunk
+	| ApiStreamUsageChunk
+	| ApiStreamReasoningChunk
+	| ApiStreamError
+	| ApiStreamToolCallChunk
 
 export interface ApiStreamError {
 	type: "error"
@@ -27,3 +34,9 @@ export interface ApiStreamUsageChunk {
 	reasoningTokens?: number
 	totalCost?: number
 }
+
+export interface ApiStreamToolCallChunk {
+	type: "tool_call"
+	toolCalls: any
+	toolCallType: ToolCallProviderType
+}