Skip to content

Commit 4ea7562

Browse files
authored
Add thinking to Requesty provider (#4041)
1 parent 9d4b4eb commit 4ea7562

File tree

3 files changed

+32
-1
lines changed

3 files changed

+32
-1
lines changed

.changeset/new-shoes-flow.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"roo-cline": patch
3+
---
4+
5+
Add thinking controls for Requesty

src/api/providers/fetchers/requesty.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,17 @@ export async function getRequestyModels(apiKey?: string): Promise<Record<string,
1919
const rawModels = response.data.data
2020

2121
for (const rawModel of rawModels) {
22+
const reasoningBudget = rawModel.supports_reasoning && rawModel.id.includes("claude")
23+
const reasoningEffort = rawModel.supports_reasoning && rawModel.id.includes("openai")
24+
2225
const modelInfo: ModelInfo = {
2326
maxTokens: rawModel.max_output_tokens,
2427
contextWindow: rawModel.context_window,
2528
supportsPromptCache: rawModel.supports_caching,
2629
supportsImages: rawModel.supports_vision,
2730
supportsComputerUse: rawModel.supports_computer_use,
31+
supportsReasoningBudget: reasoningBudget,
32+
supportsReasoningEffort: reasoningEffort,
2833
inputPrice: parseApiPrice(rawModel.input_price),
2934
outputPrice: parseApiPrice(rawModel.output_price),
3035
description: rawModel.description,

src/api/providers/requesty.ts

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,10 @@ type RequestyChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & {
3131
mode?: string
3232
}
3333
}
34+
thinking?: {
35+
type: string
36+
budget_tokens?: number
37+
}
3438
}
3539

3640
export class RequestyHandler extends BaseProvider implements SingleCompletionHandler {
@@ -94,10 +98,25 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
9498
]
9599

96100
let maxTokens = undefined
97-
if (this.options.includeMaxTokens) {
101+
if (this.options.modelMaxTokens) {
102+
maxTokens = this.options.modelMaxTokens
103+
} else if (this.options.includeMaxTokens) {
98104
maxTokens = model.info.maxTokens
99105
}
100106

107+
let reasoningEffort = undefined
108+
if (this.options.reasoningEffort) {
109+
reasoningEffort = this.options.reasoningEffort
110+
}
111+
112+
let thinking = undefined
113+
if (this.options.modelMaxThinkingTokens) {
114+
thinking = {
115+
type: "enabled",
116+
budget_tokens: this.options.modelMaxThinkingTokens,
117+
}
118+
}
119+
101120
const temperature = this.options.modelTemperature
102121

103122
const completionParams: RequestyChatCompletionParams = {
@@ -107,6 +126,8 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
107126
temperature: temperature,
108127
stream: true,
109128
stream_options: { include_usage: true },
129+
reasoning_effort: reasoningEffort,
130+
thinking: thinking,
110131
requesty: {
111132
trace_id: metadata?.taskId,
112133
extra: {

0 commit comments

Comments (0)