Skip to content

Commit ffd80d9

Browse files
fealbuqu (Felipe Albuquerque)
and a co-author authored
Patch/openai comp max tokens (RooCodeInc#2411)
* take into account max_tokens for OpenAI Compatible provider * change set * considering default -1 value * checking maxTokens is set and greater than 0 --------- Co-authored-by: Felipe Albuquerque <[email protected]>
1 parent 54bb50f commit ffd80d9

File tree

2 files changed

+13
-0
lines changed

2 files changed

+13
-0
lines changed

.changeset/happy-seals-tickle.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"claude-dev": patch
3+
---
4+
5+
Using Max Output Tokens that was set by the UI

src/api/providers/openai.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,13 @@ export class OpenAiHandler implements ApiHandler {
4343
]
4444
let temperature: number | undefined = this.options.openAiModelInfo?.temperature ?? openAiModelInfoSaneDefaults.temperature
4545
let reasoningEffort: ChatCompletionReasoningEffort | undefined = undefined
46+
let maxTokens: number | undefined
47+
48+
if (this.options.openAiModelInfo?.maxTokens && this.options.openAiModelInfo.maxTokens > 0) {
49+
maxTokens = Number(this.options.openAiModelInfo.maxTokens)
50+
} else {
51+
maxTokens = undefined
52+
}
4653

4754
if (isDeepseekReasoner || isR1FormatRequired) {
4855
openAiMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
@@ -58,6 +65,7 @@ export class OpenAiHandler implements ApiHandler {
5865
model: modelId,
5966
messages: openAiMessages,
6067
temperature,
68+
max_tokens: maxTokens,
6169
reasoning_effort: reasoningEffort,
6270
stream: true,
6371
stream_options: { include_usage: true },

0 commit comments

Comments (0)