Skip to content

Commit 8ade97f

Browse files
committed
fix: revert BaseOpenAiCompatibleProvider and hide max tokens slider for OpenAI compatible providers
- Reverted BaseOpenAiCompatibleProvider to use maxTokens directly from model info. OpenAI compatible providers have their own server-side max output configuration. - Hid the generic MaxTokensSlider for the OpenAI compatible provider in the UI. This ensures OpenAI compatible providers use their own max tokens configuration.
1 parent 7623d21 commit 8ade97f

File tree

2 files changed

+12
-13
lines changed

2 files changed

+12
-13
lines changed

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@ import OpenAI from "openai"
44
import type { ModelInfo } from "@roo-code/types"
55

66
import type { ApiHandlerOptions } from "../../shared/api"
7-
import { getModelMaxOutputTokens } from "../../shared/api"
87
import { ApiStream } from "../transform/stream"
98
import { convertToOpenAiMessages } from "../transform/openai-format"
109

@@ -68,17 +67,15 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
6867
messages: Anthropic.Messages.MessageParam[],
6968
metadata?: ApiHandlerCreateMessageMetadata,
7069
): ApiStream {
71-
const model = this.getModel()
72-
const max_tokens = getModelMaxOutputTokens({
73-
modelId: model.id,
74-
model: model.info,
75-
settings: this.options as any,
76-
})
70+
const {
71+
id: model,
72+
info: { maxTokens: max_tokens },
73+
} = this.getModel()
7774

7875
const temperature = this.options.modelTemperature ?? this.defaultTemperature
7976

8077
const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
81-
model: model.id,
78+
model,
8279
max_tokens,
8380
temperature,
8481
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -575,11 +575,13 @@ const ApiOptions = ({
575575
onChange={handleInputChange("modelTemperature", noTransform)}
576576
maxValue={2}
577577
/>
578-
<MaxTokensSlider
579-
value={apiConfiguration.modelMaxTokens}
580-
onChange={(value) => setApiConfigurationField("modelMaxTokens", value)}
581-
modelInfo={selectedModelInfo}
582-
/>
578+
{selectedProvider !== "openai" && (
579+
<MaxTokensSlider
580+
value={apiConfiguration.modelMaxTokens}
581+
onChange={(value) => setApiConfigurationField("modelMaxTokens", value)}
582+
modelInfo={selectedModelInfo}
583+
/>
584+
)}
583585
<RateLimitSecondsControl
584586
value={apiConfiguration.rateLimitSeconds || 0}
585587
onChange={(value) => setApiConfigurationField("rateLimitSeconds", value)}

0 commit comments

Comments (0)