diff --git a/src/api/providers/__tests__/openai.spec.ts b/src/api/providers/__tests__/openai.spec.ts
index 81c0b45e41..ba0913c2b2 100644
--- a/src/api/providers/__tests__/openai.spec.ts
+++ b/src/api/providers/__tests__/openai.spec.ts
@@ -5,6 +5,7 @@
 import { OpenAiHandler } from "../openai"
 import { ApiHandlerOptions } from "../../../shared/api"
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import { openAiModelInfoSaneDefaults } from "@roo-code/types"
 
 const mockCreate = vitest.fn()
@@ -197,6 +198,113 @@ describe("OpenAiHandler", () => {
 			const callArgs = mockCreate.mock.calls[0][0]
 			expect(callArgs.reasoning_effort).toBeUndefined()
 		})
+
+		it("should include max_completion_tokens when includeMaxTokens is true", async () => {
+			const optionsWithMaxTokens: ApiHandlerOptions = {
+				...mockOptions,
+				includeMaxTokens: true,
+				openAiCustomModelInfo: {
+					contextWindow: 128_000,
+					maxTokens: 4096,
+					supportsPromptCache: false,
+				},
+			}
+			const handlerWithMaxTokens = new OpenAiHandler(optionsWithMaxTokens)
+			const stream = handlerWithMaxTokens.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called with max_completion_tokens
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.max_completion_tokens).toBe(4096)
+		})
+
+		it("should not include max_completion_tokens when includeMaxTokens is false", async () => {
+			const optionsWithoutMaxTokens: ApiHandlerOptions = {
+				...mockOptions,
+				includeMaxTokens: false,
+				openAiCustomModelInfo: {
+					contextWindow: 128_000,
+					maxTokens: 4096,
+					supportsPromptCache: false,
+				},
+			}
+			const handlerWithoutMaxTokens = new OpenAiHandler(optionsWithoutMaxTokens)
+			const stream = handlerWithoutMaxTokens.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called without max_completion_tokens
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.max_completion_tokens).toBeUndefined()
+		})
+
+		it("should not include max_completion_tokens when includeMaxTokens is undefined", async () => {
+			const optionsWithUndefinedMaxTokens: ApiHandlerOptions = {
+				...mockOptions,
+				// includeMaxTokens is not set, so max_completion_tokens should not be sent
+				openAiCustomModelInfo: {
+					contextWindow: 128_000,
+					maxTokens: 4096,
+					supportsPromptCache: false,
+				},
+			}
+			const handlerWithDefaultMaxTokens = new OpenAiHandler(optionsWithUndefinedMaxTokens)
+			const stream = handlerWithDefaultMaxTokens.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called without max_completion_tokens
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.max_completion_tokens).toBeUndefined()
+		})
+
+		it("should use user-configured modelMaxTokens instead of model default maxTokens", async () => {
+			const optionsWithUserMaxTokens: ApiHandlerOptions = {
+				...mockOptions,
+				includeMaxTokens: true,
+				modelMaxTokens: 32000, // User-configured value
+				openAiCustomModelInfo: {
+					contextWindow: 128_000,
+					maxTokens: 4096, // Model's default value (should not be used)
+					supportsPromptCache: false,
+				},
+			}
+			const handlerWithUserMaxTokens = new OpenAiHandler(optionsWithUserMaxTokens)
+			const stream = handlerWithUserMaxTokens.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called with the user-configured modelMaxTokens (32000), not the model default maxTokens (4096)
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.max_completion_tokens).toBe(32000)
+		})
+
+		it("should fall back to model default maxTokens when user modelMaxTokens is not set", async () => {
+			const optionsWithoutUserMaxTokens: ApiHandlerOptions = {
+				...mockOptions,
+				includeMaxTokens: true,
+				// modelMaxTokens is not set
+				openAiCustomModelInfo: {
+					contextWindow: 128_000,
+					maxTokens: 4096, // Model's default value (should be used as fallback)
+					supportsPromptCache: false,
+				},
+			}
+			const handlerWithoutUserMaxTokens = new OpenAiHandler(optionsWithoutUserMaxTokens)
+			const stream = handlerWithoutUserMaxTokens.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called with the model default maxTokens (4096) as fallback
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.max_completion_tokens).toBe(4096)
+		})
 	})
 
 	describe("error handling", () => {
@@ -336,6 +444,10 @@ describe("OpenAiHandler", () => {
 				},
 				{ path: "/models/chat/completions" },
 			)
+
+			// Verify max_completion_tokens is NOT included when includeMaxTokens is not set
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("max_completion_tokens")
 		})
 
 		it("should handle non-streaming responses with Azure AI Inference Service", async () => {
@@ -378,6 +490,10 @@ describe("OpenAiHandler", () => {
 				},
 				{ path: "/models/chat/completions" },
 			)
+
+			// Verify max_completion_tokens is NOT included when includeMaxTokens is not set
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("max_completion_tokens")
 		})
 
 		it("should handle completePrompt with Azure AI Inference Service", async () => {
@@ -391,6 +507,10 @@ describe("OpenAiHandler", () => {
 				},
 				{ path: "/models/chat/completions" },
 			)
+
+			// Verify max_completion_tokens is NOT included when includeMaxTokens is not set
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("max_completion_tokens")
 		})
 	})
 
@@ -433,4 +553,229 @@ describe("OpenAiHandler", () => {
 		expect(lastCall[0]).not.toHaveProperty("stream_options")
 	})
 })
+
+	describe("O3 Family Models", () => {
+		const o3Options = {
+			...mockOptions,
+			openAiModelId: "o3-mini",
+			openAiCustomModelInfo: {
+				contextWindow: 128_000,
+				maxTokens: 65536,
+				supportsPromptCache: false,
+				reasoningEffort: "medium" as "low" | "medium" | "high",
+			},
+		}
+
+		it("should handle O3 model with streaming and include max_completion_tokens when includeMaxTokens is true", async () => {
+			const o3Handler = new OpenAiHandler({
+				...o3Options,
+				includeMaxTokens: true,
+				modelMaxTokens: 32000,
+				modelTemperature: 0.5,
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = o3Handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: "o3-mini",
+					messages: [
+						{
+							role: "developer",
+							content: "Formatting re-enabled\nYou are a helpful assistant.",
+						},
+						{ role: "user", content: "Hello!" },
+					],
+					stream: true,
+					stream_options: { include_usage: true },
+					reasoning_effort: "medium",
+					temperature: 0.5,
+					// O3 models do not support the deprecated max_tokens parameter but do support max_completion_tokens
+					max_completion_tokens: 32000,
+				}),
+				{},
+			)
+		})
+
+		it("should handle O3 model with streaming and exclude max_completion_tokens when includeMaxTokens is false", async () => {
+			const o3Handler = new OpenAiHandler({
+				...o3Options,
+				includeMaxTokens: false,
+				modelTemperature: 0.7,
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = o3Handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: "o3-mini",
+					messages: [
+						{
+							role: "developer",
+							content: "Formatting re-enabled\nYou are a helpful assistant.",
+						},
+						{ role: "user", content: "Hello!" },
+					],
+					stream: true,
+					stream_options: { include_usage: true },
+					reasoning_effort: "medium",
+					temperature: 0.7,
+				}),
+				{},
+			)
+
+			// Verify max_completion_tokens is NOT included
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("max_completion_tokens")
+		})
+
+		it("should handle O3 model non-streaming with reasoning_effort and max_completion_tokens when includeMaxTokens is true", async () => {
+			const o3Handler = new OpenAiHandler({
+				...o3Options,
+				openAiStreamingEnabled: false,
+				includeMaxTokens: true,
+				modelTemperature: 0.3,
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = o3Handler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: "o3-mini",
+					messages: [
+						{
+							role: "developer",
+							content: "Formatting re-enabled\nYou are a helpful assistant.",
+						},
+						{ role: "user", content: "Hello!" },
+					],
+					reasoning_effort: "medium",
+					temperature: 0.3,
+					// O3 models do not support the deprecated max_tokens parameter but do support max_completion_tokens
+					max_completion_tokens: 65536, // Using default maxTokens from o3Options
+				}),
+				{},
+			)
+
+			// Verify stream is not set
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("stream")
+		})
+
+		it("should use default temperature of 0 when not specified for O3 models", async () => {
+			const o3Handler = new OpenAiHandler({
+				...o3Options,
+				// No modelTemperature specified
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = o3Handler.createMessage(systemPrompt, messages)
+			await stream.next()
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					temperature: 0, // Default temperature
+				}),
+				{},
+			)
+		})
+
+		it("should handle O3 model with Azure AI Inference Service respecting includeMaxTokens", async () => {
+			const o3AzureHandler = new OpenAiHandler({
+				...o3Options,
+				openAiBaseUrl: "https://test.services.ai.azure.com",
+				includeMaxTokens: false, // Should NOT include max_completion_tokens
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = o3AzureHandler.createMessage(systemPrompt, messages)
+			await stream.next()
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: "o3-mini",
+				}),
+				{ path: "/models/chat/completions" },
+			)
+
+			// Verify max_completion_tokens is NOT included when includeMaxTokens is false
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("max_completion_tokens")
+		})
+
+		it("should NOT include the deprecated max_tokens for O3 model with Azure AI Inference Service even when includeMaxTokens is true", async () => {
+			const o3AzureHandler = new OpenAiHandler({
+				...o3Options,
+				openAiBaseUrl: "https://test.services.ai.azure.com",
+				includeMaxTokens: true, // max_completion_tokens is sent instead of the deprecated max_tokens
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = o3AzureHandler.createMessage(systemPrompt, messages)
+			await stream.next()
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: "o3-mini",
+					// O3 models reject the deprecated max_tokens parameter
+				}),
+				{ path: "/models/chat/completions" },
+			)
+
+			// Verify the deprecated max_tokens parameter is NOT included
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("max_tokens")
+		})
+	})
 })
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 62aa4cc8a3..b4f256f43a 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -158,10 +158,8 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 				...(reasoning && reasoning),
 			}
 
-			// @TODO: Move this to the `getModelParams` function.
-			if (this.options.includeMaxTokens) {
-				requestOptions.max_tokens = modelInfo.maxTokens
-			}
+			// Add max_completion_tokens if needed
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
 			const stream = await this.client.chat.completions.create(
 				requestOptions,
@@ -222,6 +220,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 					: [systemMessage, ...convertToOpenAiMessages(messages)],
 			}
 
+			// Add max_completion_tokens if needed
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
 			const response = await this.client.chat.completions.create(
 				requestOptions,
 				this._isAzureAiInference(modelUrl) ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
@@ -256,12 +257,17 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	async completePrompt(prompt: string): Promise<string> {
 		try {
 			const isAzureAiInference = this._isAzureAiInference(this.options.openAiBaseUrl)
+			const model = this.getModel()
+			const modelInfo = model.info
 
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
-				model: this.getModel().id,
+				model: model.id,
 				messages: [{ role: "user", content: prompt }],
 			}
 
+			// Add max_completion_tokens if needed
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
 			const response = await this.client.chat.completions.create(
 				requestOptions,
 				isAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
@@ -282,25 +288,34 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
 	): ApiStream {
-		if (this.options.openAiStreamingEnabled ?? true) {
-			const methodIsAzureAiInference = this._isAzureAiInference(this.options.openAiBaseUrl)
+		const modelInfo = this.getModel().info
+		const methodIsAzureAiInference = this._isAzureAiInference(this.options.openAiBaseUrl)
 
+		if (this.options.openAiStreamingEnabled ?? true) {
 			const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
 
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+				stream: true,
+				...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
+				reasoning_effort: modelInfo.reasoningEffort,
+				temperature: this.options.modelTemperature ?? 0,
+			}
+
+			// O3 family models do not support the deprecated max_tokens parameter
+			// but they do support max_completion_tokens (the modern OpenAI parameter).
+			// This allows O3 models to limit response length when includeMaxTokens is enabled.
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
 			const stream = await this.client.chat.completions.create(
-				{
-					model: modelId,
-					messages: [
-						{
-							role: "developer",
-							content: `Formatting re-enabled\n${systemPrompt}`,
-						},
-						...convertToOpenAiMessages(messages),
-					],
-					stream: true,
-					...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
-					reasoning_effort: this.getModel().info.reasoningEffort,
-				},
+				requestOptions,
 				methodIsAzureAiInference ? { path: OPENAI_AZURE_AI_INFERENCE_PATH } : {},
 			)
@@ -315,9 +330,14 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 					},
 					...convertToOpenAiMessages(messages),
 				],
+				reasoning_effort: modelInfo.reasoningEffort,
+				temperature: this.options.modelTemperature ?? 0,
 			}
 
-			const methodIsAzureAiInference = this._isAzureAiInference(this.options.openAiBaseUrl)
+			// O3 family models do not support the deprecated max_tokens parameter
+			// but they do support max_completion_tokens (the modern OpenAI parameter).
+			// This allows O3 models to limit response length when includeMaxTokens is enabled.
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
 
 			const response = await this.client.chat.completions.create(
 				requestOptions,
@@ -369,6 +389,25 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 		const urlHost = this._getUrlHost(baseUrl)
 		return urlHost.endsWith(".services.ai.azure.com")
 	}
+
+	/**
+	 * Adds max_completion_tokens to the request body if needed based on provider configuration.
+	 * Note: max_tokens is deprecated in favor of max_completion_tokens as per OpenAI documentation.
+	 * Used for all models, including the O3 family, which rejects the deprecated max_tokens parameter.
+	 */
+	private addMaxTokensIfNeeded(
+		requestOptions:
+			| OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming
+			| OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming,
+		modelInfo: ModelInfo,
+	): void {
+		// Only add max_completion_tokens if includeMaxTokens is true
+		if (this.options.includeMaxTokens === true) {
+			// Use the user-configured modelMaxTokens if available, otherwise fall back to the model's default maxTokens
+			// Using max_completion_tokens because max_tokens is deprecated
+			requestOptions.max_completion_tokens = this.options.modelMaxTokens || modelInfo.maxTokens
+		}
+	}
 }
 
 export async function getOpenAiModels(baseUrl?: string, apiKey?: string, openAiHeaders?: Record<string, string>) {
diff --git a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx
index 43fea540c3..12ddaf77a7 100644
--- a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx
+++ b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx
@@ -164,6 +164,16 @@ export const OpenAICompatible = ({
 				onChange={handleInputChange("openAiStreamingEnabled", noTransform)}>
 				{t("settings:modelInfo.enableStreaming")}
 			</Checkbox>
+			<div>
+				<Checkbox
+					checked={apiConfiguration?.includeMaxTokens ?? true}
+					onChange={handleInputChange("includeMaxTokens", noTransform)}>
+					{t("settings:includeMaxOutputTokens")}
+				</Checkbox>
+				<div className="text-sm text-vscode-descriptionForeground ml-6">
+					{t("settings:includeMaxOutputTokensDescription")}
+				</div>
+			</div>
diff --git a/webview-ui/src/components/settings/providers/__tests__/OpenAICompatible.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/OpenAICompatible.spec.tsx
new file mode 100644
index 0000000000..f7e26c19b2
--- /dev/null
+++ b/webview-ui/src/components/settings/providers/__tests__/OpenAICompatible.spec.tsx
@@ -0,0 +1,314 @@
+import React from "react"
+import { render, screen, fireEvent } from "@testing-library/react"
+import { OpenAICompatible } from "../OpenAICompatible"
+import { ProviderSettings } from "@roo-code/types"
+
+// Mock the vscrui Checkbox component
+jest.mock("vscrui", () => ({
+	Checkbox: ({ children, checked, onChange }: any) => (
+		<label data-testid={`checkbox-${String(children).toLowerCase()}`}>
+			<input
+				type="checkbox"
+				checked={checked}
+				onChange={(e) => onChange(e.target.checked)}
+				data-testid={`checkbox-input-${String(children).toLowerCase()}`}
+			/>
+			{children}
+		</label>
+	),
+}))
+
+// Mock the VSCodeTextField and VSCodeButton components
+jest.mock("@vscode/webview-ui-toolkit/react", () => ({
+	VSCodeTextField: ({
+		children,
+		value,
+		onInput,
+		placeholder,
+		className,
+		style,
+		"data-testid": dataTestId,
+		...rest
+	}: any) => {
+		return (
+			<div className={className} style={style}>
+				{children}
+				<input
+					value={value}
+					onInput={(e) => onInput && onInput(e)}
+					placeholder={placeholder}
+					data-testid={dataTestId}
+					{...rest}
+				/>
+			</div>
+		)
+	},
+	VSCodeButton: ({ children, onClick, appearance, title }: any) => (
+		<button onClick={onClick} data-appearance={appearance} title={title}>
+			{children}
+		</button>
+	),
+}))
+
+// Mock the translation hook
+jest.mock("@src/i18n/TranslationContext", () => ({
+	useAppTranslation: () => ({
+		t: (key: string) => key,
+	}),
+}))
+
+// Mock the UI components
+jest.mock("@src/components/ui", () => ({
+	Button: ({ children, onClick }: any) => <button onClick={onClick}>{children}</button>,
+}))
+
+// Mock other components
+jest.mock("../../ModelPicker", () => ({
+	ModelPicker: () => <div>Model Picker</div>,
+}))
+
+jest.mock("../../R1FormatSetting", () => ({
+	R1FormatSetting: () => <div>R1 Format Setting</div>,
+}))
+
+jest.mock("../../ThinkingBudget", () => ({
+	ThinkingBudget: () => <div>Thinking Budget</div>,
+}))
+
+// Mock react-use
+jest.mock("react-use", () => ({
+	useEvent: jest.fn(),
+}))
+
+describe("OpenAICompatible Component - includeMaxTokens checkbox", () => {
+	const mockSetApiConfigurationField = jest.fn()
+	const mockOrganizationAllowList = {
+		allowAll: true,
+		providers: {},
+	}
+
+	beforeEach(() => {
+		jest.clearAllMocks()
+	})
+
+	describe("Checkbox Rendering", () => {
+		it("should render the includeMaxTokens checkbox", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: true,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			// Check that the checkbox is rendered
+			const checkbox = screen.getByTestId("checkbox-settings:includemaxoutputtokens")
+			expect(checkbox).toBeInTheDocument()
+
+			// Check that the description text is rendered
+			expect(screen.getByText("settings:includeMaxOutputTokensDescription")).toBeInTheDocument()
+		})
+
+		it("should render the checkbox with correct translation keys", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: true,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			// Check that the correct translation key is used for the label
+			expect(screen.getByText("settings:includeMaxOutputTokens")).toBeInTheDocument()
+
+			// Check that the correct translation key is used for the description
+			expect(screen.getByText("settings:includeMaxOutputTokensDescription")).toBeInTheDocument()
+		})
+	})
+
+	describe("Initial State", () => {
+		it("should show checkbox as checked when includeMaxTokens is true", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: true,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			const checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			expect(checkboxInput).toBeChecked()
+		})
+
+		it("should show checkbox as unchecked when includeMaxTokens is false", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: false,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			const checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			expect(checkboxInput).not.toBeChecked()
+		})
+
+		it("should default to checked when includeMaxTokens is undefined", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				// includeMaxTokens is not defined
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			const checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			expect(checkboxInput).toBeChecked()
+		})
+
+		it("should default to checked when includeMaxTokens is null", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: null as any,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			const checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			expect(checkboxInput).toBeChecked()
+		})
+	})
+
+	describe("User Interaction", () => {
+		it("should call handleInputChange with correct parameters when checkbox is clicked from checked to unchecked", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: true,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			const checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			fireEvent.click(checkboxInput)
+
+			// Verify setApiConfigurationField was called with correct parameters
+			expect(mockSetApiConfigurationField).toHaveBeenCalledWith("includeMaxTokens", false)
+		})
+
+		it("should call handleInputChange with correct parameters when checkbox is clicked from unchecked to checked", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: false,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			const checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			fireEvent.click(checkboxInput)
+
+			// Verify setApiConfigurationField was called with correct parameters
+			expect(mockSetApiConfigurationField).toHaveBeenCalledWith("includeMaxTokens", true)
+		})
+	})
+
+	describe("Component Updates", () => {
+		it("should update checkbox state when apiConfiguration changes", () => {
+			const apiConfigurationInitial: Partial<ProviderSettings> = {
+				includeMaxTokens: true,
+			}
+
+			const { rerender } = render(
+				<OpenAICompatible
+					apiConfiguration={apiConfigurationInitial as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			// Verify initial state
+			let checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			expect(checkboxInput).toBeChecked()
+
+			// Update with new configuration
+			const apiConfigurationUpdated: Partial<ProviderSettings> = {
+				includeMaxTokens: false,
+			}
+
+			rerender(
+				<OpenAICompatible
+					apiConfiguration={apiConfigurationUpdated as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			// Verify updated state
+			checkboxInput = screen.getByTestId("checkbox-input-settings:includemaxoutputtokens")
+			expect(checkboxInput).not.toBeChecked()
+		})
+	})
+
+	describe("UI Structure", () => {
+		it("should render the checkbox with description in correct structure", () => {
+			const apiConfiguration: Partial<ProviderSettings> = {
+				includeMaxTokens: true,
+			}
+
+			render(
+				<OpenAICompatible
+					apiConfiguration={apiConfiguration as ProviderSettings}
+					setApiConfigurationField={mockSetApiConfigurationField}
+					organizationAllowList={mockOrganizationAllowList}
+				/>,
+			)
+
+			// Check that the checkbox and description are in a div container
+			const checkbox = screen.getByTestId("checkbox-settings:includemaxoutputtokens")
+			const parentDiv = checkbox.closest("div")
+			expect(parentDiv).toBeInTheDocument()
+
+			// Check that the description has the correct styling classes
+			const description = screen.getByText("settings:includeMaxOutputTokensDescription")
+			expect(description).toHaveClass("text-sm", "text-vscode-descriptionForeground", "ml-6")
+		})
+	})
+})
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json
index 205ff89e3a..c88005ea61 100644
--- a/webview-ui/src/i18n/locales/ca/settings.json
+++ b/webview-ui/src/i18n/locales/ca/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "ARN personalitzat",
 		"useCustomArn": "Utilitza ARN personalitzat..."
-	}
+	},
+	"includeMaxOutputTokens": "Incloure tokens màxims de sortida",
+	"includeMaxOutputTokensDescription": "Enviar el paràmetre de tokens màxims de sortida a les sol·licituds API. Alguns proveïdors poden no admetre això."
 }
diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json
index 044c4f5220..27a7486436 100644
--- a/webview-ui/src/i18n/locales/de/settings.json
+++ b/webview-ui/src/i18n/locales/de/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "Benutzerdefinierte ARN",
 		"useCustomArn": "Benutzerdefinierte ARN verwenden..."
-	}
+	},
+	"includeMaxOutputTokens": "Maximale Ausgabe-Tokens einbeziehen",
+	"includeMaxOutputTokensDescription": "Sende den Parameter für maximale Ausgabe-Tokens in API-Anfragen. Einige Anbieter unterstützen dies möglicherweise nicht."
 }
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index b7f2a014c9..b8e51afc50 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "Custom ARN",
 		"useCustomArn": "Use custom ARN..."
-	}
+	},
+	"includeMaxOutputTokens": "Include max output tokens",
+	"includeMaxOutputTokensDescription": "Send max output tokens parameter in API requests. Some providers may not support this."
 }
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json
index b9d0d25ec3..db8b4736eb 100644
--- a/webview-ui/src/i18n/locales/es/settings.json
+++ b/webview-ui/src/i18n/locales/es/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "ARN personalizado",
 		"useCustomArn": "Usar ARN personalizado..."
-	}
+	},
+	"includeMaxOutputTokens": "Incluir tokens máximos de salida",
+	"includeMaxOutputTokensDescription": "Enviar parámetro de tokens máximos de salida en solicitudes API. Algunos proveedores pueden no soportar esto."
 }
diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json
index 87cc5c7a0a..0bf837accb 100644
--- a/webview-ui/src/i18n/locales/fr/settings.json
+++ b/webview-ui/src/i18n/locales/fr/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "ARN personnalisé",
 		"useCustomArn": "Utiliser un ARN personnalisé..."
-	}
+	},
+	"includeMaxOutputTokens": "Inclure les tokens de sortie maximum",
+	"includeMaxOutputTokensDescription": "Envoyer le paramètre de tokens de sortie maximum dans les requêtes API. Certains fournisseurs peuvent ne pas supporter cela."
 }
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json
index 86afe59319..fec1b27007 100644
--- a/webview-ui/src/i18n/locales/hi/settings.json
+++ b/webview-ui/src/i18n/locales/hi/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "कस्टम ARN",
 		"useCustomArn": "कस्टम ARN का उपयोग करें..."
-	}
+	},
+	"includeMaxOutputTokens": "अधिकतम आउटपुट टोकन शामिल करें",
+	"includeMaxOutputTokensDescription": "API अनुरोधों में अधिकतम आउटपुट टोकन पैरामीटर भेजें। कुछ प्रदाता इसका समर्थन नहीं कर सकते हैं।"
 }
diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json
index c9fc4db506..6d6a8e93b1 100644
--- a/webview-ui/src/i18n/locales/id/settings.json
+++ b/webview-ui/src/i18n/locales/id/settings.json
@@ -642,5 +642,7 @@
 	"labels": {
 		"customArn": "ARN Kustom",
 		"useCustomArn": "Gunakan ARN kustom..."
-	}
+	},
+	"includeMaxOutputTokens": "Sertakan token output maksimum",
+	"includeMaxOutputTokensDescription": "Kirim parameter token output maksimum dalam permintaan API. Beberapa provider mungkin tidak mendukung ini."
 }
diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json
index 50c6528210..fcb389a4a7 100644
--- a/webview-ui/src/i18n/locales/it/settings.json
+++ b/webview-ui/src/i18n/locales/it/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "ARN personalizzato",
 		"useCustomArn": "Usa ARN personalizzato..."
-	}
+	},
+	"includeMaxOutputTokens": "Includi token di output massimi",
+	"includeMaxOutputTokensDescription": "Invia il parametro dei token di output massimi nelle richieste API. Alcuni provider potrebbero non supportarlo."
 }
diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json
index 7e82190b7a..eabd751308 100644
--- a/webview-ui/src/i18n/locales/ja/settings.json
+++ b/webview-ui/src/i18n/locales/ja/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "カスタム ARN",
 		"useCustomArn": "カスタム ARN を使用..."
-	}
+	},
+	"includeMaxOutputTokens": "最大出力トークンを含める",
+	"includeMaxOutputTokensDescription": "APIリクエストで最大出力トークンパラメータを送信します。一部のプロバイダーはこれをサポートしていない場合があります。"
 }
diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json
index a2dc6e9b64..68ca2a963c 100644
--- a/webview-ui/src/i18n/locales/ko/settings.json
+++ b/webview-ui/src/i18n/locales/ko/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "사용자 지정 ARN",
 		"useCustomArn": "사용자 지정 ARN 사용..."
-	}
+	},
+	"includeMaxOutputTokens": "최대 출력 토큰 포함",
+	"includeMaxOutputTokensDescription": "API 요청에서 최대 출력 토큰 매개변수를 전송합니다. 일부 제공업체는 이를 지원하지 않을 수 있습니다."
 }
diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json
index 94d63a71db..996c0c673c 100644
--- a/webview-ui/src/i18n/locales/nl/settings.json
+++ b/webview-ui/src/i18n/locales/nl/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "Aangepaste ARN",
 		"useCustomArn": "Aangepaste ARN gebruiken..."
-	}
+	},
+	"includeMaxOutputTokens": "Maximale output tokens opnemen",
+	"includeMaxOutputTokensDescription": "Stuur maximale output tokens parameter in API-verzoeken. Sommige providers ondersteunen dit mogelijk niet."
 }
diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json
index 41eae85d79..cf4421e00e 100644
--- a/webview-ui/src/i18n/locales/pl/settings.json
+++ b/webview-ui/src/i18n/locales/pl/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "Niestandardowy ARN",
 		"useCustomArn": "Użyj niestandardowego ARN..."
-	}
+	},
+	"includeMaxOutputTokens": "Uwzględnij maksymalne tokeny wyjściowe",
+	"includeMaxOutputTokensDescription": "Wyślij parametr maksymalnych tokenów wyjściowych w żądaniach API. Niektórzy dostawcy mogą tego nie obsługiwać."
 }
diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json
index 35254166a4..229419dd23 100644
--- a/webview-ui/src/i18n/locales/pt-BR/settings.json
+++ b/webview-ui/src/i18n/locales/pt-BR/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "ARN personalizado",
 		"useCustomArn": "Usar ARN personalizado..."
-	}
+	},
+	"includeMaxOutputTokens": "Incluir tokens máximos de saída",
+	"includeMaxOutputTokensDescription": "Enviar parâmetro de tokens máximos de saída nas solicitações de API. Alguns provedores podem não suportar isso."
 }
diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json
index 51b3206537..dcce5e5b1a 100644
--- a/webview-ui/src/i18n/locales/ru/settings.json
+++ b/webview-ui/src/i18n/locales/ru/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "Пользовательский ARN",
 		"useCustomArn": "Использовать пользовательский ARN..."
-	}
+	},
+	"includeMaxOutputTokens": "Включить максимальные выходные токены",
+	"includeMaxOutputTokensDescription": "Отправлять параметр максимальных выходных токенов в API-запросах. Некоторые провайдеры могут не поддерживать это."
 }
diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json
index 9900008861..f8f53ae21c 100644
--- a/webview-ui/src/i18n/locales/tr/settings.json
+++ b/webview-ui/src/i18n/locales/tr/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "Özel ARN",
 		"useCustomArn": "Özel ARN kullan..."
-	}
+	},
+	"includeMaxOutputTokens": "Maksimum çıktı tokenlerini dahil et",
+	"includeMaxOutputTokensDescription": "API isteklerinde maksimum çıktı token parametresini gönder. Bazı sağlayıcılar bunu desteklemeyebilir."
 }
diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json
index 5f260bd845..edb2b386b2 100644
--- a/webview-ui/src/i18n/locales/vi/settings.json
+++ b/webview-ui/src/i18n/locales/vi/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "ARN tùy chỉnh",
 		"useCustomArn": "Sử dụng ARN tùy chỉnh..."
-	}
+	},
+	"includeMaxOutputTokens": "Bao gồm token đầu ra tối đa",
+	"includeMaxOutputTokensDescription": "Gửi tham số token đầu ra tối đa trong các yêu cầu API. Một số nhà cung cấp có thể không hỗ trợ điều này."
 }
diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json
index d35e7a4054..51ae2269e4 100644
--- a/webview-ui/src/i18n/locales/zh-CN/settings.json
+++ b/webview-ui/src/i18n/locales/zh-CN/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "自定义 ARN",
 		"useCustomArn": "使用自定义 ARN..."
-	}
+	},
+	"includeMaxOutputTokens": "包含最大输出 Token 数",
+	"includeMaxOutputTokensDescription": "在 API 请求中发送最大输出 Token 参数。某些提供商可能不支持此功能。"
 }
diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json
index 5f96f692e4..07544879cd 100644
--- a/webview-ui/src/i18n/locales/zh-TW/settings.json
+++ b/webview-ui/src/i18n/locales/zh-TW/settings.json
@@ -613,5 +613,7 @@
 	"labels": {
 		"customArn": "自訂 ARN",
 		"useCustomArn": "使用自訂 ARN..."
-	}
+	},
+	"includeMaxOutputTokens": "包含最大輸出 Token 數",
+	"includeMaxOutputTokensDescription": "在 API 請求中傳送最大輸出 Token 參數。某些提供商可能不支援此功能。"
 }