From 5ff59936f94f4147fada07337de7ea5fb2db6cc4 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 19 Jun 2025 19:27:39 +0100 Subject: [PATCH 01/31] feat: Adding more settings and control over Gemini - with topP, topK, maxOutputTokens - allow users to enable URL context and Grounding Research --- packages/types/src/provider-settings.ts | 6 ++ .../__tests__/gemini-handler.spec.ts | 72 ++++++++++++++ src/api/providers/gemini.ts | 44 +++++++-- .../components/settings/providers/Gemini.tsx | 97 +++++++++++++++++++ 4 files changed, 210 insertions(+), 9 deletions(-) create mode 100644 src/api/providers/__tests__/gemini-handler.spec.ts diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 65e3f9b5b6..2238081ea4 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -151,6 +151,12 @@ const lmStudioSchema = baseProviderSettingsSchema.extend({ const geminiSchema = apiModelIdProviderModelSchema.extend({ geminiApiKey: z.string().optional(), googleGeminiBaseUrl: z.string().optional(), + topP: z.number().optional(), + topK: z.number().optional(), + maxOutputTokens: z.number().optional(), + enableUrlContext: z.boolean().optional(), + enableGrounding: z.boolean().optional(), + contextLimit: z.number().optional(), }) const openAiNativeSchema = apiModelIdProviderModelSchema.extend({ diff --git a/src/api/providers/__tests__/gemini-handler.spec.ts b/src/api/providers/__tests__/gemini-handler.spec.ts new file mode 100644 index 0000000000..2805593ca5 --- /dev/null +++ b/src/api/providers/__tests__/gemini-handler.spec.ts @@ -0,0 +1,72 @@ +import { describe, it, expect, vi } from "vitest" +import { GeminiHandler } from "../gemini" +import type { ApiHandlerOptions } from "../../../shared/api" +import type { Anthropic } from "@anthropic-ai/sdk" + +describe("GeminiHandler backend support", () => { + it("slices messages when contextLimit is set", async () => { + const options = { apiProvider: "gemini", contextLimit: 1 } as ApiHandlerOptions + const handler = new GeminiHandler(options) + const stub = vi.fn().mockReturnValue((async function* () {})()) + // @ts-ignore access private client + handler["client"].models.generateContentStream = stub + const messages = [ + { role: "user", content: [{ type: "text", text: "first" }] }, + { role: "assistant", content: [{ type: "text", text: "second" }] }, + ] as Anthropic.Messages.MessageParam[] + for await (const _ of handler.createMessage("instr", messages)) { + } + expect(stub).toHaveBeenCalledOnce() + const params = stub.mock.calls[0][0] + expect(params.contents).toHaveLength(1) + }) + + it("passes maxOutputTokens, topP, topK, and tools for URL context and grounding in config", async () => { + const options = { + apiProvider: "gemini", + maxOutputTokens: 5, + topP: 0.5, + topK: 10, + enableUrlContext: true, + enableGrounding: true, + } as ApiHandlerOptions + const handler = new GeminiHandler(options) + const stub = vi.fn().mockReturnValue((async function* () {})()) + // @ts-ignore access private client + handler["client"].models.generateContentStream = stub + await handler.createMessage("instr", [] as any).next() + const config = stub.mock.calls[0][0].config + expect(config.maxOutputTokens).toBe(5) + expect(config.topP).toBe(0.5) + expect(config.topK).toBe(10) + expect(config.tools).toEqual([{ urlContext: {} }, { googleSearch: {} }]) + }) + + it("completePrompt passes config overrides without tools when URL context and 
grounding disabled", async () => { + const options = { + apiProvider: "gemini", + maxOutputTokens: 7, + topP: 0.7, + topK: 3, + enableUrlContext: false, + enableGrounding: false, + } as ApiHandlerOptions + const handler = new GeminiHandler(options) + const stub = vi.fn().mockResolvedValue({ text: "ok" }) + // @ts-ignore access private client + handler["client"].models.generateContent = stub + const res = await handler.completePrompt("hi") + expect(res).toBe("ok") + expect(stub).toHaveBeenCalledWith( + expect.objectContaining({ + config: expect.objectContaining({ + maxOutputTokens: 7, + topP: 0.7, + topK: 3, + }), + }), + ) + const promptConfig = stub.mock.calls[0][0].config + expect(promptConfig.tools).toBeUndefined() + }) +}) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index 6765c8676d..8790682f08 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -65,15 +65,27 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl ): ApiStream { const { id: model, info, reasoning: thinkingConfig, maxTokens } = this.getModel() - const contents = messages.map(convertAnthropicMessageToGemini) + const limitedMessages = this.options.contextLimit ? messages.slice(-this.options.contextLimit) : messages + const contents = limitedMessages.map(convertAnthropicMessageToGemini) - const config: GenerateContentConfig = { + const tools: Array> = [] + if (this.options.enableUrlContext) { + tools.push({ urlContext: {} }) + } + if (this.options.enableGrounding) { + tools.push({ googleSearch: {} }) + } + const rawConfig = { systemInstruction, httpOptions: this.options.googleGeminiBaseUrl ? { baseUrl: this.options.googleGeminiBaseUrl } : undefined, thinkingConfig, - maxOutputTokens: this.options.modelMaxTokens ?? maxTokens ?? undefined, + maxOutputTokens: this.options.maxOutputTokens ?? this.options.modelMaxTokens ?? maxTokens ?? undefined, temperature: this.options.modelTemperature ?? 0, + topP: this.options.topP, + topK: this.options.topK, + ...(tools.length > 0 ? { tools } : {}), } + const config = rawConfig as unknown as GenerateContentConfig const params: GenerateContentParameters = { model, contents, config } @@ -146,15 +158,29 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl try { const { id: model } = this.getModel() + const tools: Array> = [] + if (this.options.enableUrlContext) { + tools.push({ urlContext: {} }) + } + if (this.options.enableGrounding) { + tools.push({ googleSearch: {} }) + } + const rawPromptConfig = { + httpOptions: this.options.googleGeminiBaseUrl + ? { baseUrl: this.options.googleGeminiBaseUrl } + : undefined, + temperature: this.options.modelTemperature ?? 0, + maxOutputTokens: this.options.maxOutputTokens ?? this.options.modelMaxTokens, + topP: this.options.topP, + topK: this.options.topK, + ...(tools.length > 0 ? { tools } : {}), + } + const promptConfig = rawPromptConfig as unknown as GenerateContentConfig + const result = await this.client.models.generateContent({ model, contents: [{ role: "user", parts: [{ text: prompt }] }], - config: { - httpOptions: this.options.googleGeminiBaseUrl - ? { baseUrl: this.options.googleGeminiBaseUrl } - : undefined, - temperature: this.options.modelTemperature ?? 0, - }, + config: promptConfig, }) return result.text ?? 
"" diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 21056f12d5..04e8464f95 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -1,6 +1,7 @@ import { useCallback, useState } from "react" import { Checkbox } from "vscrui" import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" +import { Slider } from "@src/components/ui" import type { ProviderSettings } from "@roo-code/types" @@ -72,6 +73,102 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiPro /> )} +
+ +
+ setApiConfigurationField("topP", values[0])} + className="flex-grow" + /> + {(apiConfiguration.topP ?? 0).toFixed(2)} +
+
+ {t("settings:providers.topPDescription")} +
+
+
+ +
+ setApiConfigurationField("topK", values[0])} + className="flex-grow" + /> + {apiConfiguration.topK ?? 0} +
+
+ {t("settings:providers.topKDescription")} +
+
+
+ +
+ setApiConfigurationField("maxOutputTokens", values[0])} + className="flex-grow" + /> + parseInt((e as any).target.value, 10))} + className="w-16" + /> +
+
+ {t("settings:providers.maxOutputTokensDescription")} +
+
+ setApiConfigurationField("enableUrlContext", checked)}> + {t("settings:providers.enableUrlContext")} + +
+ {t("settings:providers.enableUrlContextDescription")} +
+ setApiConfigurationField("enableGrounding", checked)}> + {t("settings:providers.enableGrounding")} + +
+ {t("settings:providers.enableGroundingDescription")} +
+
+ +
+ setApiConfigurationField("contextLimit", values[0])} + className="flex-grow" + /> + parseInt((e as any).target.value, 10))} + className="w-16" + /> +
+
+ {t("settings:providers.contextLimitDescription")} +
+
) } From afcb66d5bde2d6bed3947ebe0f6d712b70efb151 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Mon, 23 Jun 2025 00:29:49 +0100 Subject: [PATCH 02/31] feat: Adding parameter titles and descriptions + translation to all languages --- .../components/settings/providers/Gemini.tsx | 28 +++++++++++-------- webview-ui/src/i18n/locales/ca/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/de/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/en/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/es/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/fr/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/hi/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/id/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/it/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/ja/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/ko/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/nl/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/pl/settings.json | 26 +++++++++++++++++ .../src/i18n/locales/pt-BR/settings.json | 26 +++++++++++++++++ webview-ui/src/i18n/locales/ru/settings.json | 26 +++++++++++++++++ 15 files changed, 380 insertions(+), 12 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 04e8464f95..34cfd588a9 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -74,7 +74,7 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiPro )}
- +
{(apiConfiguration.topP ?? 0).toFixed(2)}
- {t("settings:providers.topPDescription")} + {t("settings:providers.geminiParameters.topP.description")}
- +
{apiConfiguration.topK ?? 0}
- {t("settings:providers.topKDescription")} + {t("settings:providers.geminiParameters.topK.description")}
- +
- {t("settings:providers.maxOutputTokensDescription")} + {t("settings:providers.geminiParameters.maxOutputTokens.description")}
setApiConfigurationField("enableUrlContext", checked)}> - {t("settings:providers.enableUrlContext")} + {t("settings:providers.geminiParameters.urlContext.title")}
- {t("settings:providers.enableUrlContextDescription")} + {t("settings:providers.geminiParameters.urlContext.description")}
setApiConfigurationField("enableGrounding", checked)}> - {t("settings:providers.enableGrounding")} + {t("settings:providers.geminiParameters.groundingSearch.title")}
- {t("settings:providers.enableGroundingDescription")} + {t("settings:providers.geminiParameters.groundingSearch.description")}
- +
- {t("settings:providers.contextLimitDescription")} + {t("settings:providers.geminiParameters.contextLimit.description")}
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index c88005ea61..ea23ab7dc2 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Nota: Si no veieu l'ús de la caché, proveu de seleccionar un model diferent i després tornar a seleccionar el model desitjat.", "vscodeLmModel": "Model de llenguatge", "vscodeLmWarning": "Nota: Aquesta és una integració molt experimental i el suport del proveïdor variarà. Si rebeu un error sobre un model no compatible, és un problema del proveïdor.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controla el nombre de tokens amb més probabilitat a considerar en cada pas. Valors més alts augmenten la diversitat, valors més baixos fan que la sortida sigui més enfocada i determinista." + }, + "topP": { + "title": "Top P", + "description": "Controla la probabilitat acumulada dels tokens a considerar (mètode nucleus). Valors més propers a 1,0 augmenten la diversitat, mentre que valors més baixos fan que la sortida sigui més enfocada." + }, + "maxOutputTokens": { + "title": "Tokens màxims de sortida", + "description": "Nombre màxim de tokens que el model pot generar en una sola resposta. Valors més alts permeten respostes més llargues, però augmenten l’ús de tokens i els costos." + }, + "urlContext": { + "title": "Activa context d’URL", + "description": "Permet a Gemini accedir i processar URLs per obtenir context addicional durant la generació de respostes. Útil per a tasques que requereixen anàlisi de contingut web." + }, + "groundingSearch": { + "title": "Activa grounding amb cerca de Google", + "description": "Permet a Gemini cercar informació actual a Google i fonamentar les respostes en dades en temps real. Útil per a consultes que requereixen informació actualitzada." + }, + "contextLimit": { + "title": "Límit de context", + "description": "Nombre màxim de missatges anteriors a incloure en el context. Valors més baixos redueixen l’ús de tokens i els costos, però poden limitar la continuïtat de la conversa." + } + }, "googleCloudSetup": { "title": "Per utilitzar Google Cloud Vertex AI, necessiteu:", "step1": "1. Crear un compte de Google Cloud, habilitar l'API de Vertex AI i habilitar els models Claude necessaris.", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 27a7486436..0d5275ea18 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Hinweis: Wenn Sie keine Cache-Nutzung sehen, versuchen Sie ein anderes Modell auszuwählen und dann Ihr gewünschtes Modell erneut auszuwählen.", "vscodeLmModel": "Sprachmodell", "vscodeLmWarning": "Hinweis: Dies ist eine sehr experimentelle Integration und die Anbieterunterstützung variiert. Wenn Sie einen Fehler über ein nicht unterstütztes Modell erhalten, liegt das Problem auf Anbieterseite.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Steuert die Anzahl der wahrscheinlichsten Tokens, die in jedem Schritt berücksichtigt werden. Höhere Werte erhöhen die Vielfalt, niedrigere Werte sorgen für fokussiertere und deterministischere Ausgaben." + }, + "topP": { + "title": "Top P", + "description": "Steuert die kumulative Wahrscheinlichkeit der in Betracht zu ziehenden Tokens (Nucleus Sampling). 
Werte näher bei 1,0 erhöhen die Vielfalt, während niedrigere Werte die Ausgabe fokussierter machen." + }, + "maxOutputTokens": { + "title": "Maximale Ausgabetokens", + "description": "Maximale Anzahl der Tokens, die das Modell in einer einzigen Antwort generieren kann. Höhere Werte ermöglichen längere Antworten, erhöhen jedoch den Tokenverbrauch und die Kosten." + }, + "urlContext": { + "title": "URL-Kontext aktivieren", + "description": "Ermöglicht es Gemini, URLs für zusätzlichen Kontext bei der Generierung von Antworten zu verwenden und zu verarbeiten. Nützlich für Aufgaben, die eine Webinhaltsanalyse erfordern." + }, + "groundingSearch": { + "title": "Grounding mit Google-Suche aktivieren", + "description": "Ermöglicht es Gemini, Google nach aktuellen Informationen zu durchsuchen und Antworten auf Echtzeitdaten zu stützen. Nützlich für Abfragen, die aktuelle Informationen erfordern." + }, + "contextLimit": { + "title": "Kontextlimit", + "description": "Maximale Anzahl vorheriger Nachrichten, die in den Kontext einbezogen werden. Niedrigere Werte reduzieren den Tokenverbrauch und die Kosten, können jedoch die Kontinuität der Konversation einschränken." + } + }, "googleCloudSetup": { "title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:", "step1": "1. Ein Google Cloud-Konto erstellen, die Vertex AI API aktivieren & die gewünschten Claude-Modelle aktivieren.", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index b8e51afc50..48f3f4454c 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Note: If you don't see cache usage, try selecting a different model and then selecting your desired model again.", "vscodeLmModel": "Language Model", "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. If you get an error about a model not being supported, that's an issue on the provider's end.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controls the number of highest probability tokens to consider for each step. Higher values increase diversity, lower values make output more focused and deterministic." + }, + "topP": { + "title": "Top P", + "description": "Controls the cumulative probability of tokens to consider (nucleus sampling). Values closer to 1.0 increase diversity, while lower values make output more focused." + }, + "maxOutputTokens": { + "title": "Max Output Tokens", + "description": "Maximum number of tokens the model can generate in a single response. Higher values allow longer responses but increase token usage and costs." + }, + "urlContext": { + "title": "Enable URL Context", + "description": "Allows Gemini to access and process URLs for additional context when generating responses. Useful for tasks requiring web content analysis." + }, + "groundingSearch": { + "title": "Enable Grounding with Google Search", + "description": "Enables Gemini to search Google for current information and ground responses in real-time data. Useful for queries requiring up-to-date information." + }, + "contextLimit": { + "title": "Context Limit", + "description": "Maximum number of previous messages to include in context. Lower values reduce token usage and costs but may limit conversation continuity." + } + }, "googleCloudSetup": { "title": "To use Google Cloud Vertex AI, you need to:", "step1": "1. 
Create a Google Cloud account, enable the Vertex AI API & enable the desired Claude models.", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index db8b4736eb..8ad04d8592 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Nota: Si no ve el uso del caché, intente seleccionar un modelo diferente y luego seleccionar nuevamente su modelo deseado.", "vscodeLmModel": "Modelo de lenguaje", "vscodeLmWarning": "Nota: Esta es una integración muy experimental y el soporte del proveedor variará. Si recibe un error sobre un modelo no compatible, es un problema del proveedor.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controla el número de tokens con mayor probabilidad a considerar en cada paso. Valores más altos aumentan la diversidad, valores más bajos hacen que la salida sea más enfocada y determinista." + }, + "topP": { + "title": "Top P", + "description": "Controla la probabilidad acumulada de tokens a considerar (muestra de núcleo). Valores cercanos a 1.0 aumentan la diversidad, mientras que valores más bajos hacen que la salida sea más enfocada." + }, + "maxOutputTokens": { + "title": "Tokens máximos de salida", + "description": "Número máximo de tokens que el modelo puede generar en una sola respuesta. Valores más altos permiten respuestas más largas, pero aumentan el uso de tokens y los costos." + }, + "urlContext": { + "title": "Habilitar contexto de URL", + "description": "Permite que Gemini acceda y procese URLs para contexto adicional al generar respuestas. Útil para tareas que requieren análisis de contenido web." + }, + "groundingSearch": { + "title": "Habilitar grounding con búsqueda en Google", + "description": "Permite que Gemini busque en Google información actual y fundamente las respuestas en datos en tiempo real. Útil para consultas que requieren información actualizada." + }, + "contextLimit": { + "title": "Límite de contexto", + "description": "Número máximo de mensajes anteriores que se incluirán en el contexto. Valores más bajos reducen el uso de tokens y los costos, pero pueden limitar la continuidad de la conversación." + } + }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", "step1": "1. Crear una cuenta de Google Cloud, habilitar la API de Vertex AI y habilitar los modelos Claude deseados.", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 0bf837accb..0b6d35a855 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Remarque : Si vous ne voyez pas l'utilisation du cache, essayez de sélectionner un modèle différent puis de sélectionner à nouveau votre modèle souhaité.", "vscodeLmModel": "Modèle de langage", "vscodeLmWarning": "Remarque : Il s'agit d'une intégration très expérimentale et le support des fournisseurs variera. Si vous recevez une erreur concernant un modèle non pris en charge, c'est un problème du côté du fournisseur.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Contrôle le nombre de tokens les plus probables à considérer à chaque étape. Des valeurs plus élevées augmentent la diversité, des valeurs plus faibles rendent la sortie plus ciblée et déterministe." 
+ }, + "topP": { + "title": "Top P", + "description": "Contrôle la probabilité cumulée des tokens à considérer (échantillonnage nucleus). Des valeurs proches de 1,0 augmentent la diversité, tandis que des valeurs plus faibles rendent la sortie plus ciblée." + }, + "maxOutputTokens": { + "title": "Nombre maximal de tokens de sortie", + "description": "Nombre maximal de tokens que le modèle peut générer dans une seule réponse. Des valeurs plus élevées permettent des réponses plus longues mais augmentent l'utilisation des tokens et les coûts." + }, + "urlContext": { + "title": "Activer le contexte d'URL", + "description": "Permet à Gemini d'accéder et de traiter les URL pour un contexte supplémentaire lors de la génération des réponses. Utile pour les tâches nécessitant l'analyse de contenu web." + }, + "groundingSearch": { + "title": "Activer la mise en contexte via la recherche Google", + "description": "Permet à Gemini d'effectuer des recherches sur Google pour obtenir des informations actuelles et fonder les réponses sur des données en temps réel. Utile pour les requêtes nécessitant des informations à jour." + }, + "contextLimit": { + "title": "Limite de contexte", + "description": "Nombre maximum de messages précédents à inclure dans le contexte. Des valeurs plus faibles réduisent l'utilisation des tokens et les coûts, mais peuvent limiter la continuité de la conversation." + } + }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", "step1": "1. Créer un compte Google Cloud, activer l'API Vertex AI et activer les modèles Claude souhaités.", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index fec1b27007..f7ba92a1e9 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "नोट: यदि आप कैश उपयोग नहीं देखते हैं, तो एक अलग मॉडल चुनने का प्रयास करें और फिर अपने वांछित मॉडल को पुनः चुनें।", "vscodeLmModel": "भाषा मॉडल", "vscodeLmWarning": "नोट: यह एक बहुत ही प्रायोगिक एकीकरण है और प्रदाता समर्थन भिन्न होगा। यदि आपको किसी मॉडल के समर्थित न होने की त्रुटि मिलती है, तो यह प्रदाता की ओर से एक समस्या है।", + "geminiParameters": { + "topK": { + "title": "शीर्ष K", + "description": "प्रत्येक चरण के लिए विचार करने के लिए उच्चतम संभावना वाले टोकनों की संख्या को नियंत्रित करता है। उच्च मान विविधता बढ़ाते हैं, निम्न मान आउटपुट को अधिक केंद्रित और निर्धार्त बनाते हैं।" + }, + "topP": { + "title": "शीर्ष P", + "description": "समूह नमूना (न्यूक्लियस सैंपलिंग) में विचार करने के लिए टोकनों की संचयी संभावना को नियंत्रित करता है। 1.0 के करीब मान विविधता बढ़ाते हैं, जबकि निम्न मान आउटपुट को अधिक केंद्रित बनाते हैं।" + }, + "maxOutputTokens": { + "title": "अधिकतम आउटपुट टोकन", + "description": "मॉडल एकल प्रतिक्रिया में उत्पन्न कर सकने वाले अधिकतम टोकनों की संख्या। उच्च मान लंबी प्रतिक्रियाएं सक्षम करते हैं लेकिन टोकन उपयोग और लागत बढ़ाते हैं।" + }, + "urlContext": { + "title": "URL संदर्भ सक्षम करें", + "description": "जब प्रतिक्रियाएं उत्पन्न करता है, अतिरिक्त संदर्भ के लिए Gemini को URL तक पहुंचने और संसाधित करने की अनुमति देता है। वेब सामग्री विश्लेषण वाली कार्यों के लिए उपयोगी।" + }, + "groundingSearch": { + "title": "Google खोज के साथ ग्राउंडिंग सक्षम करें", + "description": "Gemini को वास्तविक समय के डेटा पर आधारित उत्तर प्रदान करने के लिए Google पर जानकारी खोजने और उत्तरों को ग्राउंड करने की अनुमति देता है। अद्यतित जानकारी की आवश्यकता वाली क्वेरीज़ के लिए उपयोगी।" + }, + "contextLimit": { + "title": "संदर्भ सीमा", + 
"description": "संदर्भ में शामिल करने के लिए पिछले संदेशों की अधिकतम संख्या। निम्न मान टोकन उपयोग और लागत कम करते हैं, लेकिन बातचीत की निरंतरता सीमित कर सकते हैं।" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", "step1": "1. Google Cloud खाता बनाएं, Vertex AI API सक्षम करें और वांछित Claude मॉडल सक्षम करें।", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 6d6a8e93b1..9eb80900a3 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -214,6 +214,32 @@ "cacheUsageNote": "Catatan: Jika kamu tidak melihat penggunaan cache, coba pilih model yang berbeda lalu pilih model yang kamu inginkan lagi.", "vscodeLmModel": "Model Bahasa", "vscodeLmWarning": "Catatan: Ini adalah integrasi yang sangat eksperimental dan dukungan provider akan bervariasi. Jika kamu mendapat error tentang model yang tidak didukung, itu adalah masalah di sisi provider.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Mengontrol jumlah token dengan probabilitas tertinggi yang dipertimbangkan pada setiap langkah. Nilai lebih tinggi meningkatkan keberagaman, nilai lebih rendah membuat keluaran lebih terfokus dan deterministik." + }, + "topP": { + "title": "Top P", + "description": "Mengontrol probabilitas kumulatif token yang dipertimbangkan (pengambilan sampel nucleus). Nilai mendekati 1.0 meningkatkan keberagaman, sedangkan nilai lebih rendah membuat keluaran lebih terfokus." + }, + "maxOutputTokens": { + "title": "Token Output Maksimum", + "description": "Jumlah maksimum token yang dapat dihasilkan model dalam satu respons. Nilai lebih tinggi memungkinkan respons lebih panjang tetapi meningkatkan penggunaan token dan biaya." + }, + "urlContext": { + "title": "Aktifkan Konteks URL", + "description": "Memungkinkan Gemini mengakses dan memproses URL untuk konteks tambahan saat menghasilkan respons. Berguna untuk tugas yang memerlukan analisis konten web." + }, + "groundingSearch": { + "title": "Aktifkan Grounding dengan Pencarian Google", + "description": "Memungkinkan Gemini mencari informasi terkini di Google dan mendasarkan respons pada data waktu nyata. Berguna untuk kueri yang memerlukan informasi terkini." + }, + "contextLimit": { + "title": "Batas Konteks", + "description": "Jumlah maksimum pesan sebelumnya yang disertakan dalam konteks. Nilai lebih rendah mengurangi penggunaan token dan biaya tetapi dapat membatasi kelanjutan percakapan." + } + }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", "step1": "1. Buat akun Google Cloud, aktifkan Vertex AI API & aktifkan model Claude yang diinginkan.", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index fcb389a4a7..b5cfcbc3cb 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Nota: Se non vedi l'utilizzo della cache, prova a selezionare un modello diverso e poi seleziona nuovamente il modello desiderato.", "vscodeLmModel": "Modello linguistico", "vscodeLmWarning": "Nota: Questa è un'integrazione molto sperimentale e il supporto del fornitore varierà. 
Se ricevi un errore relativo a un modello non supportato, si tratta di un problema del fornitore.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controlla il numero di token con la massima probabilità da considerare ad ogni passaggio. Valori più alti aumentano la diversità, valori più bassi rendono l'output più focalizzato e deterministico." + }, + "topP": { + "title": "Top P", + "description": "Controlla la probabilità cumulativa dei token da considerare (campionamento nucleare). Valori prossimi a 1,0 aumentano la diversità, mentre valori più bassi rendono l'output più focalizzato." + }, + "maxOutputTokens": { + "title": "Token massimi di output", + "description": "Numero massimo di token che il modello può generare in una singola risposta. Valori più alti consentono risposte più lunghe ma aumentano l'utilizzo dei token e i costi." + }, + "urlContext": { + "title": "Abilita contesto URL", + "description": "Consente a Gemini di accedere e processare URL per contesto aggiuntivo durante la generazione delle risposte. Utile per attività che richiedono analisi di contenuti web." + }, + "groundingSearch": { + "title": "Abilita grounding con ricerca Google", + "description": "Consente a Gemini di cercare informazioni aggiornate su Google e basare le risposte su dati in tempo reale. Utile per query che richiedono informazioni aggiornate." + }, + "contextLimit": { + "title": "Limite di contesto", + "description": "Numero massimo di messaggi precedenti da includere nel contesto. Valori più bassi riducono l'utilizzo dei token e i costi ma possono limitare la continuità della conversazione." + } + }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", "step1": "1. Creare un account Google Cloud, abilitare l'API Vertex AI e abilitare i modelli Claude desiderati.", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index eabd751308..fbb631c469 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "注意:キャッシュの使用が表示されない場合は、別のモデルを選択してから希望のモデルを再度選択してみてください。", "vscodeLmModel": "言語モデル", "vscodeLmWarning": "注意:これは非常に実験的な統合であり、プロバイダーのサポートは異なります。モデルがサポートされていないというエラーが表示された場合、それはプロバイダー側の問題です。", + "geminiParameters": { + "topK": { + "title": "トップK", + "description": "各ステップで考慮する最も確率の高いトークンの数を制御します。値を大きくすると多様性が増し、値を小さくすると出力がより集中して決定的になります。" + }, + "topP": { + "title": "トップP", + "description": "考慮するトークンの累積確率を制御します(ニュークリアスサンプリング)。1.0に近い値は多様性を高め、低い値は出力をより集中させます。" + }, + "maxOutputTokens": { + "title": "最大出力トークン数", + "description": "モデルが1つの応答で生成できる最大トークン数。値が大きいほど長い応答が可能になりますが、トークン使用量とコストが増加します。" + }, + "urlContext": { + "title": "URLコンテキストを有効にする", + "description": "応答を生成する際に、追加のコンテキストとしてGeminiがURLにアクセスして処理できるようにします。Webコンテンツの分析を必要とするタスクに役立ちます。" + }, + "groundingSearch": { + "title": "Google検索でのグラウンディングを有効にする", + "description": "GeminiがGoogleを検索して最新情報を取得し、リアルタイムデータに基づいて応答をグラウンディングできるようにします。最新情報が必要なクエリに便利です。" + }, + "contextLimit": { + "title": "コンテキスト制限", + "description": "コンテキストに含める過去のメッセージの最大数。値を小さくするとトークン使用量とコストが削減されますが、会話の連続性が制限される場合があります。" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", "step1": "1. 
Google Cloudアカウントを作成し、Vertex AI APIを有効にして、希望するClaudeモデルを有効にします。", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 68ca2a963c..b819e4b63c 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "참고: 캐시 사용이 표시되지 않는 경우, 다른 모델을 선택한 다음 원하는 모델을 다시 선택해 보세요.", "vscodeLmModel": "언어 모델", "vscodeLmWarning": "참고: 이는 매우 실험적인 통합이며, 공급자 지원은 다를 수 있습니다. 모델이 지원되지 않는다는 오류가 발생하면, 이는 공급자 측의 문제입니다.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "각 단계에서 고려할 최고 확률 토큰의 수를 제어합니다. 값이 높을수록 다양성이 증가하고, 값이 낮을수록 출력이 더 집중되고 결정적이 됩니다." + }, + "topP": { + "title": "Top P", + "description": "고려할 토큰의 누적 확률을 제어합니다(중앙 샘플링). 1.0에 가까운 값은 다양성을 높이고, 낮은 값은 출력을 더 집중되게 합니다." + }, + "maxOutputTokens": { + "title": "최대 출력 토큰", + "description": "모델이 하나의 응답에서 생성할 수 있는 최대 토큰 수입니다. 높은 값은 더 긴 응답을 허용하지만 토큰 사용량과 비용을 증가시킵니다." + }, + "urlContext": { + "title": "URL 컨텍스트 활성화", + "description": "응답을 생성할 때 추가 컨텍스트를 위해 Gemini가 URL에 액세스하고 처리할 수 있도록 합니다. 웹 콘텐츠 분석이 필요한 작업에 유용합니다." + }, + "groundingSearch": { + "title": "Google 검색과 함께 근거 지정 활성화", + "description": "Gemini가 최신 정보를 얻기 위해 Google을 검색하고 응답을 실시간 데이터에 근거하도록 합니다. 최신 정보가 필요한 쿼리에 유용합니다." + }, + "contextLimit": { + "title": "컨텍스트 제한", + "description": "컨텍스트에 포함할 이전 메시지의 최대 수입니다. 낮은 값은 토큰 사용량과 비용을 줄이지만 대화 연속성이 제한될 수 있습니다." + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", "step1": "1. Google Cloud 계정을 만들고, Vertex AI API를 활성화하고, 원하는 Claude 모델을 활성화하세요.", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 996c0c673c..ac12521607 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Let op: als je geen cachegebruik ziet, probeer dan een ander model te selecteren en vervolgens weer je gewenste model.", "vscodeLmModel": "Taalmodel", "vscodeLmWarning": "Let op: dit is een zeer experimentele integratie en ondersteuning door providers kan variëren. Krijg je een foutmelding dat een model niet wordt ondersteund, dan ligt dat aan de provider.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Beheert het aantal tokens met de hoogste waarschijnlijkheid dat in elke stap wordt overwogen. Hogere waarden vergroten de diversiteit, lagere waarden maken de uitvoer meer gefocust en deterministisch." + }, + "topP": { + "title": "Top P", + "description": "Beheert de cumulatieve kans van tokens om te overwegen (nucleus-sampling). Waarden dicht bij 1,0 vergroten de diversiteit, terwijl lagere waarden de uitvoer meer gefocust maken." + }, + "maxOutputTokens": { + "title": "Maximale uitvoertokens", + "description": "Maximaal aantal tokens dat het model in één antwoord kan genereren. Hogere waarden maken langere antwoorden mogelijk, maar verhogen het tokengebruik en de kosten." + }, + "urlContext": { + "title": "URL-context inschakelen", + "description": "Staat Gemini toe om URL's te openen en te verwerken voor extra context bij het genereren van antwoorden. Handig voor taken die webinhoudsanalyse vereisen." + }, + "groundingSearch": { + "title": "Grounding met Google-zoekopdracht inschakelen", + "description": "Staat Gemini toe om Google te doorzoeken voor actuele informatie en antwoorden op realtime gegevens te baseren. Handig voor vragen die actuele informatie vereisen." 
+ }, + "contextLimit": { + "title": "Contextlimiet", + "description": "Maximaal aantal vorige berichten dat in de context wordt opgenomen. Lagere waarden verlagen het tokengebruik en de kosten, maar kunnen de continuïteit van het gesprek beperken." + } + }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", "step1": "1. Maak een Google Cloud-account aan, schakel de Vertex AI API in en activeer de gewenste Claude-modellen.", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index cf4421e00e..39db2457de 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Uwaga: Jeśli nie widzisz użycia bufora, spróbuj wybrać inny model, a następnie ponownie wybrać żądany model.", "vscodeLmModel": "Model językowy", "vscodeLmWarning": "Uwaga: To bardzo eksperymentalna integracja, a wsparcie dostawcy może się różnić. Jeśli otrzymasz błąd dotyczący nieobsługiwanego modelu, jest to problem po stronie dostawcy.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Kontroluje liczbę tokenów o najwyższym prawdopodobieństwie rozważanych na każdym kroku. Wyższe wartości zwiększają różnorodność, niższe wartości powodują bardziej skupione i deterministyczne wyniki." + }, + "topP": { + "title": "Top P", + "description": "Kontroluje skumulowane prawdopodobieństwo tokenów do rozważenia (nucleus sampling). Wartości bliskie 1.0 zwiększają różnorodność, a niższe wartości sprawiają, że wyjście jest bardziej skupione." + }, + "maxOutputTokens": { + "title": "Maksymalna liczba tokenów wyjściowych", + "description": "Maksymalna liczba tokenów, które model może wygenerować w jednej odpowiedzi. Wyższe wartości umożliwiają dłuższe odpowiedzi, ale zwiększają użycie tokenów i koszty." + }, + "urlContext": { + "title": "Włącz kontekst URL", + "description": "Pozwala Gemini uzyskiwać dostęp i przetwarzać adresy URL w celu uzyskania dodatkowego kontekstu podczas generowania odpowiedzi. Przydatne w zadaniach wymagających analizy zawartości sieci Web." + }, + "groundingSearch": { + "title": "Włącz grounding przy użyciu wyszukiwarki Google", + "description": "Pozwala Gemini przeszukiwać Google w celu uzyskania aktualnych informacji i opierać odpowiedzi na danych w czasie rzeczywistym. Przydatne w zapytaniach wymagających najnowszych informacji." + }, + "contextLimit": { + "title": "Limit kontekstu", + "description": "Maksymalna liczba poprzednich wiadomości uwzględnianych w kontekście. Niższe wartości zmniejszają użycie tokenów i koszty, ale mogą ograniczać ciągłość rozmowy." + } + }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", "step1": "1. Utworzyć konto Google Cloud, włączyć API Vertex AI i włączyć żądane modele Claude.", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 229419dd23..584163a529 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Nota: Se você não vir o uso do cache, tente selecionar um modelo diferente e depois selecionar novamente o modelo desejado.", "vscodeLmModel": "Modelo de Linguagem", "vscodeLmWarning": "Nota: Esta é uma integração muito experimental e o suporte do provedor pode variar. 
Se você receber um erro sobre um modelo não ser suportado, isso é um problema do lado do provedor.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controla o número de tokens com maior probabilidade a considerar em cada etapa. Valores mais altos aumentam a diversidade, valores mais baixos tornam a saída mais focada e determinística." + }, + "topP": { + "title": "Top P", + "description": "Controla a probabilidade cumulativa de tokens a considerar (amostragem de núcleo). Valores próximos a 1,0 aumentam a diversidade, enquanto valores mais baixos tornam a saída mais focada." + }, + "maxOutputTokens": { + "title": "Tokens máximos de saída", + "description": "Número máximo de tokens que o modelo pode gerar em uma única resposta. Valores mais altos permitem respostas mais longas, mas aumentam o uso de tokens e os custos." + }, + "urlContext": { + "title": "Ativar contexto de URL", + "description": "Permite que o Gemini acesse e processe URLs para contexto adicional ao gerar respostas. Útil para tarefas que exijam análise de conteúdo da web." + }, + "groundingSearch": { + "title": "Ativar grounding com pesquisa no Google", + "description": "Permite que o Gemini pesquise informações atuais no Google e fundamente as respostas em dados em tempo real. Útil para consultas que requerem informações atualizadas." + }, + "contextLimit": { + "title": "Limite de contexto", + "description": "Número máximo de mensagens anteriores a incluir no contexto. Valores mais baixos reduzem o uso de tokens e os custos, mas podem limitar a continuidade da conversa." + } + }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", "step1": "1. Criar uma conta Google Cloud, ativar a API Vertex AI e ativar os modelos Claude desejados.", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index dcce5e5b1a..355b9ad4d3 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Примечание: если вы не видите использование кэша, попробуйте выбрать другую модель, а затем вернуться к нужной.", "vscodeLmModel": "Языковая модель", "vscodeLmWarning": "Внимание: это очень экспериментальная интеграция, поддержка провайдера может отличаться. Если возникает ошибка о неподдерживаемой модели — проблема на стороне провайдера.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Управляет количеством токенов с наивысшей вероятностью, которые учитываются на каждом шаге. Более высокие значения увеличивают разнообразие, более низкие делают вывод более сфокусированным и детерминированным." + }, + "topP": { + "title": "Top P", + "description": "Управляет накопительной вероятностью токенов для рассмотрения (nucleus sampling). Значения ближе к 1,0 повышают разнообразие, в то время как более низкие значения делают вывод более сфокусированным." + }, + "maxOutputTokens": { + "title": "Максимальное количество токенов вывода", + "description": "Максимальное количество токенов, которое модель может сгенерировать в одном ответе. Более высокие значения позволяют генерировать более длинные ответы, но увеличивают использование токенов и стоимость." + }, + "urlContext": { + "title": "Включить контекст URL", + "description": "Позволяет Gemini получать доступ к URL-адресам и обрабатывать их для дополнительного контекста при генерации ответов. Полезно для задач, требующих анализа веб-контента." 
+ }, + "groundingSearch": { + "title": "Включить grounding через поиск Google", + "description": "Позволяет Gemini искать актуальную информацию в Google и основывать ответы на данных в реальном времени. Полезно для запросов, требующих актуальной информации." + }, + "contextLimit": { + "title": "Ограничение контекста", + "description": "Максимальное число предыдущих сообщений, включаемых в контекст. Более низкие значения снижают использование токенов и стоимость, но могут ограничить непрерывность разговора." + } + }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", "step1": "1. Создайте аккаунт Google Cloud, включите Vertex AI API и нужные модели Claude.", From ac96e99cdf79a3fbc2db7d52dc88f71228f12497 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Mon, 23 Jun 2025 00:55:36 +0100 Subject: [PATCH 03/31] feat: adding more translations --- webview-ui/src/i18n/locales/tr/settings.json | 26 +++++++++++++++++++ webview-ui/src/i18n/locales/vi/settings.json | 26 +++++++++++++++++++ .../src/i18n/locales/zh-CN/settings.json | 26 +++++++++++++++++++ .../src/i18n/locales/zh-TW/settings.json | 26 +++++++++++++++++++ 4 files changed, 104 insertions(+) diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index f8f53ae21c..5b9e177828 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Not: Önbellek kullanımını görmüyorsanız, farklı bir model seçip ardından istediğiniz modeli tekrar seçmeyi deneyin.", "vscodeLmModel": "Dil Modeli", "vscodeLmWarning": "Not: Bu çok deneysel bir entegrasyondur ve sağlayıcı desteği değişebilir. Bir modelin desteklenmediğine dair bir hata alırsanız, bu sağlayıcı tarafındaki bir sorundur.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Her adım için dikkate alınacak en yüksek olasılıklı token sayısını kontrol eder. Daha yüksek değerler çeşitliliği artırır, daha düşük değerler çıktıyı daha odaklı ve belirleyici yapar." + }, + "topP": { + "title": "Top P", + "description": "Düşünülecek token'ların kümülatif olasılığını kontrol eder (nucleus sampling). 1,0'a yakın değerler çeşitliliği artırırken, daha düşük değerler çıktıyı daha odaklı hale getirir." + }, + "maxOutputTokens": { + "title": "Maksimum Çıkış Tokenleri", + "description": "Modelin tek bir yanıtta oluşturabileceği maksimum token sayısını kontrol eder. Daha yüksek değerler daha uzun yanıtlar sağlar, ancak token kullanımı ve maliyetleri artırır." + }, + "urlContext": { + "title": "URL Bağlamını Etkinleştir", + "description": "Yanıtlar oluşturulurken ek bağlam için Gemini'nin URL'lere erişmesine ve işlemesine izin verir. Web içeriği analizi gerektiren görevler için faydalıdır." + }, + "groundingSearch": { + "title": "Google Aramasıyla Grounding Etkinleştir", + "description": "Gemini'nin güncel bilgileri almak için Google'da arama yapmasına ve yanıtları gerçek zamanlı verilere dayandırmasına izin verir. Güncel bilgi gerektiren sorgular için kullanışlıdır." + }, + "contextLimit": { + "title": "Bağlam Sınırı", + "description": "Bağlamda dahil edilecek önceki mesajların maksimum sayısı. Daha düşük değerler token kullanımını ve maliyeti azaltır, ancak konuşmanın devamlılığını sınırlayabilir." + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", "step1": "1. 
Google Cloud hesabı oluşturun, Vertex AI API'sini etkinleştirin ve istediğiniz Claude modellerini etkinleştirin.", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index edb2b386b2..9808445fd5 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "Lưu ý: Nếu bạn không thấy việc sử dụng bộ nhớ đệm, hãy thử chọn một mô hình khác và sau đó chọn lại mô hình mong muốn của bạn.", "vscodeLmModel": "Mô hình ngôn ngữ", "vscodeLmWarning": "Lưu ý: Đây là tích hợp thử nghiệm và hỗ trợ nhà cung cấp có thể khác nhau. Nếu bạn nhận được lỗi về mô hình không được hỗ trợ, đó là vấn đề từ phía nhà cung cấp.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Điều chỉnh số lượng token có xác suất cao nhất được xem xét cho mỗi bước. Giá trị cao hơn tăng tính đa dạng, giá trị thấp hơn khiến đầu ra tập trung hơn và mang tính xác định." + }, + "topP": { + "title": "Top P", + "description": "Điều chỉnh xác suất tích lũy của các token được xem xét (lấy mẫu nucleus). Giá trị gần 1.0 tăng tính đa dạng, trong khi giá trị thấp hơn khiến đầu ra tập trung hơn." + }, + "maxOutputTokens": { + "title": "Token đầu ra tối đa", + "description": "Số lượng token tối đa mà mô hình có thể tạo ra trong một phản hồi. Giá trị cao hơn cho phép phản hồi dài hơn nhưng tăng mức sử dụng token và chi phí." + }, + "urlContext": { + "title": "Bật ngữ cảnh URL", + "description": "Cho phép Gemini truy cập và xử lý URL để có thêm ngữ cảnh khi tạo phản hồi. Hữu ích cho các tác vụ yêu cầu phân tích nội dung web." + }, + "groundingSearch": { + "title": "Bật grounding với tìm kiếm Google", + "description": "Cho phép Gemini tìm kiếm trên Google để lấy thông tin mới nhất và căn cứ phản hồi dựa trên dữ liệu thời gian thực. Hữu ích cho các truy vấn yêu cầu thông tin cập nhật." + }, + "contextLimit": { + "title": "Giới hạn ngữ cảnh", + "description": "Số lượng tối đa các tin nhắn trước đó được đưa vào ngữ cảnh. Giá trị thấp hơn giảm mức sử dụng token và chi phí nhưng có thể hạn chế tính liên tục của cuộc trò chuyện." + } + }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", "step1": "1. 
Tạo tài khoản Google Cloud, kích hoạt Vertex AI API và kích hoạt các mô hình Claude mong muốn.", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 51ae2269e4..9e885d3fce 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "提示:若未显示缓存使用情况,请切换模型后重新选择", "vscodeLmModel": "VSCode LM 模型", "vscodeLmWarning": "注意:这是一个非常实验性的集成,提供商支持会有所不同。如果您收到有关不支持模型的错误,则这是提供商方面的问题。", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "控制每个步骤要考虑的最高概率令牌数量。值越高,多样性越高;值越低,输出越集中和确定。" + }, + "topP": { + "title": "Top P", + "description": "控制要考虑的令牌的累积概率(核采样)。接近1.0的值增加多样性,而较低的值使输出更集中。" + }, + "maxOutputTokens": { + "title": "最大输出令牌", + "description": "模型在单个响应中可以生成的最大令牌数。值越高,响应越长,但会增加令牌使用量和成本。" + }, + "urlContext": { + "title": "启用URL上下文", + "description": "允许Gemini在生成响应时访问和处理URL以获取额外上下文。适用于需要网络内容分析的任务。" + }, + "groundingSearch": { + "title": "启用Google搜索落地", + "description": "允许Gemini在Google中搜索最新信息,并在实时数据的基础上生成响应。适用于需要最新信息的查询。" + }, + "contextLimit": { + "title": "上下文限制", + "description": "包括在上下文中的先前消息的最大数量。较低的值可减少令牌使用量和成本,但可能限制对话连续性。" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 注册Google Cloud账号并启用Vertex AI API", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 07544879cd..99a614c06c 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -210,6 +210,32 @@ "cacheUsageNote": "注意:如果您沒有看到快取使用情況,請嘗試選擇其他模型,然後重新選擇您想要的模型。", "vscodeLmModel": "語言模型", "vscodeLmWarning": "注意:此整合功能仍處於實驗階段,各供應商的支援程度可能不同。如果出現模型不支援的錯誤,通常是供應商方面的問題。", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "控制要考慮的最高機率代幣數量。" + }, + "topP": { + "title": "Top P", + "description": "控制生成時累積機率閾值。" + }, + "maxOutputTokens": { + "title": "最大輸出代幣", + "description": "控制模型可返回的最大代幣數量。" + }, + "urlContext": { + "title": "啟用 URL 上下文", + "description": "允許在生成期間從提供的 URL 獲取頁面內容並將其包含在上下文中。" + }, + "groundingSearch": { + "title": "啟用使用 Google 搜索進行基礎支持", + "description": "在生成期間使用 Google 搜索以獲取最新資訊並將其包含在上下文中。" + }, + "contextLimit": { + "title": "上下文限制", + "description": "生成期間要包含的最大上下文大小(以代幣為單位)。" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 建立 Google Cloud 帳戶,啟用 Vertex AI API 並啟用所需的 Claude 模型。", From 67b476285e4bab69337d56e063a35046964e29fd Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Wed, 25 Jun 2025 18:50:08 +0100 Subject: [PATCH 04/31] feat: adding `contextLimit` implementation from `maxContextWindow` PR + working with profile-specific thresholding --- src/api/providers/gemini.ts | 9 +- src/core/task/Task.ts | 5 +- .../src/components/settings/ApiOptions.tsx | 18 +- .../src/components/settings/SettingsView.tsx | 14 + .../components/settings/providers/Gemini.tsx | 394 +++++++++++++----- webview-ui/src/i18n/locales/en/settings.json | 12 + 6 files changed, 355 insertions(+), 97 deletions(-) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index 8790682f08..1fb10749b2 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -144,9 +144,16 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl override getModel() { const modelId = this.options.apiModelId let id = modelId && modelId in geminiModels ? 
(modelId as GeminiModelId) : geminiDefaultModelId - const info: ModelInfo = geminiModels[id] + let info: ModelInfo = geminiModels[id] const params = getModelParams({ format: "gemini", modelId: id, model: info, settings: this.options }) + if (this.options.contextLimit) { + info = { + ...info, + contextWindow: this.options.contextLimit, + } + } + // The `:thinking` suffix indicates that the model is a "Hybrid" // reasoning model and that reasoning is required to be enabled. // The actual model ID honored by Gemini's API does not have this diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index 46da7485ed..9487600fc0 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -1706,7 +1706,10 @@ export class Task extends EventEmitter { ? this.apiConfiguration.modelMaxTokens || DEFAULT_THINKING_MODEL_MAX_TOKENS : modelInfo.maxTokens - const contextWindow = modelInfo.contextWindow + const contextWindow = + this.apiConfiguration.apiProvider === "gemini" && this.apiConfiguration.contextLimit + ? this.apiConfiguration.contextLimit + : modelInfo.contextWindow const truncateResult = await truncateConversationIfNeeded({ messages: this.apiConversationHistory, diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 8f6050f4f2..f8e4764afc 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -74,6 +74,10 @@ export interface ApiOptionsProps { fromWelcomeView?: boolean errorMessage: string | undefined setErrorMessage: React.Dispatch> + currentProfileId?: string + profileThresholds?: Record + autoCondenseContextPercent?: number + setProfileThreshold?: (profileId: string, threshold: number) => void } const ApiOptions = ({ @@ -83,6 +87,10 @@ const ApiOptions = ({ fromWelcomeView, errorMessage, setErrorMessage, + currentProfileId, + profileThresholds, + autoCondenseContextPercent, + setProfileThreshold, }: ApiOptionsProps) => { const { t } = useAppTranslation() const { organizationAllowList } = useExtensionState() @@ -411,7 +419,15 @@ const ApiOptions = ({ )} {selectedProvider === "gemini" && ( - + )} {selectedProvider === "openai" && ( diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 839ce25b69..99b1407d97 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -233,6 +233,16 @@ const SettingsView = forwardRef(({ onDone, t }) }, []) + const setProfileThreshold = useCallback( + (profileId: string, threshold: number) => { + setCachedStateField("profileThresholds", { + ...profileThresholds, + [profileId]: threshold, + }) + }, + [profileThresholds, setCachedStateField], + ) + const setTelemetrySetting = useCallback((setting: TelemetrySetting) => { setCachedState((prevState) => { if (prevState.telemetrySetting === setting) { @@ -576,6 +586,10 @@ const SettingsView = forwardRef(({ onDone, t setApiConfigurationField={setApiConfigurationField} errorMessage={errorMessage} setErrorMessage={setErrorMessage} + currentProfileId={currentApiConfigName} + profileThresholds={profileThresholds || {}} + autoCondenseContextPercent={autoCondenseContextPercent || 75} + setProfileThreshold={setProfileThreshold} /> diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 34cfd588a9..fd5051baf4 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ 
b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -1,27 +1,101 @@ -import { useCallback, useState } from "react" +import { useCallback, useState, useMemo } from "react" import { Checkbox } from "vscrui" import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import { Slider } from "@src/components/ui" import type { ProviderSettings } from "@roo-code/types" +import { geminiModels, geminiDefaultModelId, type GeminiModelId } from "@roo-code/types" import { useAppTranslation } from "@src/i18n/TranslationContext" import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" +import { vscode } from "@src/utils/vscode" import { inputEventTransform } from "../transforms" type GeminiProps = { apiConfiguration: ProviderSettings setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void + currentModelId?: string + currentProfileId?: string + profileThresholds?: Record + autoCondenseContextPercent?: number + setProfileThreshold?: (profileId: string, threshold: number) => void } -export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiProps) => { +export const Gemini = ({ + apiConfiguration, + setApiConfigurationField, + currentModelId, + currentProfileId, + profileThresholds = {}, + autoCondenseContextPercent = 75, + setProfileThreshold, +}: GeminiProps) => { const { t } = useAppTranslation() const [googleGeminiBaseUrlSelected, setGoogleGeminiBaseUrlSelected] = useState( !!apiConfiguration?.googleGeminiBaseUrl, ) + const [isCustomContextLimit, setIsCustomContextLimit] = useState( + apiConfiguration?.contextLimit !== undefined && apiConfiguration?.contextLimit !== null, + ) + + const modelInfo = useMemo(() => { + const modelId = ( + currentModelId && currentModelId in geminiModels ? currentModelId : geminiDefaultModelId + ) as GeminiModelId + return geminiModels[modelId] + }, [currentModelId]) + + const getCurrentThreshold = useCallback(() => { + if (!currentProfileId) return autoCondenseContextPercent + + const profileThreshold = profileThresholds[currentProfileId] + if (profileThreshold === undefined || profileThreshold === -1) { + return autoCondenseContextPercent + } + return profileThreshold + }, [currentProfileId, profileThresholds, autoCondenseContextPercent]) + + const handleThresholdChange = useCallback( + (newThreshold: number) => { + if (!currentProfileId || !setProfileThreshold) return + + setProfileThreshold(currentProfileId, newThreshold) + + vscode.postMessage({ + type: "profileThresholds", + values: { + ...profileThresholds, + [currentProfileId]: newThreshold, + }, + }) + }, + [currentProfileId, profileThresholds, setProfileThreshold], + ) + + const getTriggerDetails = useCallback(() => { + const contextWindow = apiConfiguration?.contextLimit || modelInfo?.contextWindow || 1048576 + const threshold = getCurrentThreshold() + + const TOKEN_BUFFER_PERCENTAGE = 0.1 + const maxTokens = modelInfo?.maxTokens + const reservedTokens = maxTokens || contextWindow * 0.2 + const allowedTokens = Math.floor(contextWindow * (1 - TOKEN_BUFFER_PERCENTAGE) - reservedTokens) + + const percentageBasedTrigger = Math.floor(contextWindow * (threshold / 100)) + + return { + percentageBasedTrigger, + allowedTokens, + actualTrigger: Math.min(percentageBasedTrigger, allowedTokens), + triggerReason: allowedTokens < percentageBasedTrigger ? 
"token-limit" : "percentage-threshold", + maxTokens, + reservedTokens, + } + }, [apiConfiguration?.contextLimit, modelInfo, getCurrentThreshold]) + const handleInputChange = useCallback( ( field: K, @@ -51,12 +125,12 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiPro {t("settings:providers.getGeminiApiKey")} )} +
{ setGoogleGeminiBaseUrlSelected(checked) - if (!checked) { setApiConfigurationField("googleGeminiBaseUrl", "") } @@ -73,104 +147,236 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiPro /> )}
-
- -
- setApiConfigurationField("topP", values[0])} - className="flex-grow" - /> - {(apiConfiguration.topP ?? 0).toFixed(2)} -
-
- {t("settings:providers.geminiParameters.topP.description")} + +
+

Model Parameters

+ +
+ +
+ setApiConfigurationField("topP", values[0])} + className="flex-grow" + /> + {(apiConfiguration.topP ?? 0).toFixed(2)} +
+
+ {t("settings:providers.geminiParameters.topP.description")} +
-
-
- -
- setApiConfigurationField("topK", values[0])} - className="flex-grow" - /> - {apiConfiguration.topK ?? 0} + +
+ +
+ setApiConfigurationField("topK", values[0])} + className="flex-grow" + /> + {apiConfiguration.topK ?? 0} +
+
+ {t("settings:providers.geminiParameters.topK.description")} +
-
- {t("settings:providers.geminiParameters.topK.description")} + +
+ +
+ setApiConfigurationField("maxOutputTokens", values[0])} + className="flex-grow" + /> + parseInt((e as any).target.value, 10))} + className="w-16" + /> +
+
+ {t("settings:providers.geminiParameters.maxOutputTokens.description")} +
-
- -
- setApiConfigurationField("maxOutputTokens", values[0])} - className="flex-grow" - /> - parseInt((e as any).target.value, 10))} - className="w-16" - /> -
-
- {t("settings:providers.geminiParameters.maxOutputTokens.description")} + +
+

{t("settings:providers.geminiContextManagement.title")}

+
+ { + setIsCustomContextLimit(checked) + if (!checked) { + setApiConfigurationField("contextLimit", null) + } else { + setApiConfigurationField( + "contextLimit", + apiConfiguration.contextLimit ?? modelInfo?.contextWindow ?? 1048576, + ) + } + }}> + + +
+ {t("settings:providers.geminiContextManagement.description")} +
+ +
+ {t("settings:providers.geminiContextManagement.modelDefault")}:{" "} + {(modelInfo?.contextWindow || 1048576).toLocaleString()} tokens +
+ + {isCustomContextLimit && ( +
+
+
+ setApiConfigurationField("contextLimit", value)} + /> + + parseInt((e as any).target.value, 10), + )} + className="w-24" + /> + tokens +
+
+
+ )}
+ + {currentProfileId && ( +
+ +
+ Context condensing threshold for this Gemini profile. When context reaches this percentage, + it will be automatically condensed. +
+ +
+ handleThresholdChange(value)} + className="flex-grow" + /> + { + const value = parseInt((e.target as HTMLInputElement).value, 10) + if (!isNaN(value) && value >= 5 && value <= 100) { + handleThresholdChange(value) + } + }} + className="w-16" + /> + % +
+ +
+ {(() => { + const details = getTriggerDetails() + return ( + <> +
+ Condensing will trigger at:{" "} + {details.actualTrigger.toLocaleString()} tokens + {details.triggerReason === "token-limit" && ( + + (due to token limit, not percentage) + + )} +
+
+ Available context window:{" "} + {( + apiConfiguration?.contextLimit || + modelInfo?.contextWindow || + 1048576 + ).toLocaleString()}{" "} + tokens +
+
+
+ • Percentage trigger: {details.percentageBasedTrigger.toLocaleString()}{" "} + tokens ({getCurrentThreshold()}%) +
+
+ • Token limit trigger: {details.allowedTokens.toLocaleString()} tokens +
+
+ •{" "} + + Actual trigger: {details.actualTrigger.toLocaleString()} tokens + +
+
+ + ) + })()} +
+
+ )}
- setApiConfigurationField("enableUrlContext", checked)}> - {t("settings:providers.geminiParameters.urlContext.title")} - -
- {t("settings:providers.geminiParameters.urlContext.description")} -
- setApiConfigurationField("enableGrounding", checked)}> - {t("settings:providers.geminiParameters.groundingSearch.title")} - -
- {t("settings:providers.geminiParameters.groundingSearch.description")} -
-
- -
- setApiConfigurationField("contextLimit", values[0])} - className="flex-grow" - /> - parseInt((e as any).target.value, 10))} - className="w-16" - /> + +
+

Advanced Features

+ + setApiConfigurationField("enableUrlContext", checked)}> + {t("settings:providers.geminiParameters.urlContext.title")} + +
+ {t("settings:providers.geminiParameters.urlContext.description")}
-
- {t("settings:providers.geminiParameters.contextLimit.description")} + + setApiConfigurationField("enableGrounding", checked)}> + {t("settings:providers.geminiParameters.groundingSearch.title")} + +
+ {t("settings:providers.geminiParameters.groundingSearch.description")}
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 5a4b23b6b2..eb59833a84 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -236,6 +236,18 @@ "description": "Maximum number of previous messages to include in context. Lower values reduce token usage and costs but may limit conversation continuity." } }, + "geminiContextManagement": { + "title": "Gemini Context Management", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window. When conversations approach this limit, Roo Code will automatically condense older messages.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "title": "Context Condensing Threshold", + "description": "Automatically condense context when it reaches this percentage of the context window.", + "triggerAt": "Condensing will trigger at", + "availableContext": "Available context window" + } + }, "googleCloudSetup": { "title": "To use Google Cloud Vertex AI, you need to:", "step1": "1. Create a Google Cloud account, enable the Vertex AI API & enable the desired Claude models.", From 9595f76a0de66f26bd4e754205da43b9662d8b1f Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 26 Jun 2025 00:37:56 +0100 Subject: [PATCH 05/31] feat: max value for context limit to model's limit + converting description and titles to settings for translation purposes --- .../components/settings/providers/Gemini.tsx | 78 +++++++++++++------ webview-ui/src/i18n/locales/en/settings.json | 16 +++- 2 files changed, 68 insertions(+), 26 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index fd5051baf4..b9e3937515 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -149,7 +149,7 @@ export const Gemini = ({
-

Model Parameters

+

{t("settings:providers.geminiSections.modelParameters")}

-

{t("settings:providers.geminiContextManagement.title")}

+

+ {t("settings:providers.geminiSections.geminiContextManagement")} +

{t("settings:providers.geminiContextManagement.modelDefault")}:{" "} - {(modelInfo?.contextWindow || 1048576).toLocaleString()} tokens + {(modelInfo?.contextWindow || 1048576).toLocaleString()}{" "} + {t("settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTriggered")}
{isCustomContextLimit && ( @@ -253,7 +256,7 @@ export const Gemini = ({
setApiConfigurationField("contextLimit", value)} @@ -271,7 +274,11 @@ export const Gemini = ({ )} className="w-24" /> - tokens + + {t( + "settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTriggered", + )} +
@@ -284,8 +291,7 @@ export const Gemini = ({ {t("settings:providers.geminiContextManagement.condensingThreshold.title")}
- Context condensing threshold for this Gemini profile. When context reaches this percentage, - it will be automatically condensed. + {t("settings:providers.geminiContextManagement.condensingThreshold.description")}
@@ -318,36 +324,62 @@ export const Gemini = ({ return ( <>
- Condensing will trigger at:{" "} - {details.actualTrigger.toLocaleString()} tokens + + {t( + "settings:providers.geminiContextManagement.condensingThreshold.condensingtriggerAt", + )} + : + {" "} + {details.actualTrigger.toLocaleString()}{" "} + {t("settings:providers.geminiContextManagement.condensingThreshold.tokens")} {details.triggerReason === "token-limit" && ( - (due to token limit, not percentage) + ( + {t( + "settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTriggered", + )} + ) )}
- Available context window:{" "} + + {t( + "settings:providers.geminiContextManagement.condensingThreshold.availableContext", + )} + : + {" "} {( apiConfiguration?.contextLimit || modelInfo?.contextWindow || 1048576 ).toLocaleString()}{" "} - tokens + {t("settings:providers.geminiContextManagement.condensingThreshold.tokens")}
-
-
- • Percentage trigger: {details.percentageBasedTrigger.toLocaleString()}{" "} - tokens ({getCurrentThreshold()}%) -
+
- • Token limit trigger: {details.allowedTokens.toLocaleString()} tokens + + {t( + "settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTrigger", + )} + : + {" "} + {details.allowedTokens.toLocaleString()}{" "} + {t( + "settings:providers.geminiContextManagement.condensingThreshold.tokens", + )}
- •{" "} - Actual trigger: {details.actualTrigger.toLocaleString()} tokens - + {t( + "settings:providers.geminiContextManagement.condensingThreshold.actualTrigger", + )} + : + {" "} + {details.actualTrigger.toLocaleString()}{" "} + {t( + "settings:providers.geminiContextManagement.condensingThreshold.tokens", + )}
@@ -359,7 +391,9 @@ export const Gemini = ({
-

Advanced Features

+

+ {t("settings:providers.geminiSections.advancedFeatures")} +

Date: Thu, 26 Jun 2025 01:35:27 +0100 Subject: [PATCH 06/31] feat: all languages translated --- .../components/settings/providers/Gemini.tsx | 4 +--- webview-ui/src/i18n/locales/ca/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/de/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/es/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/fr/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/hi/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/id/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/it/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/ja/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/ko/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/nl/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/pl/settings.json | 20 +++++++++++++++++++ .../src/i18n/locales/pt-BR/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/ru/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/tr/settings.json | 20 +++++++++++++++++++ webview-ui/src/i18n/locales/vi/settings.json | 20 +++++++++++++++++++ .../src/i18n/locales/zh-CN/settings.json | 20 +++++++++++++++++++ .../src/i18n/locales/zh-TW/settings.json | 20 +++++++++++++++++++ 18 files changed, 341 insertions(+), 3 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index b9e3937515..8f3f2b536b 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -275,9 +275,7 @@ export const Gemini = ({ className="w-24" /> - {t( - "settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTriggered", - )} + {t("settings:providers.geminiContextManagement.condensingThreshold.tokens")}
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index e5b7ecce9f..0e76c60e19 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -236,6 +236,26 @@ "description": "Nombre màxim de missatges anteriors a incloure en el context. Valors més baixos redueixen l’ús de tokens i els costos, però poden limitar la continuïtat de la conversa." } }, + "geminiSections": { + "modelParameters": "Paràmetres del model", + "advancedFeatures": "Funcions avançades", + "geminiContextManagement": "Gestió de context de Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Utilitzar límit de finestra de context personalitzada", + "description": "Sobrescriviu la finestra de context predeterminada del model. Quan la conversa s'acosti a aquest límit, Roo Code condensarà automàticament els missatges més antics.", + "modelDefault": "Finestra de context predeterminada del model", + "condensingThreshold": { + "tokens": "tokens", + "title": "Umbral de condensació de context", + "description": "Umbral de condensació de context per a aquest perfil de Gemini. Quan el context arribi a aquest percentatge, es condensarà automàticament.", + "condensingtriggerAt": "La condensació s'activarà a", + "tokenLimitTriggered": "a causa del límit de tokens, no del percentatge", + "availableContext": "Finestra de context disponible", + "tokenLimitTrigger": "Activació per límit de tokens", + "actualTrigger": "Activació real" + } + }, "googleCloudSetup": { "title": "Per utilitzar Google Cloud Vertex AI, necessiteu:", "step1": "1. Crear un compte de Google Cloud, habilitar l'API de Vertex AI i habilitar els models Claude necessaris.", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index bf298b0904..ee96ff9f29 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -236,6 +236,26 @@ "description": "Maximale Anzahl vorheriger Nachrichten, die in den Kontext einbezogen werden. Niedrigere Werte reduzieren den Tokenverbrauch und die Kosten, können jedoch die Kontinuität der Konversation einschränken." } }, + "geminiSections": { + "modelParameters": "Model Parameter", + "advancedFeatures": "Erweiterte Funktionen", + "geminiContextManagement": "Gemini-Kontextverwaltung" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Benutzerdefiniertes Kontextfensterlimit verwenden", + "description": "Überschreibt das Standard-Kontextfenster des Modells. Wenn Konversationen dieses Limit erreichen, fasst Roo Code ältere Nachrichten automatisch zusammen.", + "modelDefault": "Standard-Kontextfenster des Modells", + "condensingThreshold": { + "tokens": "Tokens", + "title": "Schwellwert für Kontextverdichtung", + "description": "Kontextverdichtungs-Schwellenwert für dieses Gemini-Profil. Wenn der Kontext diesen Prozentsatz erreicht, wird automatisch verdichtet.", + "condensingtriggerAt": "Verdichtung wird ausgelöst bei", + "tokenLimitTriggered": "aufgrund der Token-Grenze, nicht des Prozentsatzes", + "availableContext": "Verfügbares Kontextfenster", + "tokenLimitTrigger": "Auslösung bei Token-Grenze", + "actualTrigger": "Tatsächliche Auslösung" + } + }, "googleCloudSetup": { "title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:", "step1": "1. 
Ein Google Cloud-Konto erstellen, die Vertex AI API aktivieren & die gewünschten Claude-Modelle aktivieren.", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 3fc158d362..bdbc3136e0 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -236,6 +236,26 @@ "description": "Número máximo de mensajes anteriores que se incluirán en el contexto. Valores más bajos reducen el uso de tokens y los costos, pero pueden limitar la continuidad de la conversación." } }, + "geminiSections": { + "modelParameters": "Parámetros del modelo", + "advancedFeatures": "Funciones avanzadas", + "geminiContextManagement": "Gestión de contexto de Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Usar límite de ventana de contexto personalizado", + "description": "Anular la ventana de contexto predeterminada del modelo. Cuando las conversaciones se acerquen a este límite, Roo Code condensará automáticamente los mensajes más antiguos.", + "modelDefault": "Ventana de contexto predeterminada del modelo", + "condensingThreshold": { + "tokens": "tokens", + "title": "Umbral de condensación de contexto", + "description": "Umbral de condensación de contexto para este perfil de Gemini. Cuando el contexto alcance este porcentaje, se condensará automáticamente.", + "condensingtriggerAt": "La condensación se activará a", + "tokenLimitTriggered": "debido al límite de tokens, no porcentaje", + "availableContext": "Ventana de contexto disponible", + "tokenLimitTrigger": "Activador de límite de tokens", + "actualTrigger": "Activación real" + } + }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", "step1": "1. Crear una cuenta de Google Cloud, habilitar la API de Vertex AI y habilitar los modelos Claude deseados.", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index ba137af807..6a5be2a831 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -236,6 +236,26 @@ "description": "Nombre maximum de messages précédents à inclure dans le contexte. Des valeurs plus faibles réduisent l'utilisation des tokens et les coûts, mais peuvent limiter la continuité de la conversation." } }, + "geminiSections": { + "modelParameters": "Paramètres du modèle", + "advancedFeatures": "Fonctionnalités avancées", + "geminiContextManagement": "Gestion du contexte Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Utiliser la limite de fenêtre de contexte personnalisée", + "description": "Remplace la fenêtre de contexte par défaut du modèle. Lorsque les conversations atteignent cette limite, Roo Code condensera automatiquement les anciens messages.", + "modelDefault": "Fenêtre de contexte par défaut du modèle", + "condensingThreshold": { + "tokens": "jetons", + "title": "Seuil de condensation du contexte", + "description": "Seuil de condensation du contexte pour ce profil Gemini. Lorsque le contexte atteint ce pourcentage, il sera automatiquement condensé.", + "condensingtriggerAt": "La condensation se déclenchera à", + "tokenLimitTriggered": "en raison de la limite de jetons, non du pourcentage", + "availableContext": "Fenêtre de contexte disponible", + "tokenLimitTrigger": "Déclencheur de limite de jetons", + "actualTrigger": "Déclenchement réel" + } + }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", "step1": "1. 
Créer un compte Google Cloud, activer l'API Vertex AI et activer les modèles Claude souhaités.", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index f6a0d2a595..c16405d403 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -236,6 +236,26 @@ "description": "संदर्भ में शामिल करने के लिए पिछले संदेशों की अधिकतम संख्या। निम्न मान टोकन उपयोग और लागत कम करते हैं, लेकिन बातचीत की निरंतरता सीमित कर सकते हैं।" } }, + "geminiSections": { + "modelParameters": "मॉडल पैरामीटर", + "advancedFeatures": "उन्नत सुविधाएँ", + "geminiContextManagement": "Gemini संदर्भ प्रबंधन" + }, + "geminiContextManagement": { + "useCustomContextWindow": "कस्टम संदर्भ विंडो सीमा का उपयोग करें", + "description": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो को ओवरराइड करें। जब वार्तालाप इस सीमा के करीब पहुंचती है, तो Roo Code पुराने संदेशों को स्वचालित रूप से संक्षिप्त करेगा।", + "modelDefault": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो", + "condensingThreshold": { + "tokens": "टोकन", + "title": "संदर्भ संक्षेपण थ्रेशोल्ड", + "description": "इस Gemini प्रोफ़ाइल के लिए संदर्भ संक्षेपण थ्रेशोल्ड। जब संदर्भ इस प्रतिशत तक पहुंचता है, तो इसे स्वचालित रूप से संक्षेपित किया जाएगा।", + "condensingtriggerAt": "संकुचन इस पर ट्रिगर होगा", + "tokenLimitTriggered": "टोकन सीमा के कारण, प्रतिशत नहीं", + "availableContext": "उपलब्ध संदर्भ विंडो", + "tokenLimitTrigger": "टोकन सीमा ट्रिगर", + "actualTrigger": "वास्तविक ट्रिगर" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", "step1": "1. Google Cloud खाता बनाएं, Vertex AI API सक्षम करें और वांछित Claude मॉडल सक्षम करें।", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 9675fa738a..b3a226f405 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -240,6 +240,26 @@ "description": "Jumlah maksimum pesan sebelumnya yang disertakan dalam konteks. Nilai lebih rendah mengurangi penggunaan token dan biaya tetapi dapat membatasi kelanjutan percakapan." } }, + "geminiSections": { + "modelParameters": "Parameter Model", + "advancedFeatures": "Fitur Lanjutan", + "geminiContextManagement": "Manajemen Konteks Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Gunakan batas jendela konteks kustom", + "description": "Ganti jendela konteks default model. Ketika percakapan mendekati batas ini, Roo Code akan secara otomatis merangkum pesan yang lebih lama.", + "modelDefault": "Jendela konteks default model", + "condensingThreshold": { + "tokens": "token", + "title": "Ambang Pemadatan Konteks", + "description": "Ambang pemadatan konteks untuk profil Gemini ini. Ketika konteks mencapai persentase ini, akan dipadatkan secara otomatis.", + "condensingtriggerAt": "Pemadatan akan terjadi pada", + "tokenLimitTriggered": "karena batas token, bukan persentase", + "availableContext": "Jendela konteks tersedia", + "tokenLimitTrigger": "Pemicu batas token", + "actualTrigger": "Pemicu aktual" + } + }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", "step1": "1. 
Buat akun Google Cloud, aktifkan Vertex AI API & aktifkan model Claude yang diinginkan.", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 159b975cbc..8f375bbc1d 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -236,6 +236,26 @@ "description": "Numero massimo di messaggi precedenti da includere nel contesto. Valori più bassi riducono l'utilizzo dei token e i costi ma possono limitare la continuità della conversazione." } }, + "geminiSections": { + "modelParameters": "Parametri del modello", + "advancedFeatures": "Funzionalità avanzate", + "geminiContextManagement": "Gestione del contesto Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Usa limite di finestra di contesto personalizzato", + "description": "Sovrascrive l'intervallo di contesto predefinito del modello. Quando le conversazioni raggiungono questo limite, Roo Code condenserà automaticamente i messaggi meno recenti.", + "modelDefault": "Intervallo di contesto predefinito del modello", + "condensingThreshold": { + "tokens": "token", + "title": "Soglia di condensazione del contesto", + "description": "Soglia di condensazione del contesto per questo profilo Gemini. Quando il contesto raggiunge questa percentuale, verrà automaticamente condensato.", + "condensingtriggerAt": "La condensazione si innescherà a", + "tokenLimitTriggered": "attivato a causa del limite di token, non della percentuale", + "availableContext": "Intervallo di contesto disponibile", + "tokenLimitTrigger": "Innesco del limite di token", + "actualTrigger": "Innesco effettivo" + } + }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", "step1": "1. Creare un account Google Cloud, abilitare l'API Vertex AI e abilitare i modelli Claude desiderati.", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 1944f6e324..c77e2f0c97 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -236,6 +236,26 @@ "description": "コンテキストに含める過去のメッセージの最大数。値を小さくするとトークン使用量とコストが削減されますが、会話の連続性が制限される場合があります。" } }, + "geminiSections": { + "modelParameters": "モデルパラメータ", + "advancedFeatures": "高度な機能", + "geminiContextManagement": "Gemini コンテキスト管理" + }, + "geminiContextManagement": { + "useCustomContextWindow": "カスタムコンテキストウィンドウ制限を使用", + "description": "モデルのデフォルトのコンテキストウィンドウを上書きします。この制限に近づくと、Roo Codeは古いメッセージを自動的に要約します。", + "modelDefault": "モデルのデフォルトのコンテキストウィンドウ", + "condensingThreshold": { + "tokens": "トークン", + "title": "コンテキスト圧縮しきい値", + "description": "このGeminiプロファイルのコンテキスト圧縮しきい値です。このパーセンテージに達すると、自動的に圧縮されます。", + "condensingtriggerAt": "圧縮は次の時にトリガーされます", + "tokenLimitTriggered": "トークン制限が原因で、パーセンテージではありません", + "availableContext": "利用可能なコンテキストウィンドウ", + "tokenLimitTrigger": "トークン制限トリガー", + "actualTrigger": "実際のトリガー" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", "step1": "1. Google Cloudアカウントを作成し、Vertex AI APIを有効にして、希望するClaudeモデルを有効にします。", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index aee95a8b66..924b634fc8 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -236,6 +236,26 @@ "description": "컨텍스트에 포함할 이전 메시지의 최대 수입니다. 낮은 값은 토큰 사용량과 비용을 줄이지만 대화 연속성이 제한될 수 있습니다." 
} }, + "geminiSections": { + "modelParameters": "모델 매개변수", + "advancedFeatures": "고급 기능", + "geminiContextManagement": "Gemini 컨텍스트 관리" + }, + "geminiContextManagement": { + "useCustomContextWindow": "사용자 지정 컨텍스트 창 제한 사용", + "description": "모델의 기본 컨텍스트 창을 재정의합니다. 이 제한에 가까워지면 Roo Code는 이전 메시지를 자동으로 요약합니다。", + "modelDefault": "모델의 기본 컨텍스트 창", + "condensingThreshold": { + "tokens": "토큰", + "title": "컨텍스트 압축 임계값", + "description": "이 Gemini 프로필의 컨텍스트 압축 임계값입니다. 컨텍스트가 이 백분율에 도달하면 자동으로 압축됩니다。", + "condensingtriggerAt": "압축은 다음에서 트리거됩니다", + "tokenLimitTriggered": "백분율이 아닌 토큰 제한으로 인해 트리거되었습니다", + "availableContext": "사용 가능한 컨텍스트 창", + "tokenLimitTrigger": "토큰 제한 트리거", + "actualTrigger": "실제 트리거" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", "step1": "1. Google Cloud 계정을 만들고, Vertex AI API를 활성화하고, 원하는 Claude 모델을 활성화하세요.", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 315c148810..13be3d7671 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -236,6 +236,26 @@ "description": "Maximaal aantal vorige berichten dat in de context wordt opgenomen. Lagere waarden verlagen het tokengebruik en de kosten, maar kunnen de continuïteit van het gesprek beperken." } }, + "geminiSections": { + "modelParameters": "Modelparameters", + "advancedFeatures": "Geavanceerde functies", + "geminiContextManagement": "Gemini-contextbeheer" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Gebruik aangepast contextvensterlimiet", + "description": "Dit overschrijft het standaard contextvenster van het model. Wanneer gesprekken dit limiet naderen, zal Roo Code automatisch oudere berichten samenvatten.", + "modelDefault": "Standaard contextvenster van het model", + "condensingThreshold": { + "tokens": "tokens", + "title": "Drempel voor contextsamentrekking", + "description": "Drempel voor contextsamentrekking voor dit Gemini-profiel. Wanneer de context dit percentage bereikt, wordt het automatisch samengevat.", + "condensingtriggerAt": "Samentrekking wordt geactiveerd bij", + "tokenLimitTriggered": "vanwege tokenlimiet, niet percentage", + "availableContext": "Beschikbaar contextvenster", + "tokenLimitTrigger": "Tokenlimiet-trigger", + "actualTrigger": "Werkelijke trigger" + } + }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", "step1": "1. Maak een Google Cloud-account aan, schakel de Vertex AI API in en activeer de gewenste Claude-modellen.", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 30046f2c34..62ea465eee 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -236,6 +236,26 @@ "description": "Maksymalna liczba poprzednich wiadomości uwzględnianych w kontekście. Niższe wartości zmniejszają użycie tokenów i koszty, ale mogą ograniczać ciągłość rozmowy." } }, + "geminiSections": { + "modelParameters": "Parametry modelu", + "advancedFeatures": "Zaawansowane funkcje", + "geminiContextManagement": "Zarządzanie kontekstem Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Użyj niestandardowego limitu okna kontekstu", + "description": "Nadpisuje domyślne okno kontekstu modelu. 
Gdy rozmowy zbliżą się do tego limitu, Roo Code automatycznie skróci starsze wiadomości.", + "modelDefault": "Domyślne okno kontekstu modelu", + "condensingThreshold": { + "tokens": "tokeny", + "title": "Próg kondensacji kontekstu", + "description": "Próg kondensacji kontekstu dla tego profilu Gemini. Gdy kontekst osiągnie ten procent, zostanie automatycznie skrócony.", + "condensingtriggerAt": "Kondensacja zostanie wyzwolona przy", + "tokenLimitTriggered": "wyzwolone z powodu limitu tokenów, a nie procentu", + "availableContext": "Dostępne okno kontekstu", + "tokenLimitTrigger": "Wyzwalacz limitu tokenów", + "actualTrigger": "Rzeczywisty wyzwalacz" + } + }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", "step1": "1. Utworzyć konto Google Cloud, włączyć API Vertex AI i włączyć żądane modele Claude.", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index e8755cc969..76daad7d87 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -236,6 +236,26 @@ "description": "Número máximo de mensagens anteriores a incluir no contexto. Valores mais baixos reduzem o uso de tokens e os custos, mas podem limitar a continuidade da conversa." } }, + "geminiSections": { + "modelParameters": "Parâmetros do modelo", + "advancedFeatures": "Recursos avançados", + "geminiContextManagement": "Gerenciamento de contexto do Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Usar limite de janela de contexto personalizado", + "description": "Substitua a janela de contexto padrão do modelo. Quando as conversas se aproximarem desse limite, o Roo Code irá condensar automaticamente as mensagens mais antigas.", + "modelDefault": "Janela de contexto padrão do modelo", + "condensingThreshold": { + "tokens": "tokens", + "title": "Limite de condensação de contexto", + "description": "Limite de condensação de contexto para este perfil Gemini. Quando o contexto atingir essa porcentagem, ele será automaticamente condensado.", + "condensingtriggerAt": "A condensação será acionada em", + "tokenLimitTriggered": "devido ao limite de tokens, não porcentagem", + "availableContext": "Janela de contexto disponível", + "tokenLimitTrigger": "Acionador de limite de tokens", + "actualTrigger": "Acionador real" + } + }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", "step1": "1. Criar uma conta Google Cloud, ativar a API Vertex AI e ativar os modelos Claude desejados.", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 46f3120267..6710c3985b 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -236,6 +236,26 @@ "description": "Максимальное число предыдущих сообщений, включаемых в контекст. Более низкие значения снижают использование токенов и стоимость, но могут ограничить непрерывность разговора." } }, + "geminiSections": { + "modelParameters": "Параметры модели", + "advancedFeatures": "Расширенные функции", + "geminiContextManagement": "Управление контекстом Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Использовать ограничение окна контекста", + "description": "Переопределяет окно контекста по умолчанию модели. 
Когда разговоры приближаются к этому пределу, Roo Code автоматически будет конденсировать старые сообщения.", + "modelDefault": "Окно контекста по умолчанию модели", + "condensingThreshold": { + "tokens": "токены", + "title": "Порог конденсации контекста", + "description": "Порог конденсации контекста для этого профиля Gemini. Когда контекст достигает этого процента, он будет автоматически конденсироваться.", + "condensingtriggerAt": "Конденсация будет запущена при", + "tokenLimitTriggered": "из-за ограничения токенов, а не процента", + "availableContext": "Доступное окно контекста", + "tokenLimitTrigger": "Триггер лимита токенов", + "actualTrigger": "Фактический триггер" + } + }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", "step1": "1. Создайте аккаунт Google Cloud, включите Vertex AI API и нужные модели Claude.", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 7243921a11..1936ce6e8a 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -236,6 +236,26 @@ "description": "Bağlamda dahil edilecek önceki mesajların maksimum sayısı. Daha düşük değerler token kullanımını ve maliyeti azaltır, ancak konuşmanın devamlılığını sınırlayabilir." } }, + "geminiSections": { + "modelParameters": "Model Parametreleri", + "advancedFeatures": "Gelişmiş Özellikler", + "geminiContextManagement": "Gemini Bağlam Yönetimi" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Özel bağlam penceresi sınırı kullan", + "description": "Modelin varsayılan bağlam penceresini geçersiz kılar. Konuşmalar bu sınıra yaklaştığında, Roo Code eski mesajları otomatik olarak yoğunlaştırır.", + "modelDefault": "Modelin varsayılan bağlam penceresi", + "condensingThreshold": { + "tokens": "jetonlar", + "title": "Bağlam Yoğunlaştırma Eşiği", + "description": "Bu Gemini profili için bağlam yoğunlaştırma eşiği. Bağlam bu yüzdede olduğunda, otomatik olarak yoğunlaştırılır.", + "condensingtriggerAt": "Yoğunlaştırma şu anda tetiklenecek", + "tokenLimitTriggered": "yüzde değil jeton sınırı nedeniyle tetiklendi", + "availableContext": "Kullanılabilir bağlam penceresi", + "tokenLimitTrigger": "Jeton sınırı tetikleyicisi", + "actualTrigger": "Gerçek tetikleyici" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", "step1": "1. Google Cloud hesabı oluşturun, Vertex AI API'sini etkinleştirin ve istediğiniz Claude modellerini etkinleştirin.", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index d3b944aa8b..04ea88801f 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -236,6 +236,26 @@ "description": "Số lượng tối đa các tin nhắn trước đó được đưa vào ngữ cảnh. Giá trị thấp hơn giảm mức sử dụng token và chi phí nhưng có thể hạn chế tính liên tục của cuộc trò chuyện." } }, + "geminiSections": { + "modelParameters": "Tham số mô hình", + "advancedFeatures": "Tính năng nâng cao", + "geminiContextManagement": "Quản lý ngữ cảnh Gemini" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Sử dụng giới hạn cửa sổ ngữ cảnh tùy chỉnh", + "description": "Ghi đè cửa sổ ngữ cảnh mặc định của mô hình. 
Khi cuộc trò chuyện tiếp cận giới hạn này, Roo Code sẽ tự động cô đọng các tin nhắn cũ hơn.", + "modelDefault": "Cửa sổ ngữ cảnh mặc định của mô hình", + "condensingThreshold": { + "tokens": "token", + "title": "Ngưỡng cô đọng ngữ cảnh", + "description": "Ngưỡng cô đọng ngữ cảnh cho hồ sơ Gemini này. Khi ngữ cảnh đạt đến tỷ lệ phần trăm này, nó sẽ tự động được cô đọng.", + "condensingtriggerAt": "Cô đọng sẽ kích hoạt ở", + "tokenLimitTriggered": "do giới hạn token, không phải tỷ lệ phần trăm", + "availableContext": "Cửa sổ ngữ cảnh khả dụng", + "tokenLimitTrigger": "Kích hoạt giới hạn token", + "actualTrigger": "Kích hoạt thực tế" + } + }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", "step1": "1. Tạo tài khoản Google Cloud, kích hoạt Vertex AI API và kích hoạt các mô hình Claude mong muốn.", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 68612fb382..6675582ab8 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -236,6 +236,26 @@ "description": "包括在上下文中的先前消息的最大数量。较低的值可减少令牌使用量和成本,但可能限制对话连续性。" } }, + "geminiSections": { + "modelParameters": "模型参数", + "advancedFeatures": "高级功能", + "geminiContextManagement": "Gemini 上下文管理" + }, + "geminiContextManagement": { + "useCustomContextWindow": "使用自定义上下文窗口限制", + "description": "覆盖模型的默认上下文窗口限制。当对话接近此限制时,Roo Code 会自动压缩较旧的消息。", + "modelDefault": "模型的默认上下文窗口限制", + "condensingThreshold": { + "tokens": "令牌", + "title": "上下文压缩阈值", + "description": "此 Gemini 配置文件的上下文压缩阈值。当上下文达到此百分比时,将自动压缩。", + "condensingtriggerAt": "压缩将在达到时触发", + "tokenLimitTriggered": "由于令牌限制触发,而非百分比", + "availableContext": "可用上下文窗口", + "tokenLimitTrigger": "令牌限制触发", + "actualTrigger": "实际触发" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 注册Google Cloud账号并启用Vertex AI API", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 4dfab4910b..5456e7bbb7 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -236,6 +236,26 @@ "description": "生成期間要包含的最大上下文大小(以代幣為單位)。" } }, + "geminiSections": { + "modelParameters": "模型參數", + "advancedFeatures": "進階功能", + "geminiContextManagement": "Gemini 上下文管理" + }, + "geminiContextManagement": { + "useCustomContextWindow": "使用自訂上下文視窗限制", + "description": "覆寫模型的預設上下文視窗限制。當對話接近此限制時,Roo Code 將自動濃縮較舊訊息。", + "modelDefault": "模型的預設上下文視窗限制", + "condensingThreshold": { + "tokens": "代幣", + "title": "上下文濃縮閾值", + "description": "此 Gemini 設定檔的上下文濃縮閾值。當上下文達到此百分比時,將自動濃縮。", + "condensingtriggerAt": "濃縮將在以下條件觸發", + "tokenLimitTriggered": "因代幣限制而非百分比", + "availableContext": "可用上下文視窗", + "tokenLimitTrigger": "代幣限制觸發", + "actualTrigger": "實際觸發" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 
建立 Google Cloud 帳戶,啟用 Vertex AI API 並啟用所需的 Claude 模型。", From 8f468f49b06ea43657056db971bf530732ee544c Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 26 Jun 2025 19:08:38 +0100 Subject: [PATCH 07/31] feat: changing profile-specific threshold in context management setting will also change in Gemini context management - sync between Context Management Settting <-> Gemini Context Management with regards to thresholding --- webview-ui/src/components/settings/SettingsView.tsx | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 99b1407d97..d1f2364a18 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -177,6 +177,15 @@ const SettingsView = forwardRef(({ onDone, t const apiConfiguration = useMemo(() => cachedState.apiConfiguration ?? {}, [cachedState.apiConfiguration]) + const getCurrentProfileId = useCallback(() => { + if (!currentApiConfigName || !listApiConfigMeta) { + return currentApiConfigName + } + + const profile = listApiConfigMeta.find((p) => p.name === currentApiConfigName) + return profile ? profile.id : currentApiConfigName + }, [currentApiConfigName, listApiConfigMeta]) + useEffect(() => { // Update only when currentApiConfigName is changed. // Expected to be triggered by loadApiConfiguration/upsertApiConfiguration. @@ -586,7 +595,7 @@ const SettingsView = forwardRef(({ onDone, t setApiConfigurationField={setApiConfigurationField} errorMessage={errorMessage} setErrorMessage={setErrorMessage} - currentProfileId={currentApiConfigName} + currentProfileId={getCurrentProfileId()} profileThresholds={profileThresholds || {}} autoCondenseContextPercent={autoCondenseContextPercent || 75} setProfileThreshold={setProfileThreshold} From 98e813d1d3086a59d0ad85437fc5a0461d9dd745 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Fri, 27 Jun 2025 00:13:14 +0100 Subject: [PATCH 08/31] feat: max value of maxOutputTokens is model's maxTokens + adding more tests --- .../__tests__/sliding-window.spec.ts | 24 ++++++ .../components/settings/providers/Gemini.tsx | 7 +- .../providers/__tests__/Gemini.spec.tsx | 85 +++++++++++++++++++ 3 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx diff --git a/src/core/sliding-window/__tests__/sliding-window.spec.ts b/src/core/sliding-window/__tests__/sliding-window.spec.ts index 3bda5351d4..d3837c7a23 100644 --- a/src/core/sliding-window/__tests__/sliding-window.spec.ts +++ b/src/core/sliding-window/__tests__/sliding-window.spec.ts @@ -250,6 +250,30 @@ describe("Sliding Window", () => { { role: "assistant", content: "Fourth message" }, { role: "user", content: "Fifth message" }, ] + it("should use contextLimit as contextWindow when apiProvider is gemini", async () => { + const contextLimit = 2 + const messages: ApiMessage[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + { role: "assistant", content: "Fourth message" }, + { role: "user", content: "" }, + ] + const result = await truncateConversationIfNeeded({ + messages, + totalTokens: 2, + contextWindow: contextLimit, + maxTokens: null, + apiHandler: mockApiHandler, + autoCondenseContext: false, + 
autoCondenseContextPercent: 100, + systemPrompt: "", + taskId, + profileThresholds: {}, + currentProfileId: "default", + }) + expect(result.messages).toEqual([messages[0], messages[3], messages[4]]) + }) it("should not truncate if tokens are below max tokens threshold", async () => { const modelInfo = createModelInfo(100000, 30000) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 8f3f2b536b..54df49c392 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -198,7 +198,7 @@ export const Gemini = ({
setApiConfigurationField("maxOutputTokens", values[0])} @@ -208,7 +208,10 @@ export const Gemini = ({ value={(apiConfiguration.maxOutputTokens ?? 0).toString()} type="text" inputMode="numeric" - onInput={handleInputChange("maxOutputTokens", (e) => parseInt((e as any).target.value, 10))} + onInput={handleInputChange("maxOutputTokens", (e) => { + const val = parseInt((e as any).target.value, 10) + return Number.isNaN(val) ? 0 : Math.min(val, modelInfo.maxTokens) + })} className="w-16" />
diff --git a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx new file mode 100644 index 0000000000..7b69538ff9 --- /dev/null +++ b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx @@ -0,0 +1,85 @@ +import React from "react" +import { render, screen, fireEvent } from "@testing-library/react" +import { Gemini } from "../Gemini" +import type { ProviderSettings } from "@roo-code/types" +import { geminiModels, geminiDefaultModelId, type GeminiModelId } from "@roo-code/types" + +vi.mock("@vscode/webview-ui-toolkit/react", () => ({ + VSCodeTextField: ({ children, value, onInput, type }: any) => ( +
+ {children} + onInput(e)} /> +
+ ), +})) + +vi.mock("vscrui", () => ({ + Checkbox: ({ children, checked, onChange }: any) => ( + + ), +})) + +vi.mock("@src/components/ui", () => ({ + Slider: ({ min, max, step, value, onValueChange }: any) => ( + onValueChange([Number(e.target.value)])} + /> + ), +})) + +vi.mock("@src/i18n/TranslationContext", () => ({ + useAppTranslation: () => ({ t: (key: string) => key }), +})) + +vi.mock("@src/components/common/VSCodeButtonLink", () => ({ + VSCodeButtonLink: ({ children, href }: any) => {children}, +})) + +const defaultModelId: GeminiModelId = geminiDefaultModelId +const defaultContextWindow = geminiModels[defaultModelId].contextWindow + +describe("Gemini provider settings", () => { + it("does not render context limit slider when custom context limit is not enabled", () => { + const setApiField = vi.fn() + const config: ProviderSettings = {} + render( + , + ) + expect(screen.queryByTestId("slider")).toBeNull() + }) + + it("enables custom context limit on checkbox toggle and shows slider with default value", () => { + const setApiField = vi.fn() + const config: ProviderSettings = {} + render( + , + ) + const checkbox = screen.getByTestId("checkbox-custom-context-limit") + fireEvent.click(checkbox) + expect(setApiField).toHaveBeenCalledWith("contextLimit", defaultContextWindow) + const slider = screen.getByTestId("slider") + expect(slider).toHaveValue(defaultContextWindow.toString()) + }) + + it("renders slider when contextLimit already set and updates on slider change", () => { + const setApiField = vi.fn() + const initialLimit = 100000 + const config: ProviderSettings = { contextLimit: initialLimit } + render( + , + ) + const slider = screen.getByTestId("slider") + expect(slider).toHaveValue(initialLimit.toString()) + fireEvent.change(slider, { target: { value: "50000" } }) + expect(setApiField).toHaveBeenCalledWith("contextLimit", 50000) + }) +}) From 91c16cbec4724ebc6bfca2637c67991da5f4cc70 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Fri, 27 Jun 2025 00:38:25 +0100 Subject: [PATCH 09/31] feat: improve unit tests and adding `data-testid` to slider and checkbox components --- .../components/settings/providers/Gemini.tsx | 8 +++++ .../providers/__tests__/Gemini.spec.tsx | 35 ++++++++++++++----- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 54df49c392..b772b1529f 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -128,6 +128,7 @@ export const Gemini = ({
{ setGoogleGeminiBaseUrlSelected(checked) @@ -157,6 +158,7 @@ export const Gemini = ({
{ setIsCustomContextLimit(checked) @@ -258,6 +263,7 @@ export const Gemini = ({
setApiConfigurationField("enableUrlContext", checked)}> {t("settings:providers.geminiParameters.urlContext.title")} @@ -406,6 +413,7 @@ export const Gemini = ({
setApiConfigurationField("enableGrounding", checked)}> {t("settings:providers.geminiParameters.groundingSearch.title")} diff --git a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx index 7b69538ff9..f36ef10785 100644 --- a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx +++ b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx @@ -1,4 +1,3 @@ -import React from "react" import { render, screen, fireEvent } from "@testing-library/react" import { Gemini } from "../Gemini" import type { ProviderSettings } from "@roo-code/types" @@ -14,8 +13,8 @@ vi.mock("@vscode/webview-ui-toolkit/react", () => ({ })) vi.mock("vscrui", () => ({ - Checkbox: ({ children, checked, onChange }: any) => ( -
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 9ec881f402..a164e1bb00 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -243,7 +243,7 @@ }, "geminiContextManagement": { "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window. Roo Code automatically condenses older messages when conversations reach either: (1) your chosen threshold percentage of the context window, or (2) the calculated token limit (which accounts for reserved tokens and safety buffers). The actual trigger uses whichever limit is reached first.", + "description": "Override the model's default context window.", "modelDefault": "Model's default context window", "condensingThreshold": { "tokens": "tokens", From f384f7355b5800ce0380df7386871744bde0bbd7 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 3 Jul 2025 09:20:20 +0100 Subject: [PATCH 14/31] fix: Changing the translation to be consistent with the english one --- webview-ui/src/i18n/locales/ca/settings.json | 12 +++++------- webview-ui/src/i18n/locales/de/settings.json | 4 ++-- webview-ui/src/i18n/locales/es/settings.json | 10 +++++----- webview-ui/src/i18n/locales/fr/settings.json | 4 ++-- webview-ui/src/i18n/locales/hi/settings.json | 12 +++++------- webview-ui/src/i18n/locales/id/settings.json | 12 ++++++------ webview-ui/src/i18n/locales/it/settings.json | 12 +++++------- webview-ui/src/i18n/locales/ja/settings.json | 8 +++----- webview-ui/src/i18n/locales/ko/settings.json | 14 +++++++------- webview-ui/src/i18n/locales/nl/settings.json | 12 +++++------- webview-ui/src/i18n/locales/pl/settings.json | 8 +++----- webview-ui/src/i18n/locales/pt-BR/settings.json | 12 +++++------- webview-ui/src/i18n/locales/ru/settings.json | 8 +++----- webview-ui/src/i18n/locales/tr/settings.json | 14 +++++++------- webview-ui/src/i18n/locales/vi/settings.json | 8 +++----- webview-ui/src/i18n/locales/zh-CN/settings.json | 4 ++-- webview-ui/src/i18n/locales/zh-TW/settings.json | 4 ++-- 17 files changed, 70 insertions(+), 88 deletions(-) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 0e76c60e19..d6cb34476c 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -239,19 +239,17 @@ "geminiSections": { "modelParameters": "Paràmetres del model", "advancedFeatures": "Funcions avançades", - "geminiContextManagement": "Gestió de context de Gemini" + "geminiTokentManagement": "Gestió de tokens" }, "geminiContextManagement": { "useCustomContextWindow": "Utilitzar límit de finestra de context personalitzada", - "description": "Sobrescriviu la finestra de context predeterminada del model. Quan la conversa s'acosti a aquest límit, Roo Code condensarà automàticament els missatges més antics.", + "description": "Sobrescriu el límit de finestra de context predeterminat del model.", "modelDefault": "Finestra de context predeterminada del model", "condensingThreshold": { "tokens": "tokens", - "title": "Umbral de condensació de context", - "description": "Umbral de condensació de context per a aquest perfil de Gemini. 
Quan el context arribi a aquest percentatge, es condensarà automàticament.", - "condensingtriggerAt": "La condensació s'activarà a", - "tokenLimitTriggered": "a causa del límit de tokens, no del percentatge", - "availableContext": "Finestra de context disponible", + "title": "Llindar d'activació de condensació", + "description": "Llindar d'activació de condensació per a aquest perfil de Gemini. Quan el context arribi a aquest percentatge, es condensarà automàticament.", + "condensingTriggerAt": "Condensació s'activarà a", "tokenLimitTrigger": "Activació per límit de tokens", "actualTrigger": "Activació real" } diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index ee96ff9f29..fef2be6323 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -239,11 +239,11 @@ "geminiSections": { "modelParameters": "Model Parameter", "advancedFeatures": "Erweiterte Funktionen", - "geminiContextManagement": "Gemini-Kontextverwaltung" + "geminiTokentManagement": "Tokenverwaltung" }, "geminiContextManagement": { "useCustomContextWindow": "Benutzerdefiniertes Kontextfensterlimit verwenden", - "description": "Überschreibt das Standard-Kontextfenster des Modells. Wenn Konversationen dieses Limit erreichen, fasst Roo Code ältere Nachrichten automatisch zusammen.", + "description": "Überschreiben Sie das Standard-Kontextfenster des Modells.", "modelDefault": "Standard-Kontextfenster des Modells", "condensingThreshold": { "tokens": "Tokens", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index bdbc3136e0..536e11f414 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -239,21 +239,21 @@ "geminiSections": { "modelParameters": "Parámetros del modelo", "advancedFeatures": "Funciones avanzadas", - "geminiContextManagement": "Gestión de contexto de Gemini" + "geminiTokentManagement": "Gestión de tokens" }, "geminiContextManagement": { "useCustomContextWindow": "Usar límite de ventana de contexto personalizado", - "description": "Anular la ventana de contexto predeterminada del modelo. Cuando las conversaciones se acerquen a este límite, Roo Code condensará automáticamente los mensajes más antiguos.", + "description": "Anular la ventana de contexto predeterminada del modelo.", "modelDefault": "Ventana de contexto predeterminada del modelo", "condensingThreshold": { "tokens": "tokens", "title": "Umbral de condensación de contexto", - "description": "Umbral de condensación de contexto para este perfil de Gemini. Cuando el contexto alcance este porcentaje, se condensará automáticamente.", + "description": "Establezca el porcentaje de uso de la ventana de contexto que activa la condensación automática. 
Nota: si el límite de tokens calculado (después de reservar espacio para la salida y buffers de seguridad) es inferior a este porcentaje, se activará la condensación por límite de tokens.", "condensingtriggerAt": "La condensación se activará a", "tokenLimitTriggered": "debido al límite de tokens, no porcentaje", "availableContext": "Ventana de contexto disponible", - "tokenLimitTrigger": "Activador de límite de tokens", - "actualTrigger": "Activación real" + "tokenLimitTrigger": "Activador de límite de tokens (después de reservar tokens de salida y buffer de seguridad)", + "actualTrigger": "Activación real (mínimo de porcentaje y límite de tokens)" } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 6a5be2a831..21165e0296 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -239,11 +239,11 @@ "geminiSections": { "modelParameters": "Paramètres du modèle", "advancedFeatures": "Fonctionnalités avancées", - "geminiContextManagement": "Gestion du contexte Gemini" + "geminiTokentManagement": "Gestion des jetons" }, "geminiContextManagement": { "useCustomContextWindow": "Utiliser la limite de fenêtre de contexte personnalisée", - "description": "Remplace la fenêtre de contexte par défaut du modèle. Lorsque les conversations atteignent cette limite, Roo Code condensera automatiquement les anciens messages.", + "description": "Remplacez la fenêtre de contexte par défaut du modèle.", "modelDefault": "Fenêtre de contexte par défaut du modèle", "condensingThreshold": { "tokens": "jetons", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index c16405d403..3daea603d5 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -239,19 +239,17 @@ "geminiSections": { "modelParameters": "मॉडल पैरामीटर", "advancedFeatures": "उन्नत सुविधाएँ", - "geminiContextManagement": "Gemini संदर्भ प्रबंधन" + "geminiTokentManagement": "टोकन प्रबंधन" }, "geminiContextManagement": { "useCustomContextWindow": "कस्टम संदर्भ विंडो सीमा का उपयोग करें", - "description": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो को ओवरराइड करें। जब वार्तालाप इस सीमा के करीब पहुंचती है, तो Roo Code पुराने संदेशों को स्वचालित रूप से संक्षिप्त करेगा।", + "description": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो सीमा को ओवरराइड करें।", "modelDefault": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो", "condensingThreshold": { "tokens": "टोकन", - "title": "संदर्भ संक्षेपण थ्रेशोल्ड", - "description": "इस Gemini प्रोफ़ाइल के लिए संदर्भ संक्षेपण थ्रेशोल्ड। जब संदर्भ इस प्रतिशत तक पहुंचता है, तो इसे स्वचालित रूप से संक्षेपित किया जाएगा।", - "condensingtriggerAt": "संकुचन इस पर ट्रिगर होगा", - "tokenLimitTriggered": "टोकन सीमा के कारण, प्रतिशत नहीं", - "availableContext": "उपलब्ध संदर्भ विंडो", + "title": "संघनन ट्रिगर सीमा", + "description": "इस Gemini प्रोफ़ाइल के लिए संघनन ट्रिगर सीमा। जब संदर्भ इस प्रतिशत तक पहुंचता है, तो इसे स्वचालित रूप से संघनित किया जाएगा।", + "condensingTriggerAt": "संघनन इस पर ट्रिगर होगा", "tokenLimitTrigger": "टोकन सीमा ट्रिगर", "actualTrigger": "वास्तविक ट्रिगर" } diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index b3a226f405..bee4e68e11 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -243,21 +243,21 @@ "geminiSections": { "modelParameters": "Parameter Model", "advancedFeatures": "Fitur 
Lanjutan", - "geminiContextManagement": "Manajemen Konteks Gemini" + "geminiTokentManagement": "Manajemen Token" }, "geminiContextManagement": { "useCustomContextWindow": "Gunakan batas jendela konteks kustom", - "description": "Ganti jendela konteks default model. Ketika percakapan mendekati batas ini, Roo Code akan secara otomatis merangkum pesan yang lebih lama.", + "description": "Ganti jendela konteks default model.", "modelDefault": "Jendela konteks default model", "condensingThreshold": { "tokens": "token", "title": "Ambang Pemadatan Konteks", - "description": "Ambang pemadatan konteks untuk profil Gemini ini. Ketika konteks mencapai persentase ini, akan dipadatkan secara otomatis.", - "condensingtriggerAt": "Pemadatan akan terjadi pada", + "description": "Atur persentase penggunaan jendela konteks yang memicu pemadatan otomatis. Catatan: Jika batas token yang dihitung (setelah menyisihkan token keluaran dan buffer keamanan) lebih rendah dari persentase ini, batas token akan memicu pemadatan sebagai gantinya.", + "condensingtriggerAt": "Pemadatan akan dipicu pada", "tokenLimitTriggered": "karena batas token, bukan persentase", "availableContext": "Jendela konteks tersedia", - "tokenLimitTrigger": "Pemicu batas token", - "actualTrigger": "Pemicu aktual" + "tokenLimitTrigger": "Pemicu batas token (setelah menyisihkan token keluaran dan buffer keamanan)", + "actualTrigger": "Pemicu aktual (nilai minimum antara persentase dan batas token)" } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 8f375bbc1d..4475dd4433 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -239,7 +239,7 @@ "geminiSections": { "modelParameters": "Parametri del modello", "advancedFeatures": "Funzionalità avanzate", - "geminiContextManagement": "Gestione del contesto Gemini" + "geminiTokentManagement": "Gestione del contesto Gemini" }, "geminiContextManagement": { "useCustomContextWindow": "Usa limite di finestra di contesto personalizzato", @@ -248,12 +248,10 @@ "condensingThreshold": { "tokens": "token", "title": "Soglia di condensazione del contesto", - "description": "Soglia di condensazione del contesto per questo profilo Gemini. Quando il contesto raggiunge questa percentuale, verrà automaticamente condensato.", - "condensingtriggerAt": "La condensazione si innescherà a", - "tokenLimitTriggered": "attivato a causa del limite di token, non della percentuale", - "availableContext": "Intervallo di contesto disponibile", - "tokenLimitTrigger": "Innesco del limite di token", - "actualTrigger": "Innesco effettivo" + "description": "La soglia (percentuale) alla quale questo profilo Gemini comprime il contesto. 
Quando viene raggiunta, il contesto viene automaticamente compresso.", + "condensingTriggerAt": "Percentuale alla quale viene avviata la condensazione", + "tokenLimitTrigger": "Attivazione limite di token", + "actualTrigger": "Attivazione effettiva" } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index c77e2f0c97..8f8ea09d71 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -239,7 +239,7 @@ "geminiSections": { "modelParameters": "モデルパラメータ", "advancedFeatures": "高度な機能", - "geminiContextManagement": "Gemini コンテキスト管理" + "geminiTokentManagement": "Gemini コンテキスト管理" }, "geminiContextManagement": { "useCustomContextWindow": "カスタムコンテキストウィンドウ制限を使用", @@ -248,10 +248,8 @@ "condensingThreshold": { "tokens": "トークン", "title": "コンテキスト圧縮しきい値", - "description": "このGeminiプロファイルのコンテキスト圧縮しきい値です。このパーセンテージに達すると、自動的に圧縮されます。", - "condensingtriggerAt": "圧縮は次の時にトリガーされます", - "tokenLimitTriggered": "トークン制限が原因で、パーセンテージではありません", - "availableContext": "利用可能なコンテキストウィンドウ", + "description": "このGeminiプロファイルのしきい値(パーセンテージ)。到達すると、コンテキストは自動的に圧縮されます。", + "condensingTriggerAt": "圧縮がトリガーされる割合", "tokenLimitTrigger": "トークン制限トリガー", "actualTrigger": "実際のトリガー" } diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 924b634fc8..016ba36236 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -239,21 +239,21 @@ "geminiSections": { "modelParameters": "모델 매개변수", "advancedFeatures": "고급 기능", - "geminiContextManagement": "Gemini 컨텍스트 관리" + "geminiTokentManagement": "토큰 관리" }, "geminiContextManagement": { "useCustomContextWindow": "사용자 지정 컨텍스트 창 제한 사용", - "description": "모델의 기본 컨텍스트 창을 재정의합니다. 이 제한에 가까워지면 Roo Code는 이전 메시지를 자동으로 요약합니다。", + "description": "모델의 기본 컨텍스트 창을 재정의합니다.", "modelDefault": "모델의 기본 컨텍스트 창", "condensingThreshold": { "tokens": "토큰", "title": "컨텍스트 압축 임계값", - "description": "이 Gemini 프로필의 컨텍스트 압축 임계값입니다. 컨텍스트가 이 백분율에 도달하면 자동으로 압축됩니다。", - "condensingtriggerAt": "압축은 다음에서 트리거됩니다", - "tokenLimitTriggered": "백분율이 아닌 토큰 제한으로 인해 트리거되었습니다", + "description": "자동 압축을 트리거하는 컨텍스트 창 사용 백분율을 설정합니다. 참고: 계산된 토큰 제한이(출력 토큰 및 안전 버퍼 예약 후) 이 백분율보다 낮으면, 대신 토큰 제한이 압축을 트리거합니다.", + "condensingtriggerAt": "압축은 다음 시점에서 트리거됩니다", + "tokenLimitTriggered": "퍼센티지가 아닌 토큰 제한으로 인해 트리거됩니다", "availableContext": "사용 가능한 컨텍스트 창", - "tokenLimitTrigger": "토큰 제한 트리거", - "actualTrigger": "실제 트리거" + "tokenLimitTrigger": "토큰 제한 트리거 (출력 토큰 및 안전 버퍼 예약 후)", + "actualTrigger": "실제 트리거 (백분율과 토큰 제한 중 최소값)" } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 13be3d7671..5d4dd84397 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -239,19 +239,17 @@ "geminiSections": { "modelParameters": "Modelparameters", "advancedFeatures": "Geavanceerde functies", - "geminiContextManagement": "Gemini-contextbeheer" + "geminiTokentManagement": "Tokenbeheer" }, "geminiContextManagement": { "useCustomContextWindow": "Gebruik aangepast contextvensterlimiet", - "description": "Dit overschrijft het standaard contextvenster van het model. 
Wanneer gesprekken dit limiet naderen, zal Roo Code automatisch oudere berichten samenvatten.", + "description": "Vervangt de standaardlimiet voor het contextvenster van het model.", "modelDefault": "Standaard contextvenster van het model", "condensingThreshold": { "tokens": "tokens", - "title": "Drempel voor contextsamentrekking", - "description": "Drempel voor contextsamentrekking voor dit Gemini-profiel. Wanneer de context dit percentage bereikt, wordt het automatisch samengevat.", - "condensingtriggerAt": "Samentrekking wordt geactiveerd bij", - "tokenLimitTriggered": "vanwege tokenlimiet, niet percentage", - "availableContext": "Beschikbaar contextvenster", + "title": "Limiet voor activering van condensatie", + "description": "Limiet voor activering van condensatie voor dit Gemini-profiel. Wanneer de context dit percentage bereikt, wordt het automatisch gecondenseerd.", + "condensingTriggerAt": "Condensatie wordt geactiveerd bij", "tokenLimitTrigger": "Tokenlimiet-trigger", "actualTrigger": "Werkelijke trigger" } diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 62ea465eee..ab303bd386 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -239,9 +239,9 @@ "geminiSections": { "modelParameters": "Parametry modelu", "advancedFeatures": "Zaawansowane funkcje", - "geminiContextManagement": "Zarządzanie kontekstem Gemini" + "geminiTokentManagement": "Zarządzanie tokenami" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Użyj niestandardowego limitu okna kontekstu", "description": "Nadpisuje domyślne okno kontekstu modelu. Gdy rozmowy zbliżą się do tego limitu, Roo Code automatycznie skróci starsze wiadomości.", "modelDefault": "Domyślne okno kontekstu modelu", @@ -249,9 +249,7 @@ "tokens": "tokeny", "title": "Próg kondensacji kontekstu", "description": "Próg kondensacji kontekstu dla tego profilu Gemini. Gdy kontekst osiągnie ten procent, zostanie automatycznie skrócony.", - "condensingtriggerAt": "Kondensacja zostanie wyzwolona przy", - "tokenLimitTriggered": "wyzwolone z powodu limitu tokenów, a nie procentu", - "availableContext": "Dostępne okno kontekstu", + "condensingTriggerAt": "Kondensacja zostanie wyzwolona przy", "tokenLimitTrigger": "Wyzwalacz limitu tokenów", "actualTrigger": "Rzeczywisty wyzwalacz" } diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 76daad7d87..61130f238a 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -239,19 +239,17 @@ "geminiSections": { "modelParameters": "Parâmetros do modelo", "advancedFeatures": "Recursos avançados", - "geminiContextManagement": "Gerenciamento de contexto do Gemini" + "geminiTokentManagement": "Gerenciamento de Tokens" }, "geminiContextManagement": { "useCustomContextWindow": "Usar limite de janela de contexto personalizado", - "description": "Substitua a janela de contexto padrão do modelo. Quando as conversas se aproximarem desse limite, o Roo Code irá condensar automaticamente as mensagens mais antigas.", + "description": "Substitui o limite de janela de contexto padrão do modelo.", "modelDefault": "Janela de contexto padrão do modelo", "condensingThreshold": { "tokens": "tokens", - "title": "Limite de condensação de contexto", - "description": "Limite de condensação de contexto para este perfil Gemini. 
Quando o contexto atingir essa porcentagem, ele será automaticamente condensado.", - "condensingtriggerAt": "A condensação será acionada em", - "tokenLimitTriggered": "devido ao limite de tokens, não porcentagem", - "availableContext": "Janela de contexto disponível", + "title": "Limite de ativação de condensação", + "description": "Limite de ativação de condensação para este perfil Gemini. Quando o contexto atingir essa porcentagem, ele será condensado automaticamente.", + "condensingTriggerAt": "A condensação será acionada em", "tokenLimitTrigger": "Acionador de limite de tokens", "actualTrigger": "Acionador real" } diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 6710c3985b..9d5147aded 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -239,7 +239,7 @@ "geminiSections": { "modelParameters": "Параметры модели", "advancedFeatures": "Расширенные функции", - "geminiContextManagement": "Управление контекстом Gemini" + "geminiTokentManagement": "Управление контекстом Gemini" }, "geminiContextManagement": { "useCustomContextWindow": "Использовать ограничение окна контекста", @@ -248,10 +248,8 @@ "condensingThreshold": { "tokens": "токены", "title": "Порог конденсации контекста", - "description": "Порог конденсации контекста для этого профиля Gemini. Когда контекст достигает этого процента, он будет автоматически конденсироваться.", - "condensingtriggerAt": "Конденсация будет запущена при", - "tokenLimitTriggered": "из-за ограничения токенов, а не процента", - "availableContext": "Доступное окно контекста", + "description": "Порог (процент), при котором этот профиль Gemini сжимает контекст. При достижении порога контекст автоматически сжимается.", + "condensingTriggerAt": "Процент, при достижении которого запускается сжатие", "tokenLimitTrigger": "Триггер лимита токенов", "actualTrigger": "Фактический триггер" } diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 1936ce6e8a..f8796779a4 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -239,21 +239,21 @@ "geminiSections": { "modelParameters": "Model Parametreleri", "advancedFeatures": "Gelişmiş Özellikler", - "geminiContextManagement": "Gemini Bağlam Yönetimi" + "geminiTokentManagement": "Jeton Yönetimi" }, "geminiContextManagement": { "useCustomContextWindow": "Özel bağlam penceresi sınırı kullan", - "description": "Modelin varsayılan bağlam penceresini geçersiz kılar. Konuşmalar bu sınıra yaklaştığında, Roo Code eski mesajları otomatik olarak yoğunlaştırır.", + "description": "Modelin varsayılan bağlam penceresini geçersiz kılar.", "modelDefault": "Modelin varsayılan bağlam penceresi", "condensingThreshold": { "tokens": "jetonlar", "title": "Bağlam Yoğunlaştırma Eşiği", - "description": "Bu Gemini profili için bağlam yoğunlaştırma eşiği. Bağlam bu yüzdede olduğunda, otomatik olarak yoğunlaştırılır.", - "condensingtriggerAt": "Yoğunlaştırma şu anda tetiklenecek", - "tokenLimitTriggered": "yüzde değil jeton sınırı nedeniyle tetiklendi", + "description": "Otomatik yoğunlaştırmayı tetikleyen bağlam penceresi kullanım yüzdesini ayarlar. 
Not: Hesaplanan token limiti (çıkış tokenları ve güvenlik tamponu ayrıldıktan sonra) bu yüzde değerinin altındaysa, yoğunlaştırma token limiti nedeniyle tetiklenecektir.", + "condensingtriggerAt": "Yoğunlaştırma şu seviyede tetiklenecek", + "tokenLimitTriggered": "Yüzde değil, token limiti nedeniyle", "availableContext": "Kullanılabilir bağlam penceresi", - "tokenLimitTrigger": "Jeton sınırı tetikleyicisi", - "actualTrigger": "Gerçek tetikleyici" + "tokenLimitTrigger": "Token limiti tetikleyicisi (çıkış tokenları ve güvenlik tamponu ayrıldıktan sonra)", + "actualTrigger": "Gerçek tetikleyici (yüzde ve token limiti arasından düşük olan)" } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 04ea88801f..c2bfa0a8e3 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -239,9 +239,9 @@ "geminiSections": { "modelParameters": "Tham số mô hình", "advancedFeatures": "Tính năng nâng cao", - "geminiContextManagement": "Quản lý ngữ cảnh Gemini" + "geminiTokentManagement": "Quản lý token" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Sử dụng giới hạn cửa sổ ngữ cảnh tùy chỉnh", "description": "Ghi đè cửa sổ ngữ cảnh mặc định của mô hình. Khi cuộc trò chuyện tiếp cận giới hạn này, Roo Code sẽ tự động cô đọng các tin nhắn cũ hơn.", "modelDefault": "Cửa sổ ngữ cảnh mặc định của mô hình", @@ -249,9 +249,7 @@ "tokens": "token", "title": "Ngưỡng cô đọng ngữ cảnh", "description": "Ngưỡng cô đọng ngữ cảnh cho hồ sơ Gemini này. Khi ngữ cảnh đạt đến tỷ lệ phần trăm này, nó sẽ tự động được cô đọng.", - "condensingtriggerAt": "Cô đọng sẽ kích hoạt ở", - "tokenLimitTriggered": "do giới hạn token, không phải tỷ lệ phần trăm", - "availableContext": "Cửa sổ ngữ cảnh khả dụng", + "condensingTriggerAt": "Cô đọng sẽ kích hoạt ở", "tokenLimitTrigger": "Kích hoạt giới hạn token", "actualTrigger": "Kích hoạt thực tế" } diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 6675582ab8..fa2da6983d 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -239,11 +239,11 @@ "geminiSections": { "modelParameters": "模型参数", "advancedFeatures": "高级功能", - "geminiContextManagement": "Gemini 上下文管理" + "geminiTokentManagement": "令牌管理" }, "geminiContextManagement": { "useCustomContextWindow": "使用自定义上下文窗口限制", - "description": "覆盖模型的默认上下文窗口限制。当对话接近此限制时,Roo Code 会自动压缩较旧的消息。", + "description": "覆盖模型的默认上下文窗口限制。", "modelDefault": "模型的默认上下文窗口限制", "condensingThreshold": { "tokens": "令牌", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 5456e7bbb7..11b9644fc6 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -239,11 +239,11 @@ "geminiSections": { "modelParameters": "模型參數", "advancedFeatures": "進階功能", - "geminiContextManagement": "Gemini 上下文管理" + "geminiTokentManagement": "令牌管理" }, "geminiContextManagement": { "useCustomContextWindow": "使用自訂上下文視窗限制", - "description": "覆寫模型的預設上下文視窗限制。當對話接近此限制時,Roo Code 將自動濃縮較舊訊息。", + "description": "覆寫模型的預設上下文視窗限制。", "modelDefault": "模型的預設上下文視窗限制", "condensingThreshold": { "tokens": "代幣", From 449a8c2784a01f08fdfd879e15466cefdb4bb776 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 3 Jul 2025 15:28:17 +0100 
Subject: [PATCH 15/31] fix: more translations --- webview-ui/src/i18n/locales/ca/settings.json | 2 +- webview-ui/src/i18n/locales/de/settings.json | 6 ++---- webview-ui/src/i18n/locales/es/settings.json | 6 ++---- webview-ui/src/i18n/locales/fr/settings.json | 6 ++---- webview-ui/src/i18n/locales/hi/settings.json | 2 +- webview-ui/src/i18n/locales/id/settings.json | 6 ++---- webview-ui/src/i18n/locales/it/settings.json | 2 +- webview-ui/src/i18n/locales/ja/settings.json | 2 +- webview-ui/src/i18n/locales/ko/settings.json | 6 ++---- webview-ui/src/i18n/locales/nl/settings.json | 2 +- webview-ui/src/i18n/locales/pt-BR/settings.json | 2 +- webview-ui/src/i18n/locales/ru/settings.json | 4 ++-- webview-ui/src/i18n/locales/tr/settings.json | 6 ++---- webview-ui/src/i18n/locales/zh-CN/settings.json | 6 ++---- webview-ui/src/i18n/locales/zh-TW/settings.json | 6 ++---- 15 files changed, 24 insertions(+), 40 deletions(-) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index d6cb34476c..6a84ed9d6f 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Funcions avançades", "geminiTokentManagement": "Gestió de tokens" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Utilitzar límit de finestra de context personalitzada", "description": "Sobrescriu el límit de finestra de context predeterminat del model.", "modelDefault": "Finestra de context predeterminada del model", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index fef2be6323..0c0520a809 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Erweiterte Funktionen", "geminiTokentManagement": "Tokenverwaltung" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Benutzerdefiniertes Kontextfensterlimit verwenden", "description": "Überschreiben Sie das Standard-Kontextfenster des Modells.", "modelDefault": "Standard-Kontextfenster des Modells", @@ -249,9 +249,7 @@ "tokens": "Tokens", "title": "Schwellwert für Kontextverdichtung", "description": "Kontextverdichtungs-Schwellenwert für dieses Gemini-Profil. 
Wenn der Kontext diesen Prozentsatz erreicht, wird automatisch verdichtet.", - "condensingtriggerAt": "Verdichtung wird ausgelöst bei", - "tokenLimitTriggered": "aufgrund der Token-Grenze, nicht des Prozentsatzes", - "availableContext": "Verfügbares Kontextfenster", + "condensingTriggerAt": "Verdichtung wird ausgelöst bei", "tokenLimitTrigger": "Auslösung bei Token-Grenze", "actualTrigger": "Tatsächliche Auslösung" } diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 536e11f414..98005df70c 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Funciones avanzadas", "geminiTokentManagement": "Gestión de tokens" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Usar límite de ventana de contexto personalizado", "description": "Anular la ventana de contexto predeterminada del modelo.", "modelDefault": "Ventana de contexto predeterminada del modelo", @@ -249,9 +249,7 @@ "tokens": "tokens", "title": "Umbral de condensación de contexto", "description": "Establezca el porcentaje de uso de la ventana de contexto que activa la condensación automática. Nota: si el límite de tokens calculado (después de reservar espacio para la salida y buffers de seguridad) es inferior a este porcentaje, se activará la condensación por límite de tokens.", - "condensingtriggerAt": "La condensación se activará a", - "tokenLimitTriggered": "debido al límite de tokens, no porcentaje", - "availableContext": "Ventana de contexto disponible", + "condensingTriggerAt": "La condensación se activará a", "tokenLimitTrigger": "Activador de límite de tokens (después de reservar tokens de salida y buffer de seguridad)", "actualTrigger": "Activación real (mínimo de porcentaje y límite de tokens)" } diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 21165e0296..77f2de1dd7 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Fonctionnalités avancées", "geminiTokentManagement": "Gestion des jetons" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Utiliser la limite de fenêtre de contexte personnalisée", "description": "Remplacez la fenêtre de contexte par défaut du modèle.", "modelDefault": "Fenêtre de contexte par défaut du modèle", @@ -249,9 +249,7 @@ "tokens": "jetons", "title": "Seuil de condensation du contexte", "description": "Seuil de condensation du contexte pour ce profil Gemini. 
Lorsque le contexte atteint ce pourcentage, il sera automatiquement condensé.", - "condensingtriggerAt": "La condensation se déclenchera à", - "tokenLimitTriggered": "en raison de la limite de jetons, non du pourcentage", - "availableContext": "Fenêtre de contexte disponible", + "condensingTriggerAt": "La condensation se déclenchera à", "tokenLimitTrigger": "Déclencheur de limite de jetons", "actualTrigger": "Déclenchement réel" } diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 3daea603d5..48a05303e8 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "उन्नत सुविधाएँ", "geminiTokentManagement": "टोकन प्रबंधन" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "कस्टम संदर्भ विंडो सीमा का उपयोग करें", "description": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो सीमा को ओवरराइड करें।", "modelDefault": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index bee4e68e11..cffd732ad1 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -245,7 +245,7 @@ "advancedFeatures": "Fitur Lanjutan", "geminiTokentManagement": "Manajemen Token" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Gunakan batas jendela konteks kustom", "description": "Ganti jendela konteks default model.", "modelDefault": "Jendela konteks default model", @@ -253,9 +253,7 @@ "tokens": "token", "title": "Ambang Pemadatan Konteks", "description": "Atur persentase penggunaan jendela konteks yang memicu pemadatan otomatis. Catatan: Jika batas token yang dihitung (setelah menyisihkan token keluaran dan buffer keamanan) lebih rendah dari persentase ini, batas token akan memicu pemadatan sebagai gantinya.", - "condensingtriggerAt": "Pemadatan akan dipicu pada", - "tokenLimitTriggered": "karena batas token, bukan persentase", - "availableContext": "Jendela konteks tersedia", + "condensingTriggerAt": "Pemadatan akan dipicu pada", "tokenLimitTrigger": "Pemicu batas token (setelah menyisihkan token keluaran dan buffer keamanan)", "actualTrigger": "Pemicu aktual (nilai minimum antara persentase dan batas token)" } diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 4475dd4433..a42a290c5e 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Funzionalità avanzate", "geminiTokentManagement": "Gestione del contesto Gemini" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Usa limite di finestra di contesto personalizzato", "description": "Sovrascrive l'intervallo di contesto predefinito del modello. 
Quando le conversazioni raggiungono questo limite, Roo Code condenserà automaticamente i messaggi meno recenti.", "modelDefault": "Intervallo di contesto predefinito del modello", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 8f8ea09d71..4c9d8e6d1a 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "高度な機能", "geminiTokentManagement": "Gemini コンテキスト管理" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "カスタムコンテキストウィンドウ制限を使用", "description": "モデルのデフォルトのコンテキストウィンドウを上書きします。この制限に近づくと、Roo Codeは古いメッセージを自動的に要約します。", "modelDefault": "モデルのデフォルトのコンテキストウィンドウ", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 016ba36236..68187cd12d 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "고급 기능", "geminiTokentManagement": "토큰 관리" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "사용자 지정 컨텍스트 창 제한 사용", "description": "모델의 기본 컨텍스트 창을 재정의합니다.", "modelDefault": "모델의 기본 컨텍스트 창", @@ -249,9 +249,7 @@ "tokens": "토큰", "title": "컨텍스트 압축 임계값", "description": "자동 압축을 트리거하는 컨텍스트 창 사용 백분율을 설정합니다. 참고: 계산된 토큰 제한이(출력 토큰 및 안전 버퍼 예약 후) 이 백분율보다 낮으면, 대신 토큰 제한이 압축을 트리거합니다.", - "condensingtriggerAt": "압축은 다음 시점에서 트리거됩니다", - "tokenLimitTriggered": "퍼센티지가 아닌 토큰 제한으로 인해 트리거됩니다", - "availableContext": "사용 가능한 컨텍스트 창", + "condensingTriggerAt": "압축은 다음 시점에서 트리거됩니다", "tokenLimitTrigger": "토큰 제한 트리거 (출력 토큰 및 안전 버퍼 예약 후)", "actualTrigger": "실제 트리거 (백분율과 토큰 제한 중 최소값)" } diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 5d4dd84397..5c1e6238f1 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Geavanceerde functies", "geminiTokentManagement": "Tokenbeheer" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Gebruik aangepast contextvensterlimiet", "description": "Vervangt de standaardlimiet voor het contextvenster van het model.", "modelDefault": "Standaard contextvenster van het model", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 61130f238a..9a7af91544 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Recursos avançados", "geminiTokentManagement": "Gerenciamento de Tokens" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Usar limite de janela de contexto personalizado", "description": "Substitui o limite de janela de contexto padrão do modelo.", "modelDefault": "Janela de contexto padrão do modelo", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 9d5147aded..813ee79b5f 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -239,9 +239,9 @@ "geminiSections": { "modelParameters": "Параметры модели", "advancedFeatures": "Расширенные функции", - "geminiTokentManagement": "Управление контекстом Gemini" + "geminiTokentManagement": "Управление токенами" }, - "geminiContextManagement": { + "geminiTokentManagement": { 
"useCustomContextWindow": "Использовать ограничение окна контекста", "description": "Переопределяет окно контекста по умолчанию модели. Когда разговоры приближаются к этому пределу, Roo Code автоматически будет конденсировать старые сообщения.", "modelDefault": "Окно контекста по умолчанию модели", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index f8796779a4..bd3d315391 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "Gelişmiş Özellikler", "geminiTokentManagement": "Jeton Yönetimi" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "Özel bağlam penceresi sınırı kullan", "description": "Modelin varsayılan bağlam penceresini geçersiz kılar.", "modelDefault": "Modelin varsayılan bağlam penceresi", @@ -249,9 +249,7 @@ "tokens": "jetonlar", "title": "Bağlam Yoğunlaştırma Eşiği", "description": "Otomatik yoğunlaştırmayı tetikleyen bağlam penceresi kullanım yüzdesini ayarlar. Not: Hesaplanan token limiti (çıkış tokenları ve güvenlik tamponu ayrıldıktan sonra) bu yüzde değerinin altındaysa, yoğunlaştırma token limiti nedeniyle tetiklenecektir.", - "condensingtriggerAt": "Yoğunlaştırma şu seviyede tetiklenecek", - "tokenLimitTriggered": "Yüzde değil, token limiti nedeniyle", - "availableContext": "Kullanılabilir bağlam penceresi", + "condensingTriggerAt": "Yoğunlaştırma şu seviyede tetiklenecek", "tokenLimitTrigger": "Token limiti tetikleyicisi (çıkış tokenları ve güvenlik tamponu ayrıldıktan sonra)", "actualTrigger": "Gerçek tetikleyici (yüzde ve token limiti arasından düşük olan)" } diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index fa2da6983d..a85e38192a 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "高级功能", "geminiTokentManagement": "令牌管理" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "使用自定义上下文窗口限制", "description": "覆盖模型的默认上下文窗口限制。", "modelDefault": "模型的默认上下文窗口限制", @@ -249,9 +249,7 @@ "tokens": "令牌", "title": "上下文压缩阈值", "description": "此 Gemini 配置文件的上下文压缩阈值。当上下文达到此百分比时,将自动压缩。", - "condensingtriggerAt": "压缩将在达到时触发", - "tokenLimitTriggered": "由于令牌限制触发,而非百分比", - "availableContext": "可用上下文窗口", + "condensingTriggerAt": "压缩将在达到时触发", "tokenLimitTrigger": "令牌限制触发", "actualTrigger": "实际触发" } diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 11b9644fc6..413f82a660 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -241,7 +241,7 @@ "advancedFeatures": "進階功能", "geminiTokentManagement": "令牌管理" }, - "geminiContextManagement": { + "geminiTokentManagement": { "useCustomContextWindow": "使用自訂上下文視窗限制", "description": "覆寫模型的預設上下文視窗限制。", "modelDefault": "模型的預設上下文視窗限制", @@ -249,9 +249,7 @@ "tokens": "代幣", "title": "上下文濃縮閾值", "description": "此 Gemini 設定檔的上下文濃縮閾值。當上下文達到此百分比時,將自動濃縮。", - "condensingtriggerAt": "濃縮將在以下條件觸發", - "tokenLimitTriggered": "因代幣限制而非百分比", - "availableContext": "可用上下文視窗", + "condensingTriggerAt": "濃縮將在以下條件觸發", "tokenLimitTrigger": "代幣限制觸發", "actualTrigger": "實際觸發" } From f8c04c968db3f685e18c8a83ea7c8f4827956201 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 3 Jul 2025 16:57:40 +0100 
Subject: [PATCH 16/31] fix: translations --- webview-ui/src/i18n/locales/ca/settings.json | 33 ++++++++++++----- webview-ui/src/i18n/locales/de/settings.json | 36 +++++++++++++------ webview-ui/src/i18n/locales/es/settings.json | 33 ++++++++++++----- webview-ui/src/i18n/locales/fr/settings.json | 36 +++++++++++++------ webview-ui/src/i18n/locales/hi/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/id/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/it/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/ja/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/ko/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/nl/settings.json | 33 ++++++++++++----- webview-ui/src/i18n/locales/pl/settings.json | 35 +++++++++++++----- .../src/i18n/locales/pt-BR/settings.json | 33 ++++++++++++----- webview-ui/src/i18n/locales/ru/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/tr/settings.json | 35 +++++++++++++----- webview-ui/src/i18n/locales/vi/settings.json | 35 +++++++++++++----- .../src/i18n/locales/zh-CN/settings.json | 35 +++++++++++++----- .../src/i18n/locales/zh-TW/settings.json | 35 +++++++++++++----- 17 files changed, 438 insertions(+), 151 deletions(-) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 6a84ed9d6f..61e41a57d7 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Gestió de tokens" }, "geminiTokentManagement": { - "useCustomContextWindow": "Utilitzar límit de finestra de context personalitzada", - "description": "Sobrescriu el límit de finestra de context predeterminat del model.", - "modelDefault": "Finestra de context predeterminada del model", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { "tokens": "tokens", - "title": "Llindar d'activació de condensació", - "description": "Llindar d'activació de condensació per a aquest perfil de Gemini. Quan el context arribi a aquest percentatge, es condensarà automàticament.", - "condensingTriggerAt": "Condensació s'activarà a", - "tokenLimitTrigger": "Activació per límit de tokens", - "actualTrigger": "Activació real" + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Ruta del Codi Claude", "description": "Ruta opcional al teu CLI de Claude Code. 
Per defecte, 'claude' si no s'estableix.", "placeholder": "Per defecte: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 0c0520a809..9700fde647 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Tokenverwaltung" }, "geminiTokentManagement": { - "useCustomContextWindow": "Benutzerdefiniertes Kontextfensterlimit verwenden", - "description": "Überschreiben Sie das Standard-Kontextfenster des Modells.", - "modelDefault": "Standard-Kontextfenster des Modells", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "Tokens", - "title": "Schwellwert für Kontextverdichtung", - "description": "Kontextverdichtungs-Schwellenwert für dieses Gemini-Profil. Wenn der Kontext diesen Prozentsatz erreicht, wird automatisch verdichtet.", - "condensingTriggerAt": "Verdichtung wird ausgelöst bei", - "tokenLimitTrigger": "Auslösung bei Token-Grenze", - "actualTrigger": "Tatsächliche Auslösung" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Claude-Code-Pfad", "description": "Optionaler Pfad zu Ihrer Claude Code CLI. Standard ist 'claude', wenn nicht festgelegt.", "placeholder": "Standard: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { @@ -560,7 +577,6 @@ "name": "Marketplace aktivieren", "description": "Wenn aktiviert, kannst du MCP und benutzerdefinierte Modi aus dem Marketplace installieren und verwalten." }, - "MULTI_FILE_APPLY_DIFF": { "name": "Gleichzeitige Dateibearbeitungen aktivieren", "description": "Wenn aktiviert, kann Roo mehrere Dateien in einer einzigen Anfrage bearbeiten. Wenn deaktiviert, muss Roo Dateien einzeln bearbeiten. Das Deaktivieren kann helfen, wenn du mit weniger fähigen Modellen arbeitest oder mehr Kontrolle über Dateiänderungen haben möchtest." diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 98005df70c..c9bb0b12f8 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Gestión de tokens" }, "geminiTokentManagement": { - "useCustomContextWindow": "Usar límite de ventana de contexto personalizado", - "description": "Anular la ventana de contexto predeterminada del modelo.", - "modelDefault": "Ventana de contexto predeterminada del modelo", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { "tokens": "tokens", - "title": "Umbral de condensación de contexto", - "description": "Establezca el porcentaje de uso de la ventana de contexto que activa la condensación automática. Nota: si el límite de tokens calculado (después de reservar espacio para la salida y buffers de seguridad) es inferior a este porcentaje, se activará la condensación por límite de tokens.", - "condensingTriggerAt": "La condensación se activará a", - "tokenLimitTrigger": "Activador de límite de tokens (después de reservar tokens de salida y buffer de seguridad)", - "actualTrigger": "Activación real (mínimo de porcentaje y límite de tokens)" + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Ruta de Claude Code", "description": "Ruta opcional a su CLI de Claude Code. 
Por defecto, es 'claude' si no se establece.", "placeholder": "Por defecto: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 77f2de1dd7..1ef94327b4 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Gestion des jetons" }, "geminiTokentManagement": { - "useCustomContextWindow": "Utiliser la limite de fenêtre de contexte personnalisée", - "description": "Remplacez la fenêtre de contexte par défaut du modèle.", - "modelDefault": "Fenêtre de contexte par défaut du modèle", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "jetons", - "title": "Seuil de condensation du contexte", - "description": "Seuil de condensation du contexte pour ce profil Gemini. Lorsque le contexte atteint ce pourcentage, il sera automatiquement condensé.", - "condensingTriggerAt": "La condensation se déclenchera à", - "tokenLimitTrigger": "Déclencheur de limite de jetons", - "actualTrigger": "Déclenchement réel" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Chemin du code Claude", "description": "Chemin facultatif vers votre CLI Claude Code. La valeur par défaut est 'claude' si non défini.", "placeholder": "Défaut : claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { @@ -560,7 +577,6 @@ "name": "Activer le Marketplace", "description": "Lorsque cette option est activée, tu peux installer des MCP et des modes personnalisés depuis le Marketplace." }, - "MULTI_FILE_APPLY_DIFF": { "name": "Activer les éditions de fichiers concurrentes", "description": "Lorsque cette option est activée, Roo peut éditer plusieurs fichiers en une seule requête. Lorsqu'elle est désactivée, Roo doit éditer les fichiers un par un. Désactiver cette option peut aider lorsque tu travailles avec des modèles moins capables ou lorsque tu veux plus de contrôle sur les modifications de fichiers." diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 48a05303e8..f31afc4191 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "टोकन प्रबंधन" }, "geminiTokentManagement": { - "useCustomContextWindow": "कस्टम संदर्भ विंडो सीमा का उपयोग करें", - "description": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो सीमा को ओवरराइड करें।", - "modelDefault": "मॉडल की डिफ़ॉल्ट संदर्भ विंडो", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "टोकन", - "title": "संघनन ट्रिगर सीमा", - "description": "इस Gemini प्रोफ़ाइल के लिए संघनन ट्रिगर सीमा। जब संदर्भ इस प्रतिशत तक पहुंचता है, तो इसे स्वचालित रूप से संघनित किया जाएगा।", - "condensingTriggerAt": "संघनन इस पर ट्रिगर होगा", - "tokenLimitTrigger": "टोकन सीमा ट्रिगर", - "actualTrigger": "वास्तविक ट्रिगर" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "क्लाउड कोड पथ", "description": "आपके क्लाउड कोड सीएलआई का वैकल्पिक पथ। यदि सेट नहीं है तो डिफ़ॉल्ट 'claude' है।", "placeholder": "डिफ़ॉल्ट: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index cffd732ad1..81e19623e4 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -246,16 +246,18 @@ "geminiTokentManagement": "Manajemen Token" }, "geminiTokentManagement": { - "useCustomContextWindow": "Gunakan batas jendela konteks kustom", - "description": "Ganti jendela konteks default model.", - "modelDefault": "Jendela konteks default model", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "token", - "title": "Ambang Pemadatan Konteks", - "description": "Atur persentase penggunaan jendela konteks yang memicu pemadatan otomatis. Catatan: Jika batas token yang dihitung (setelah menyisihkan token keluaran dan buffer keamanan) lebih rendah dari persentase ini, batas token akan memicu pemadatan sebagai gantinya.", - "condensingTriggerAt": "Pemadatan akan dipicu pada", - "tokenLimitTrigger": "Pemicu batas token (setelah menyisihkan token keluaran dan buffer keamanan)", - "actualTrigger": "Pemicu aktual (nilai minimum antara persentase dan batas token)" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -356,6 +358,21 @@ "pathLabel": "Jalur Kode Claude", "description": "Jalur opsional ke Claude Code CLI Anda. Defaultnya adalah 'claude' jika tidak diatur.", "placeholder": "Default: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index a42a290c5e..81aabe553c 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Gestione del contesto Gemini" }, "geminiTokentManagement": { - "useCustomContextWindow": "Usa limite di finestra di contesto personalizzato", - "description": "Sovrascrive l'intervallo di contesto predefinito del modello. Quando le conversazioni raggiungono questo limite, Roo Code condenserà automaticamente i messaggi meno recenti.", - "modelDefault": "Intervallo di contesto predefinito del modello", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "token", - "title": "Soglia di condensazione del contesto", - "description": "La soglia (percentuale) alla quale questo profilo Gemini comprime il contesto. Quando viene raggiunta, il contesto viene automaticamente compresso.", - "condensingTriggerAt": "Percentuale alla quale viene avviata la condensazione", - "tokenLimitTrigger": "Attivazione limite di token", - "actualTrigger": "Attivazione effettiva" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Percorso Claude Code", "description": "Percorso facoltativo per la tua CLI Claude Code. Predefinito 'claude' se non impostato.", "placeholder": "Predefinito: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 4c9d8e6d1a..e30fbb6b11 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Gemini コンテキスト管理" }, "geminiTokentManagement": { - "useCustomContextWindow": "カスタムコンテキストウィンドウ制限を使用", - "description": "モデルのデフォルトのコンテキストウィンドウを上書きします。この制限に近づくと、Roo Codeは古いメッセージを自動的に要約します。", - "modelDefault": "モデルのデフォルトのコンテキストウィンドウ", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "トークン", - "title": "コンテキスト圧縮しきい値", - "description": "このGeminiプロファイルのしきい値(パーセンテージ)。到達すると、コンテキストは自動的に圧縮されます。", - "condensingTriggerAt": "圧縮がトリガーされる割合", - "tokenLimitTrigger": "トークン制限トリガー", - "actualTrigger": "実際のトリガー" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "クロードコードパス", "description": "Claude Code CLIへのオプションパス。設定されていない場合、デフォルトは「claude」です。", "placeholder": "デフォルト:claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 68187cd12d..0b6cf20649 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "토큰 관리" }, "geminiTokentManagement": { - "useCustomContextWindow": "사용자 지정 컨텍스트 창 제한 사용", - "description": "모델의 기본 컨텍스트 창을 재정의합니다.", - "modelDefault": "모델의 기본 컨텍스트 창", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "토큰", - "title": "컨텍스트 압축 임계값", - "description": "자동 압축을 트리거하는 컨텍스트 창 사용 백분율을 설정합니다. 참고: 계산된 토큰 제한이(출력 토큰 및 안전 버퍼 예약 후) 이 백분율보다 낮으면, 대신 토큰 제한이 압축을 트리거합니다.", - "condensingTriggerAt": "압축은 다음 시점에서 트리거됩니다", - "tokenLimitTrigger": "토큰 제한 트리거 (출력 토큰 및 안전 버퍼 예약 후)", - "actualTrigger": "실제 트리거 (백분율과 토큰 제한 중 최소값)" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "클로드 코드 경로", "description": "Claude Code CLI의 선택적 경로입니다. 설정하지 않으면 'claude'가 기본값입니다.", "placeholder": "기본값: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 5c1e6238f1..5b30d5638f 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Tokenbeheer" }, "geminiTokentManagement": { - "useCustomContextWindow": "Gebruik aangepast contextvensterlimiet", - "description": "Vervangt de standaardlimiet voor het contextvenster van het model.", - "modelDefault": "Standaard contextvenster van het model", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { "tokens": "tokens", - "title": "Limiet voor activering van condensatie", - "description": "Limiet voor activering van condensatie voor dit Gemini-profiel. Wanneer de context dit percentage bereikt, wordt het automatisch gecondenseerd.", - "condensingTriggerAt": "Condensatie wordt geactiveerd bij", - "tokenLimitTrigger": "Tokenlimiet-trigger", - "actualTrigger": "Werkelijke trigger" + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Claude Code Pad", "description": "Optioneel pad naar uw Claude Code CLI. Standaard 'claude' als niet ingesteld.", "placeholder": "Standaard: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index ab303bd386..041165b753 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Zarządzanie tokenami" }, "geminiTokentManagement": { - "useCustomContextWindow": "Użyj niestandardowego limitu okna kontekstu", - "description": "Nadpisuje domyślne okno kontekstu modelu. Gdy rozmowy zbliżą się do tego limitu, Roo Code automatycznie skróci starsze wiadomości.", - "modelDefault": "Domyślne okno kontekstu modelu", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "tokeny", - "title": "Próg kondensacji kontekstu", - "description": "Próg kondensacji kontekstu dla tego profilu Gemini. Gdy kontekst osiągnie ten procent, zostanie automatycznie skrócony.", - "condensingTriggerAt": "Kondensacja zostanie wyzwolona przy", - "tokenLimitTrigger": "Wyzwalacz limitu tokenów", - "actualTrigger": "Rzeczywisty wyzwalacz" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Ścieżka Claude Code", "description": "Opcjonalna ścieżka do Twojego CLI Claude Code. Domyślnie 'claude', jeśli nie ustawiono.", "placeholder": "Domyślnie: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 9a7af91544..e979fa9053 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Gerenciamento de Tokens" }, "geminiTokentManagement": { - "useCustomContextWindow": "Usar limite de janela de contexto personalizado", - "description": "Substitui o limite de janela de contexto padrão do modelo.", - "modelDefault": "Janela de contexto padrão do modelo", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { "tokens": "tokens", - "title": "Limite de ativação de condensação", - "description": "Limite de ativação de condensação para este perfil Gemini. Quando o contexto atingir essa porcentagem, ele será condensado automaticamente.", - "condensingTriggerAt": "A condensação será acionada em", - "tokenLimitTrigger": "Acionador de limite de tokens", - "actualTrigger": "Acionador real" + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Caminho do Claude Code", "description": "Caminho opcional para o seu Claude Code CLI. O padrão é 'claude' se não for definido.", "placeholder": "Padrão: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 813ee79b5f..6909a55fe3 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Управление токенами" }, "geminiTokentManagement": { - "useCustomContextWindow": "Использовать ограничение окна контекста", - "description": "Переопределяет окно контекста по умолчанию модели. Когда разговоры приближаются к этому пределу, Roo Code автоматически будет конденсировать старые сообщения.", - "modelDefault": "Окно контекста по умолчанию модели", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "токены", - "title": "Порог конденсации контекста", - "description": "Порог (процент), при котором этот профиль Gemini сжимает контекст. При достижении порога контекст автоматически сжимается.", - "condensingTriggerAt": "Процент, при достижении которого запускается сжатие", - "tokenLimitTrigger": "Триггер лимита токенов", - "actualTrigger": "Фактический триггер" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Путь к Claude Code", "description": "Необязательный путь к вашему Claude Code CLI. По умолчанию используется 'claude', если не установлено.", "placeholder": "По умолчанию: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index bd3d315391..cc09bfe450 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Jeton Yönetimi" }, "geminiTokentManagement": { - "useCustomContextWindow": "Özel bağlam penceresi sınırı kullan", - "description": "Modelin varsayılan bağlam penceresini geçersiz kılar.", - "modelDefault": "Modelin varsayılan bağlam penceresi", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "jetonlar", - "title": "Bağlam Yoğunlaştırma Eşiği", - "description": "Otomatik yoğunlaştırmayı tetikleyen bağlam penceresi kullanım yüzdesini ayarlar. Not: Hesaplanan token limiti (çıkış tokenları ve güvenlik tamponu ayrıldıktan sonra) bu yüzde değerinin altındaysa, yoğunlaştırma token limiti nedeniyle tetiklenecektir.", - "condensingTriggerAt": "Yoğunlaştırma şu seviyede tetiklenecek", - "tokenLimitTrigger": "Token limiti tetikleyicisi (çıkış tokenları ve güvenlik tamponu ayrıldıktan sonra)", - "actualTrigger": "Gerçek tetikleyici (yüzde ve token limiti arasından düşük olan)" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Claude Code Yolu", "description": "Claude Code CLI'nize isteğe bağlı yol. Ayarlanmazsa varsayılan olarak 'claude' kullanılır.", "placeholder": "Varsayılan: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index c2bfa0a8e3..06ad4eec2a 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "Quản lý token" }, "geminiTokentManagement": { - "useCustomContextWindow": "Sử dụng giới hạn cửa sổ ngữ cảnh tùy chỉnh", - "description": "Ghi đè cửa sổ ngữ cảnh mặc định của mô hình. Khi cuộc trò chuyện tiếp cận giới hạn này, Roo Code sẽ tự động cô đọng các tin nhắn cũ hơn.", - "modelDefault": "Cửa sổ ngữ cảnh mặc định của mô hình", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "token", - "title": "Ngưỡng cô đọng ngữ cảnh", - "description": "Ngưỡng cô đọng ngữ cảnh cho hồ sơ Gemini này. Khi ngữ cảnh đạt đến tỷ lệ phần trăm này, nó sẽ tự động được cô đọng.", - "condensingTriggerAt": "Cô đọng sẽ kích hoạt ở", - "tokenLimitTrigger": "Kích hoạt giới hạn token", - "actualTrigger": "Kích hoạt thực tế" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Đường dẫn Claude Code", "description": "Đường dẫn tùy chọn đến Claude Code CLI của bạn. Mặc định là 'claude' nếu không được đặt.", "placeholder": "Mặc định: claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index a85e38192a..7737944473 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "令牌管理" }, "geminiTokentManagement": { - "useCustomContextWindow": "使用自定义上下文窗口限制", - "description": "覆盖模型的默认上下文窗口限制。", - "modelDefault": "模型的默认上下文窗口限制", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "令牌", - "title": "上下文压缩阈值", - "description": "此 Gemini 配置文件的上下文压缩阈值。当上下文达到此百分比时,将自动压缩。", - "condensingTriggerAt": "压缩将在达到时触发", - "tokenLimitTrigger": "令牌限制触发", - "actualTrigger": "实际触发" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Claude Code 路径", "description": "您的 Claude Code CLI 的可选路径。如果未设置,则默认为 “claude”。", "placeholder": "默认:claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 413f82a660..dc6be2ed29 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -242,16 +242,18 @@ "geminiTokentManagement": "令牌管理" }, "geminiTokentManagement": { - "useCustomContextWindow": "使用自訂上下文視窗限制", - "description": "覆寫模型的預設上下文視窗限制。", - "modelDefault": "模型的預設上下文視窗限制", + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", "condensingThreshold": { - "tokens": "代幣", - "title": "上下文濃縮閾值", - "description": "此 Gemini 設定檔的上下文濃縮閾值。當上下文達到此百分比時,將自動濃縮。", - "condensingTriggerAt": "濃縮將在以下條件觸發", - "tokenLimitTrigger": "代幣限制觸發", - "actualTrigger": "實際觸發" + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { @@ -352,6 +354,21 @@ "pathLabel": "Claude Code 路徑", "description": "可選的 Claude Code CLI 路徑。如果未設定,則預設為 'claude'。", "placeholder": "預設:claude" + }, + "geminiContextManagement": { + "useCustomContextWindow": "Use custom context window limit", + "description": "Override the model's default context window.", + "modelDefault": "Model's default context window", + "condensingThreshold": { + "tokens": "tokens", + "title": "Context Condensing Threshold", + "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", + "condensingtriggerAt": "Condensing will trigger at", + "tokenLimitTriggered": "due to token limit, not percentage", + "availableContext": "Available context window", + "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", + "actualTrigger": "Actual trigger (minimum of percentage and token limit)" + } } }, "browser": { From 7e5a59d4ae3d48d01e83fd6fc16e988026359579 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Tue, 8 Jul 2025 00:31:17 +0100 Subject: [PATCH 17/31] fix: removing contextLimit and token management related code - due to the decision in: https://github.com/RooCodeInc/Roo-Code/issues/3717 --- packages/types/src/provider-settings.ts | 1 - src/api/providers/gemini.ts | 10 +- .../__tests__/sliding-window.spec.ts | 25 -- src/core/task/Task.ts | 9 +- .../src/components/settings/ApiOptions.tsx | 12 - .../src/components/settings/SettingsView.tsx | 23 -- .../components/settings/providers/Gemini.tsx | 237 +----------------- .../providers/__tests__/Gemini.spec.tsx | 54 +--- webview-ui/src/i18n/locales/de/settings.json | 37 +-- webview-ui/src/i18n/locales/en/settings.json | 19 -- webview-ui/src/i18n/locales/es/settings.json | 37 +-- webview-ui/src/i18n/locales/fr/settings.json | 37 +-- webview-ui/src/i18n/locales/hi/settings.json | 37 +-- webview-ui/src/i18n/locales/id/settings.json | 37 +-- webview-ui/src/i18n/locales/it/settings.json | 37 +-- webview-ui/src/i18n/locales/ja/settings.json | 37 +-- webview-ui/src/i18n/locales/ko/settings.json | 37 +-- webview-ui/src/i18n/locales/nl/settings.json | 37 +-- webview-ui/src/i18n/locales/pl/settings.json | 37 +-- .../src/i18n/locales/pt-BR/settings.json | 37 +-- webview-ui/src/i18n/locales/ru/settings.json | 37 +-- webview-ui/src/i18n/locales/tr/settings.json | 37 +-- webview-ui/src/i18n/locales/vi/settings.json | 37 +-- .../src/i18n/locales/zh-CN/settings.json | 37 +-- .../src/i18n/locales/zh-TW/settings.json | 37 +-- 25 files changed, 25 insertions(+), 957 deletions(-) diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 0c42d811df..7a978e6b47 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -162,7 +162,6 @@ const geminiSchema = apiModelIdProviderModelSchema.extend({ maxOutputTokens: z.number().optional(), enableUrlContext: z.boolean().optional(), enableGrounding: z.boolean().optional(), - contextLimit: z.number().optional(), }) const geminiCliSchema = apiModelIdProviderModelSchema.extend({ diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index 1fb10749b2..bbdc146528 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -65,8 +65,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl ): ApiStream { const { id: model, info, reasoning: thinkingConfig, maxTokens } = this.getModel() - const limitedMessages = this.options.contextLimit ? 
messages.slice(-this.options.contextLimit) : messages - const contents = limitedMessages.map(convertAnthropicMessageToGemini) + const contents = messages.map(convertAnthropicMessageToGemini) const tools: Array> = [] if (this.options.enableUrlContext) { @@ -147,13 +146,6 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl let info: ModelInfo = geminiModels[id] const params = getModelParams({ format: "gemini", modelId: id, model: info, settings: this.options }) - if (this.options.contextLimit) { - info = { - ...info, - contextWindow: this.options.contextLimit, - } - } - // The `:thinking` suffix indicates that the model is a "Hybrid" // reasoning model and that reasoning is required to be enabled. // The actual model ID honored by Gemini's API does not have this diff --git a/src/core/sliding-window/__tests__/sliding-window.spec.ts b/src/core/sliding-window/__tests__/sliding-window.spec.ts index d3837c7a23..b6f09125ac 100644 --- a/src/core/sliding-window/__tests__/sliding-window.spec.ts +++ b/src/core/sliding-window/__tests__/sliding-window.spec.ts @@ -250,31 +250,6 @@ describe("Sliding Window", () => { { role: "assistant", content: "Fourth message" }, { role: "user", content: "Fifth message" }, ] - it("should use contextLimit as contextWindow when apiProvider is gemini", async () => { - const contextLimit = 2 - const messages: ApiMessage[] = [ - { role: "user", content: "First message" }, - { role: "assistant", content: "Second message" }, - { role: "user", content: "Third message" }, - { role: "assistant", content: "Fourth message" }, - { role: "user", content: "" }, - ] - const result = await truncateConversationIfNeeded({ - messages, - totalTokens: 2, - contextWindow: contextLimit, - maxTokens: null, - apiHandler: mockApiHandler, - autoCondenseContext: false, - autoCondenseContextPercent: 100, - systemPrompt: "", - taskId, - profileThresholds: {}, - currentProfileId: "default", - }) - expect(result.messages).toEqual([messages[0], messages[3], messages[4]]) - }) - it("should not truncate if tokens are below max tokens threshold", async () => { const modelInfo = createModelInfo(100000, 30000) const dynamicBuffer = modelInfo.contextWindow * TOKEN_BUFFER_PERCENTAGE // 10000 diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index 38e9c51a17..f184e41fda 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -1714,12 +1714,11 @@ export class Task extends EventEmitter { ? this.apiConfiguration.modelMaxTokens || DEFAULT_THINKING_MODEL_MAX_TOKENS : modelInfo.maxTokens - const contextWindow = - this.apiConfiguration.apiProvider === "gemini" && this.apiConfiguration.contextLimit - ? this.apiConfiguration.contextLimit - : modelInfo.contextWindow + const contextWindow = modelInfo.contextWindow - const currentProfileId = state?.listApiConfigMeta.find((profile) => profile.name === state?.currentApiConfigName)?.id ?? "default"; + const currentProfileId = + state?.listApiConfigMeta.find((profile) => profile.name === state?.currentApiConfigName)?.id ?? 
+ "default" const truncateResult = await truncateConversationIfNeeded({ messages: this.apiConversationHistory, diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index f8e4764afc..166f96c524 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -74,10 +74,6 @@ export interface ApiOptionsProps { fromWelcomeView?: boolean errorMessage: string | undefined setErrorMessage: React.Dispatch> - currentProfileId?: string - profileThresholds?: Record - autoCondenseContextPercent?: number - setProfileThreshold?: (profileId: string, threshold: number) => void } const ApiOptions = ({ @@ -87,10 +83,6 @@ const ApiOptions = ({ fromWelcomeView, errorMessage, setErrorMessage, - currentProfileId, - profileThresholds, - autoCondenseContextPercent, - setProfileThreshold, }: ApiOptionsProps) => { const { t } = useAppTranslation() const { organizationAllowList } = useExtensionState() @@ -423,10 +415,6 @@ const ApiOptions = ({ apiConfiguration={apiConfiguration} setApiConfigurationField={setApiConfigurationField} currentModelId={selectedModelId} - currentProfileId={currentProfileId} - profileThresholds={profileThresholds} - autoCondenseContextPercent={autoCondenseContextPercent} - setProfileThreshold={setProfileThreshold} /> )} diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 94641660e8..c3b9179e37 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -180,15 +180,6 @@ const SettingsView = forwardRef(({ onDone, t const apiConfiguration = useMemo(() => cachedState.apiConfiguration ?? {}, [cachedState.apiConfiguration]) - const getCurrentProfileId = useCallback(() => { - if (!currentApiConfigName || !listApiConfigMeta) { - return currentApiConfigName - } - - const profile = listApiConfigMeta.find((p) => p.name === currentApiConfigName) - return profile ? profile.id : currentApiConfigName - }, [currentApiConfigName, listApiConfigMeta]) - useEffect(() => { // Update only when currentApiConfigName is changed. // Expected to be triggered by loadApiConfiguration/upsertApiConfiguration. @@ -245,16 +236,6 @@ const SettingsView = forwardRef(({ onDone, t }) }, []) - const setProfileThreshold = useCallback( - (profileId: string, threshold: number) => { - setCachedStateField("profileThresholds", { - ...profileThresholds, - [profileId]: threshold, - }) - }, - [profileThresholds, setCachedStateField], - ) - const setTelemetrySetting = useCallback((setting: TelemetrySetting) => { setCachedState((prevState) => { if (prevState.telemetrySetting === setting) { @@ -601,10 +582,6 @@ const SettingsView = forwardRef(({ onDone, t setApiConfigurationField={setApiConfigurationField} errorMessage={errorMessage} setErrorMessage={setErrorMessage} - currentProfileId={getCurrentProfileId()} - profileThresholds={profileThresholds || {}} - autoCondenseContextPercent={autoCondenseContextPercent || 75} - setProfileThreshold={setProfileThreshold} />
diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 971ba8d71c..f3fe6ec49e 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -8,7 +8,6 @@ import { geminiModels, geminiDefaultModelId, type GeminiModelId } from "@roo-cod import { useAppTranslation } from "@src/i18n/TranslationContext" import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" -import { vscode } from "@src/utils/vscode" import { inputEventTransform } from "../transforms" @@ -16,31 +15,15 @@ type GeminiProps = { apiConfiguration: ProviderSettings setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void currentModelId?: string - currentProfileId?: string - profileThresholds?: Record - autoCondenseContextPercent?: number - setProfileThreshold?: (profileId: string, threshold: number) => void } -export const Gemini = ({ - apiConfiguration, - setApiConfigurationField, - currentModelId, - currentProfileId, - profileThresholds = {}, - autoCondenseContextPercent = 75, - setProfileThreshold, -}: GeminiProps) => { +export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentModelId }: GeminiProps) => { const { t } = useAppTranslation() const [googleGeminiBaseUrlSelected, setGoogleGeminiBaseUrlSelected] = useState( !!apiConfiguration?.googleGeminiBaseUrl, ) - const [isCustomContextLimit, setIsCustomContextLimit] = useState( - apiConfiguration?.contextLimit !== undefined && apiConfiguration?.contextLimit !== null, - ) - const modelInfo = useMemo(() => { const modelId = ( currentModelId && currentModelId in geminiModels ? currentModelId : geminiDefaultModelId @@ -48,54 +31,6 @@ export const Gemini = ({ return geminiModels[modelId] }, [currentModelId]) - const getCurrentThreshold = useCallback(() => { - if (!currentProfileId) return autoCondenseContextPercent - - const profileThreshold = profileThresholds[currentProfileId] - if (profileThreshold === undefined || profileThreshold === -1) { - return autoCondenseContextPercent - } - return profileThreshold - }, [currentProfileId, profileThresholds, autoCondenseContextPercent]) - - const handleThresholdChange = useCallback( - (newThreshold: number) => { - if (!currentProfileId || !setProfileThreshold) return - - setProfileThreshold(currentProfileId, newThreshold) - - vscode.postMessage({ - type: "profileThresholds", - values: { - ...profileThresholds, - [currentProfileId]: newThreshold, - }, - }) - }, - [currentProfileId, profileThresholds, setProfileThreshold], - ) - - const getTriggerDetails = useCallback(() => { - const contextWindow = apiConfiguration?.contextLimit || modelInfo?.contextWindow || 1048576 - const threshold = getCurrentThreshold() - - const TOKEN_BUFFER_PERCENTAGE = 0.1 - const maxTokens = modelInfo?.maxTokens - const reservedTokens = maxTokens || contextWindow * 0.2 - const allowedTokens = Math.floor(contextWindow * (1 - TOKEN_BUFFER_PERCENTAGE) - reservedTokens) - - const percentageBasedTrigger = Math.floor(contextWindow * (threshold / 100)) - - return { - percentageBasedTrigger, - allowedTokens, - actualTrigger: Math.min(percentageBasedTrigger, allowedTokens), - triggerReason: allowedTokens < percentageBasedTrigger ? 
"token-limit" : "percentage-threshold", - maxTokens, - reservedTokens, - } - }, [apiConfiguration?.contextLimit, modelInfo, getCurrentThreshold]) - const handleInputChange = useCallback( ( field: K, @@ -225,176 +160,6 @@ export const Gemini = ({
-
-

- {t("settings:providers.geminiSections.geminiTokentManagement")} -

-
- { - setIsCustomContextLimit(checked) - if (!checked) { - setApiConfigurationField("contextLimit", null) - } else { - setApiConfigurationField( - "contextLimit", - apiConfiguration.contextLimit ?? modelInfo?.contextWindow ?? 1048576, - ) - } - }}> - - -
- {t("settings:providers.geminiContextManagement.description")} -
- -
- {t("settings:providers.geminiContextManagement.modelDefault")}:{" "} - {(modelInfo?.contextWindow || 1048576).toLocaleString()}{" "} - {t("settings:providers.geminiContextManagement.condensingThreshold.tokens")} -
- - {isCustomContextLimit && ( -
-
-
- setApiConfigurationField("contextLimit", value)} - /> - { - const val = - apiConfiguration.contextLimit ?? modelInfo?.contextWindow ?? 1048576 - return Number.isNaN(val) ? "" : val.toString() - })()} - type="text" - inputMode="numeric" - onInput={handleInputChange("contextLimit", (e) => { - const val = parseInt((e as any).target.value, 10) - return Number.isNaN(val) ? undefined : val - })} - className="w-24" - /> -
-
-
- )} -
- - {currentProfileId && ( -
- -
- {t("settings:providers.geminiContextManagement.condensingThreshold.description")} -
- -
- handleThresholdChange(value)} - className="flex-grow" - /> - { - const value = parseInt((e.target as HTMLInputElement).value, 10) - if (!isNaN(value) && value >= 5 && value <= 100) { - handleThresholdChange(value) - } - }} - className="w-16" - /> - % -
- -
- {(() => { - const details = getTriggerDetails() - return ( - <> -
- - {t( - "settings:providers.geminiContextManagement.condensingThreshold.condensingtriggerAt", - )} - : - {" "} - {details.actualTrigger.toLocaleString()}{" "} - {t("settings:providers.geminiContextManagement.condensingThreshold.tokens")} - {details.triggerReason === "token-limit" && ( - - ( - {t( - "settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTriggered", - )} - ) - - )} -
-
- - {t( - "settings:providers.geminiContextManagement.condensingThreshold.availableContext", - )} - : - {" "} - {( - apiConfiguration?.contextLimit || - modelInfo?.contextWindow || - 1048576 - ).toLocaleString()}{" "} - {t("settings:providers.geminiContextManagement.condensingThreshold.tokens")} -
-
-
- - {t( - "settings:providers.geminiContextManagement.condensingThreshold.tokenLimitTrigger", - )} - : - {" "} - {details.allowedTokens.toLocaleString()}{" "} - {t( - "settings:providers.geminiContextManagement.condensingThreshold.tokens", - )} -
-
- - {t( - "settings:providers.geminiContextManagement.condensingThreshold.actualTrigger", - )} - : - {" "} - {details.actualTrigger.toLocaleString()}{" "} - {t( - "settings:providers.geminiContextManagement.condensingThreshold.tokens", - )} -
-
- - ) - })()} -
-
- )} -
-

{t("settings:providers.geminiSections.advancedFeatures")} diff --git a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx index f36ef10785..2a420c6c02 100644 --- a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx +++ b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx @@ -1,7 +1,6 @@ -import { render, screen, fireEvent } from "@testing-library/react" +import { render, screen } from "@testing-library/react" import { Gemini } from "../Gemini" import type { ProviderSettings } from "@roo-code/types" -import { geminiModels, geminiDefaultModelId, type GeminiModelId } from "@roo-code/types" vi.mock("@vscode/webview-ui-toolkit/react", () => ({ VSCodeTextField: ({ children, value, onInput, type }: any) => ( @@ -43,60 +42,13 @@ vi.mock("@src/components/common/VSCodeButtonLink", () => ({ VSCodeButtonLink: ({ children, href }: any) => {children}, })) -const defaultModelId: GeminiModelId = geminiDefaultModelId -const defaultContextWindow = geminiModels[defaultModelId].contextWindow - describe("Gemini provider settings", () => { - it("does not render context limit slider when custom context limit is not enabled", () => { + it("renders sliders for topP, topK and maxOutputTokens", () => { const setApiField = vi.fn() const config: ProviderSettings = {} - render( - , - ) - expect(screen.queryByTestId("slider-context-limit")).toBeNull() - + render() expect(screen.getByTestId("slider-top-p")).toBeInTheDocument() expect(screen.getByTestId("slider-top-k")).toBeInTheDocument() expect(screen.getByTestId("slider-max-output-tokens")).toBeInTheDocument() }) - - it("enables custom context limit on checkbox toggle and shows slider with default value", () => { - const setApiField = vi.fn() - const config: ProviderSettings = {} - const { rerender } = render( - , - ) - - const checkbox = screen.getByTestId("checkbox-custom-context-limit") - fireEvent.click(checkbox) - - expect(setApiField).toHaveBeenCalledWith("contextLimit", defaultContextWindow) - - const updatedConfig = { ...config, contextLimit: defaultContextWindow } - rerender( - , - ) - - const slider = screen.getByTestId("slider-context-limit") - expect(slider).toHaveValue(defaultContextWindow.toString()) - }) - - it("renders slider when contextLimit already set and updates on slider change", () => { - const setApiField = vi.fn() - const initialLimit = 100000 - const config: ProviderSettings = { contextLimit: initialLimit } - render( - , - ) - - const slider = screen.getByTestId("slider-context-limit") - expect(slider).toHaveValue(initialLimit.toString()) - - fireEvent.change(slider, { target: { value: "50000" } }) - expect(setApiField).toHaveBeenCalledWith("contextLimit", 50000) - }) }) diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 57ede3f16a..1668cb0f0c 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Grounding mit Google-Suche aktivieren", "description": "Ermöglicht es Gemini, Google nach aktuellen Informationen zu durchsuchen und Antworten auf Echtzeitdaten zu stützen. Nützlich für Abfragen, die aktuelle Informationen erfordern." - }, - "contextLimit": { - "title": "Kontextlimit", - "description": "Maximale Anzahl vorheriger Nachrichten, die in den Kontext einbezogen werden. 
Niedrigere Werte reduzieren den Tokenverbrauch und die Kosten, können jedoch die Kontinuität der Konversation einschränken." } }, "geminiSections": { "modelParameters": "Model Parameter", - "advancedFeatures": "Erweiterte Funktionen", - "geminiTokentManagement": "Tokenverwaltung" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Erweiterte Funktionen" }, "googleCloudSetup": { "title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:", @@ -366,21 +346,6 @@ "pathLabel": "Claude-Code-Pfad", "description": "Optionaler Pfad zu Ihrer Claude Code CLI. Standard ist 'claude', wenn nicht festgelegt.", "placeholder": "Standard: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index d72fa504f7..555231ab29 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -247,25 +247,6 @@ "groundingSearch": { "title": "Enable Grounding with Google Search", "description": "Enables Gemini to search Google for current information and ground responses in real-time data. Useful for queries requiring up-to-date information." - }, - "contextLimit": { - "title": "Context Limit", - "description": "Maximum number of previous messages to include in context. Lower values reduce token usage and costs but may limit conversation continuity." 
- } - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index d76d200781..45d8746e47 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Habilitar grounding con búsqueda en Google", "description": "Permite que Gemini busque en Google información actual y fundamente las respuestas en datos en tiempo real. Útil para consultas que requieren información actualizada." - }, - "contextLimit": { - "title": "Límite de contexto", - "description": "Número máximo de mensajes anteriores que se incluirán en el contexto. Valores más bajos reducen el uso de tokens y los costos, pero pueden limitar la continuidad de la conversación." } }, "geminiSections": { "modelParameters": "Parámetros del modelo", - "advancedFeatures": "Funciones avanzadas", - "geminiTokentManagement": "Gestión de tokens" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Funciones avanzadas" }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", @@ -366,21 +346,6 @@ "pathLabel": "Ruta de Claude Code", "description": "Ruta opcional a su CLI de Claude Code. Por defecto, es 'claude' si no se establece.", "placeholder": "Por defecto: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 77a86d1926..acbd732623 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Activer la mise en contexte via la recherche Google", "description": "Permet à Gemini d'effectuer des recherches sur Google pour obtenir des informations actuelles et fonder les réponses sur des données en temps réel. Utile pour les requêtes nécessitant des informations à jour." - }, - "contextLimit": { - "title": "Limite de contexte", - "description": "Nombre maximum de messages précédents à inclure dans le contexte. Des valeurs plus faibles réduisent l'utilisation des tokens et les coûts, mais peuvent limiter la continuité de la conversation." } }, "geminiSections": { "modelParameters": "Paramètres du modèle", - "advancedFeatures": "Fonctionnalités avancées", - "geminiTokentManagement": "Gestion des jetons" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Fonctionnalités avancées" }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", @@ -366,21 +346,6 @@ "pathLabel": "Chemin du code Claude", "description": "Chemin facultatif vers votre CLI Claude Code. La valeur par défaut est 'claude' si non défini.", "placeholder": "Défaut : claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index ae6ee9acc9..1cf2a23194 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Google खोज के साथ ग्राउंडिंग सक्षम करें", "description": "Gemini को वास्तविक समय के डेटा पर आधारित उत्तर प्रदान करने के लिए Google पर जानकारी खोजने और उत्तरों को ग्राउंड करने की अनुमति देता है। अद्यतित जानकारी की आवश्यकता वाली क्वेरीज़ के लिए उपयोगी।" - }, - "contextLimit": { - "title": "संदर्भ सीमा", - "description": "संदर्भ में शामिल करने के लिए पिछले संदेशों की अधिकतम संख्या। निम्न मान टोकन उपयोग और लागत कम करते हैं, लेकिन बातचीत की निरंतरता सीमित कर सकते हैं।" } }, "geminiSections": { "modelParameters": "मॉडल पैरामीटर", - "advancedFeatures": "उन्नत सुविधाएँ", - "geminiTokentManagement": "टोकन प्रबंधन" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "उन्नत सुविधाएँ" }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", @@ -366,21 +346,6 @@ "pathLabel": "क्लाउड कोड पथ", "description": "आपके क्लाउड कोड सीएलआई का वैकल्पिक पथ। यदि सेट नहीं है तो डिफ़ॉल्ट 'claude' है।", "placeholder": "डिफ़ॉल्ट: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 46a5d663da..d1c9b115fc 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -246,31 +246,11 @@ "groundingSearch": { "title": "Aktifkan Grounding dengan Pencarian Google", "description": "Memungkinkan Gemini mencari informasi terkini di Google dan mendasarkan respons pada data waktu nyata. Berguna untuk kueri yang memerlukan informasi terkini." - }, - "contextLimit": { - "title": "Batas Konteks", - "description": "Jumlah maksimum pesan sebelumnya yang disertakan dalam konteks. Nilai lebih rendah mengurangi penggunaan token dan biaya tetapi dapat membatasi kelanjutan percakapan." } }, "geminiSections": { "modelParameters": "Parameter Model", - "advancedFeatures": "Fitur Lanjutan", - "geminiTokentManagement": "Manajemen Token" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Fitur Lanjutan" }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", @@ -370,21 +350,6 @@ "pathLabel": "Jalur Kode Claude", "description": "Jalur opsional ke Claude Code CLI Anda. Defaultnya adalah 'claude' jika tidak diatur.", "placeholder": "Default: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 7043dfd968..beab75b773 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Abilita grounding con ricerca Google", "description": "Consente a Gemini di cercare informazioni aggiornate su Google e basare le risposte su dati in tempo reale. Utile per query che richiedono informazioni aggiornate." - }, - "contextLimit": { - "title": "Limite di contesto", - "description": "Numero massimo di messaggi precedenti da includere nel contesto. Valori più bassi riducono l'utilizzo dei token e i costi ma possono limitare la continuità della conversazione." } }, "geminiSections": { "modelParameters": "Parametri del modello", - "advancedFeatures": "Funzionalità avanzate", - "geminiTokentManagement": "Gestione del contesto Gemini" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Funzionalità avanzate" }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", @@ -366,21 +346,6 @@ "pathLabel": "Percorso Claude Code", "description": "Percorso facoltativo per la tua CLI Claude Code. Predefinito 'claude' se non impostato.", "placeholder": "Predefinito: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index f643d22612..0f5eb73173 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Google検索でのグラウンディングを有効にする", "description": "GeminiがGoogleを検索して最新情報を取得し、リアルタイムデータに基づいて応答をグラウンディングできるようにします。最新情報が必要なクエリに便利です。" - }, - "contextLimit": { - "title": "コンテキスト制限", - "description": "コンテキストに含める過去のメッセージの最大数。値を小さくするとトークン使用量とコストが削減されますが、会話の連続性が制限される場合があります。" } }, "geminiSections": { "modelParameters": "モデルパラメータ", - "advancedFeatures": "高度な機能", - "geminiTokentManagement": "Gemini コンテキスト管理" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "高度な機能" }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", @@ -366,21 +346,6 @@ "pathLabel": "クロードコードパス", "description": "Claude Code CLIへのオプションパス。設定されていない場合、デフォルトは「claude」です。", "placeholder": "デフォルト:claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 080ae9e6e3..d64865a5e4 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Google 검색과 함께 근거 지정 활성화", "description": "Gemini가 최신 정보를 얻기 위해 Google을 검색하고 응답을 실시간 데이터에 근거하도록 합니다. 최신 정보가 필요한 쿼리에 유용합니다." - }, - "contextLimit": { - "title": "컨텍스트 제한", - "description": "컨텍스트에 포함할 이전 메시지의 최대 수입니다. 낮은 값은 토큰 사용량과 비용을 줄이지만 대화 연속성이 제한될 수 있습니다." } }, "geminiSections": { "modelParameters": "모델 매개변수", - "advancedFeatures": "고급 기능", - "geminiTokentManagement": "토큰 관리" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "고급 기능" }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", @@ -366,21 +346,6 @@ "pathLabel": "클로드 코드 경로", "description": "Claude Code CLI의 선택적 경로입니다. 설정하지 않으면 'claude'가 기본값입니다.", "placeholder": "기본값: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 4198b1ebe3..e98db08ac9 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Grounding met Google-zoekopdracht inschakelen", "description": "Staat Gemini toe om Google te doorzoeken voor actuele informatie en antwoorden op realtime gegevens te baseren. Handig voor vragen die actuele informatie vereisen." - }, - "contextLimit": { - "title": "Contextlimiet", - "description": "Maximaal aantal vorige berichten dat in de context wordt opgenomen. Lagere waarden verlagen het tokengebruik en de kosten, maar kunnen de continuïteit van het gesprek beperken." } }, "geminiSections": { "modelParameters": "Modelparameters", - "advancedFeatures": "Geavanceerde functies", - "geminiTokentManagement": "Tokenbeheer" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Geavanceerde functies" }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", @@ -366,21 +346,6 @@ "pathLabel": "Claude Code Pad", "description": "Optioneel pad naar uw Claude Code CLI. Standaard 'claude' als niet ingesteld.", "placeholder": "Standaard: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 0d43001448..21c9d617f3 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Włącz grounding przy użyciu wyszukiwarki Google", "description": "Pozwala Gemini przeszukiwać Google w celu uzyskania aktualnych informacji i opierać odpowiedzi na danych w czasie rzeczywistym. Przydatne w zapytaniach wymagających najnowszych informacji." - }, - "contextLimit": { - "title": "Limit kontekstu", - "description": "Maksymalna liczba poprzednich wiadomości uwzględnianych w kontekście. Niższe wartości zmniejszają użycie tokenów i koszty, ale mogą ograniczać ciągłość rozmowy." } }, "geminiSections": { "modelParameters": "Parametry modelu", - "advancedFeatures": "Zaawansowane funkcje", - "geminiTokentManagement": "Zarządzanie tokenami" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Zaawansowane funkcje" }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", @@ -366,21 +346,6 @@ "pathLabel": "Ścieżka Claude Code", "description": "Opcjonalna ścieżka do Twojego CLI Claude Code. Domyślnie 'claude', jeśli nie ustawiono.", "placeholder": "Domyślnie: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index d6dd8dea53..4c8b730446 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Ativar grounding com pesquisa no Google", "description": "Permite que o Gemini pesquise informações atuais no Google e fundamente as respostas em dados em tempo real. Útil para consultas que requerem informações atualizadas." - }, - "contextLimit": { - "title": "Limite de contexto", - "description": "Número máximo de mensagens anteriores a incluir no contexto. Valores mais baixos reduzem o uso de tokens e os custos, mas podem limitar a continuidade da conversa." } }, "geminiSections": { "modelParameters": "Parâmetros do modelo", - "advancedFeatures": "Recursos avançados", - "geminiTokentManagement": "Gerenciamento de Tokens" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Recursos avançados" }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", @@ -366,21 +346,6 @@ "pathLabel": "Caminho do Claude Code", "description": "Caminho opcional para o seu Claude Code CLI. O padrão é 'claude' se não for definido.", "placeholder": "Padrão: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 09a75c9850..0b836a3616 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Включить grounding через поиск Google", "description": "Позволяет Gemini искать актуальную информацию в Google и основывать ответы на данных в реальном времени. Полезно для запросов, требующих актуальной информации." - }, - "contextLimit": { - "title": "Ограничение контекста", - "description": "Максимальное число предыдущих сообщений, включаемых в контекст. Более низкие значения снижают использование токенов и стоимость, но могут ограничить непрерывность разговора." } }, "geminiSections": { "modelParameters": "Параметры модели", - "advancedFeatures": "Расширенные функции", - "geminiTokentManagement": "Управление токенами" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Расширенные функции" }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", @@ -366,21 +346,6 @@ "pathLabel": "Путь к Claude Code", "description": "Необязательный путь к вашему Claude Code CLI. По умолчанию используется 'claude', если не установлено.", "placeholder": "По умолчанию: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 620b782132..c236a3c843 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Google Aramasıyla Grounding Etkinleştir", "description": "Gemini'nin güncel bilgileri almak için Google'da arama yapmasına ve yanıtları gerçek zamanlı verilere dayandırmasına izin verir. Güncel bilgi gerektiren sorgular için kullanışlıdır." - }, - "contextLimit": { - "title": "Bağlam Sınırı", - "description": "Bağlamda dahil edilecek önceki mesajların maksimum sayısı. Daha düşük değerler token kullanımını ve maliyeti azaltır, ancak konuşmanın devamlılığını sınırlayabilir." } }, "geminiSections": { "modelParameters": "Model Parametreleri", - "advancedFeatures": "Gelişmiş Özellikler", - "geminiTokentManagement": "Jeton Yönetimi" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Gelişmiş Özellikler" }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", @@ -366,21 +346,6 @@ "pathLabel": "Claude Code Yolu", "description": "Claude Code CLI'nize isteğe bağlı yol. Ayarlanmazsa varsayılan olarak 'claude' kullanılır.", "placeholder": "Varsayılan: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index c731e5a162..4a54d074df 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "Bật grounding với tìm kiếm Google", "description": "Cho phép Gemini tìm kiếm trên Google để lấy thông tin mới nhất và căn cứ phản hồi dựa trên dữ liệu thời gian thực. Hữu ích cho các truy vấn yêu cầu thông tin cập nhật." - }, - "contextLimit": { - "title": "Giới hạn ngữ cảnh", - "description": "Số lượng tối đa các tin nhắn trước đó được đưa vào ngữ cảnh. Giá trị thấp hơn giảm mức sử dụng token và chi phí nhưng có thể hạn chế tính liên tục của cuộc trò chuyện." } }, "geminiSections": { "modelParameters": "Tham số mô hình", - "advancedFeatures": "Tính năng nâng cao", - "geminiTokentManagement": "Quản lý token" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Tính năng nâng cao" }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", @@ -366,21 +346,6 @@ "pathLabel": "Đường dẫn Claude Code", "description": "Đường dẫn tùy chọn đến Claude Code CLI của bạn. Mặc định là 'claude' nếu không được đặt.", "placeholder": "Mặc định: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 3995ea2535..07fe4470db 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "启用Google搜索落地", "description": "允许Gemini在Google中搜索最新信息,并在实时数据的基础上生成响应。适用于需要最新信息的查询。" - }, - "contextLimit": { - "title": "上下文限制", - "description": "包括在上下文中的先前消息的最大数量。较低的值可减少令牌使用量和成本,但可能限制对话连续性。" } }, "geminiSections": { "modelParameters": "模型参数", - "advancedFeatures": "高级功能", - "geminiTokentManagement": "令牌管理" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "高级功能" }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", @@ -366,21 +346,6 @@ "pathLabel": "Claude Code 路径", "description": "您的 Claude Code CLI 的可选路径。如果未设置,则默认为 “claude”。", "placeholder": "默认:claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index d47f91343b..d1b9bbdfd7 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -242,31 +242,11 @@ "groundingSearch": { "title": "啟用使用 Google 搜索進行基礎支持", "description": "在生成期間使用 Google 搜索以獲取最新資訊並將其包含在上下文中。" - }, - "contextLimit": { - "title": "上下文限制", - "description": "生成期間要包含的最大上下文大小(以代幣為單位)。" } }, "geminiSections": { "modelParameters": "模型參數", - "advancedFeatures": "進階功能", - "geminiTokentManagement": "令牌管理" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "進階功能" }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", @@ -366,21 +346,6 @@ "pathLabel": "Claude Code 路徑", "description": "可選的 Claude Code CLI 路徑。如果未設定,則預設為 'claude'。", "placeholder": "預設:claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { From edb96c62b18c60a2dff04da2ad906206f2f0d5ee Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Tue, 8 Jul 2025 00:46:35 +0100 Subject: [PATCH 18/31] fix: removing `contextLimit` test and removing token management in translations --- .../__tests__/gemini-handler.spec.ts | 18 ---------- webview-ui/src/i18n/locales/ca/settings.json | 33 +------------------ webview-ui/src/i18n/locales/en/settings.json | 3 +- 3 files changed, 2 insertions(+), 52 deletions(-) diff --git a/src/api/providers/__tests__/gemini-handler.spec.ts b/src/api/providers/__tests__/gemini-handler.spec.ts index 2805593ca5..5f9e088ff2 100644 --- a/src/api/providers/__tests__/gemini-handler.spec.ts +++ b/src/api/providers/__tests__/gemini-handler.spec.ts @@ -1,26 +1,8 @@ import { describe, it, expect, vi } from "vitest" import { GeminiHandler } from "../gemini" import type { ApiHandlerOptions } from "../../../shared/api" -import type { Anthropic } from "@anthropic-ai/sdk" describe("GeminiHandler backend support", () => { - it("slices messages when contextLimit is set", async () => { - const options = { apiProvider: "gemini", contextLimit: 1 } as ApiHandlerOptions - const handler = new GeminiHandler(options) - const stub = vi.fn().mockReturnValue((async function* () {})()) - // @ts-ignore access private client - handler["client"].models.generateContentStream = stub - const messages = [ - { role: "user", content: [{ type: "text", text: "first" }] }, - { role: "assistant", content: [{ type: "text", text: "second" }] }, - ] as Anthropic.Messages.MessageParam[] - for await (const _ of handler.createMessage("instr", messages)) { - } - expect(stub).toHaveBeenCalledOnce() - const params = stub.mock.calls[0][0] - expect(params.contents).toHaveLength(1) - }) - it("passes maxOutputTokens, topP, topK, and tools for URL context and grounding in config", async () => { const options = { apiProvider: "gemini", diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 6e9d1da760..674094f425 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -250,23 +250,7 @@ }, "geminiSections": { "modelParameters": "Paràmetres del model", - "advancedFeatures": "Funcions avançades", - "geminiTokentManagement": "Gestió de tokens" - }, - "geminiTokentManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. 
Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } + "advancedFeatures": "Funcions avançades" }, "googleCloudSetup": { "title": "Per utilitzar Google Cloud Vertex AI, necessiteu:", @@ -366,21 +350,6 @@ "pathLabel": "Ruta del Codi Claude", "description": "Ruta opcional al teu CLI de Claude Code. Per defecte, 'claude' si no s'estableix.", "placeholder": "Per defecte: claude" - }, - "geminiContextManagement": { - "useCustomContextWindow": "Use custom context window limit", - "description": "Override the model's default context window.", - "modelDefault": "Model's default context window", - "condensingThreshold": { - "tokens": "tokens", - "title": "Context Condensing Threshold", - "description": "Set the percentage of context window usage that triggers automatic condensing. Note: If the calculated token limit (after reserving space for output and safety buffers) is lower than this percentage, the token limit will trigger condensing instead.", - "condensingtriggerAt": "Condensing will trigger at", - "tokenLimitTriggered": "due to token limit, not percentage", - "availableContext": "Available context window", - "tokenLimitTrigger": "Token limit trigger (after reserving output tokens and safety buffer)", - "actualTrigger": "Actual trigger (minimum of percentage and token limit)" - } } }, "browser": { diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 555231ab29..577caf6987 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -224,8 +224,7 @@ "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. 
If you get an error about a model not being supported, that's an issue on the provider's end.", "geminiSections": { "modelParameters": "Model Parameters", - "advancedFeatures": "Advanced Features", - "geminiTokentManagement": "Token Management" + "advancedFeatures": "Advanced Features" }, "geminiParameters": { "topK": { From cae3de957befe7e10e99827979b381b890a59565 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Tue, 8 Jul 2025 00:58:16 +0100 Subject: [PATCH 19/31] fix: changing from `Advanced Features` to `Tools` to be consistent with Gemini docs/AI studio --- webview-ui/src/components/settings/providers/Gemini.tsx | 4 +--- webview-ui/src/i18n/locales/ca/settings.json | 2 +- webview-ui/src/i18n/locales/de/settings.json | 2 +- webview-ui/src/i18n/locales/en/settings.json | 2 +- webview-ui/src/i18n/locales/es/settings.json | 2 +- webview-ui/src/i18n/locales/fr/settings.json | 2 +- webview-ui/src/i18n/locales/hi/settings.json | 2 +- webview-ui/src/i18n/locales/id/settings.json | 2 +- webview-ui/src/i18n/locales/it/settings.json | 2 +- webview-ui/src/i18n/locales/ja/settings.json | 2 +- webview-ui/src/i18n/locales/ko/settings.json | 2 +- webview-ui/src/i18n/locales/nl/settings.json | 2 +- webview-ui/src/i18n/locales/pl/settings.json | 2 +- webview-ui/src/i18n/locales/pt-BR/settings.json | 2 +- webview-ui/src/i18n/locales/ru/settings.json | 2 +- webview-ui/src/i18n/locales/tr/settings.json | 2 +- webview-ui/src/i18n/locales/vi/settings.json | 2 +- webview-ui/src/i18n/locales/zh-CN/settings.json | 2 +- webview-ui/src/i18n/locales/zh-TW/settings.json | 2 +- 19 files changed, 19 insertions(+), 21 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index f3fe6ec49e..394490699f 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -161,9 +161,7 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode

-

- {t("settings:providers.geminiSections.advancedFeatures")} -

+

{t("settings:providers.geminiSections.tools")}

Date: Tue, 8 Jul 2025 01:51:42 +0100 Subject: [PATCH 20/31] fix: adding `try-catch` block for `generateContentStream` --- src/api/providers/gemini.ts | 88 ++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index bbdc146528..a148363edd 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -88,55 +88,63 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl const params: GenerateContentParameters = { model, contents, config } - const result = await this.client.models.generateContentStream(params) - - let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined - - for await (const chunk of result) { - // Process candidates and their parts to separate thoughts from content - if (chunk.candidates && chunk.candidates.length > 0) { - const candidate = chunk.candidates[0] - if (candidate.content && candidate.content.parts) { - for (const part of candidate.content.parts) { - if (part.thought) { - // This is a thinking/reasoning part - if (part.text) { - yield { type: "reasoning", text: part.text } - } - } else { - // This is regular content - if (part.text) { - yield { type: "text", text: part.text } + try { + const result = await this.client.models.generateContentStream(params) + + let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined + + for await (const chunk of result) { + // Process candidates and their parts to separate thoughts from content + if (chunk.candidates && chunk.candidates.length > 0) { + const candidate = chunk.candidates[0] + if (candidate.content && candidate.content.parts) { + for (const part of candidate.content.parts) { + if (part.thought) { + // This is a thinking/reasoning part + if (part.text) { + yield { type: "reasoning", text: part.text } + } + } else { + // This is regular content + if (part.text) { + yield { type: "text", text: part.text } + } } } } } - } - // Fallback to the original text property if no candidates structure - else if (chunk.text) { - yield { type: "text", text: chunk.text } - } + // Fallback to the original text property if no candidates structure + else if (chunk.text) { + yield { type: "text", text: chunk.text } + } - if (chunk.usageMetadata) { - lastUsageMetadata = chunk.usageMetadata + if (chunk.usageMetadata) { + lastUsageMetadata = chunk.usageMetadata + } } - } - if (lastUsageMetadata) { - const inputTokens = lastUsageMetadata.promptTokenCount ?? 0 - const outputTokens = lastUsageMetadata.candidatesTokenCount ?? 0 - const cacheReadTokens = lastUsageMetadata.cachedContentTokenCount - const reasoningTokens = lastUsageMetadata.thoughtsTokenCount - - yield { - type: "usage", - inputTokens, - outputTokens, - cacheReadTokens, - reasoningTokens, - totalCost: this.calculateCost({ info, inputTokens, outputTokens, cacheReadTokens }), + if (lastUsageMetadata) { + const inputTokens = lastUsageMetadata.promptTokenCount ?? 0 + const outputTokens = lastUsageMetadata.candidatesTokenCount ?? 
0 + const cacheReadTokens = lastUsageMetadata.cachedContentTokenCount + const reasoningTokens = lastUsageMetadata.thoughtsTokenCount + + yield { + type: "usage", + inputTokens, + outputTokens, + cacheReadTokens, + reasoningTokens, + totalCost: this.calculateCost({ info, inputTokens, outputTokens, cacheReadTokens }), + } + } + } catch (error) { + if (error instanceof Error) { + throw new Error(`Gemini Generate Context Stream error: ${error.message}`) } + + throw error } } From a5f46b4240ebab5d81e32257cee54eaa38f0b41b Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Tue, 8 Jul 2025 03:24:32 +0100 Subject: [PATCH 21/31] feat: Include citations + improved type safety --- src/api/providers/gemini.ts | 49 ++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index a148363edd..e9687c15cc 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -4,6 +4,7 @@ import { type GenerateContentResponseUsageMetadata, type GenerateContentParameters, type GenerateContentConfig, + type GroundingMetadata, } from "@google/genai" import type { JWTInput } from "google-auth-library" @@ -67,7 +68,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl const contents = messages.map(convertAnthropicMessageToGemini) - const tools: Array> = [] + const tools: GenerateContentConfig["tools"] = [] if (this.options.enableUrlContext) { tools.push({ urlContext: {} }) } @@ -161,11 +162,46 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl return { id: id.endsWith(":thinking") ? id.replace(":thinking", "") : id, info, ...params } } + private processGroundingCitations(text: string, groundingMetadata?: GroundingMetadata): string { + const supports = groundingMetadata?.groundingSupports + const chunks = groundingMetadata?.groundingChunks + + if (!supports || !chunks) { + return text + } + + const sortedSupports = [...supports].sort((a, b) => (b.segment?.endIndex ?? 0) - (a.segment?.endIndex ?? 0)) + + for (const support of sortedSupports) { + const endIndex = support.segment?.endIndex + if (endIndex === undefined || !support.groundingChunkIndices?.length) { + continue + } + + const citationLinks = support.groundingChunkIndices + .map((i) => { + const uri = chunks[i]?.web?.uri + if (uri) { + return `[${i + 1}](${uri})` + } + return null + }) + .filter(Boolean) + + if (citationLinks.length > 0) { + const citationString = citationLinks.join(", ") + text = text.slice(0, endIndex) + citationString + text.slice(endIndex) + } + } + + return text + } + async completePrompt(prompt: string): Promise { try { const { id: model } = this.getModel() - const tools: Array> = [] + const tools: GenerateContentConfig["tools"] = [] if (this.options.enableUrlContext) { tools.push({ urlContext: {} }) } @@ -190,7 +226,14 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl config: promptConfig, }) - return result.text ?? "" + let text = result.text ?? 
"" + + const candidate = result.candidates?.[0] + if (candidate?.groundingMetadata) { + text = this.processGroundingCitations(text, candidate.groundingMetadata) + } + + return text } catch (error) { if (error instanceof Error) { throw new Error(`Gemini completion error: ${error.message}`) From 63c7b25f0d256f1eef170406b2fe4b9e9d29541f Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Tue, 8 Jul 2025 16:17:12 +0100 Subject: [PATCH 22/31] feat: adding citation for streams (generateContextStream) --- src/api/providers/gemini.ts | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index e9687c15cc..49d4c0e653 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -93,11 +93,19 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl const result = await this.client.models.generateContentStream(params) let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined + let accumulatedText = "" + let pendingGroundingMetadata: GroundingMetadata | undefined + let hasGroundingEnabled = this.options.enableGrounding for await (const chunk of result) { // Process candidates and their parts to separate thoughts from content if (chunk.candidates && chunk.candidates.length > 0) { const candidate = chunk.candidates[0] + + if (candidate.groundingMetadata) { + pendingGroundingMetadata = candidate.groundingMetadata + } + if (candidate.content && candidate.content.parts) { for (const part of candidate.content.parts) { if (part.thought) { @@ -108,7 +116,11 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl } else { // This is regular content if (part.text) { - yield { type: "text", text: part.text } + accumulatedText += part.text + + if (!hasGroundingEnabled) { + yield { type: "text", text: part.text } + } } } } @@ -117,7 +129,11 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl // Fallback to the original text property if no candidates structure else if (chunk.text) { - yield { type: "text", text: chunk.text } + accumulatedText += chunk.text + + if (!hasGroundingEnabled) { + yield { type: "text", text: chunk.text } + } } if (chunk.usageMetadata) { @@ -125,6 +141,16 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl } } + if (hasGroundingEnabled && accumulatedText) { + let finalText = accumulatedText + + if (pendingGroundingMetadata) { + finalText = this.processGroundingCitations(accumulatedText, pendingGroundingMetadata) + } + + yield { type: "text", text: finalText } + } + if (lastUsageMetadata) { const inputTokens = lastUsageMetadata.promptTokenCount ?? 0 const outputTokens = lastUsageMetadata.candidatesTokenCount ?? 
0 From 055bd798359a5731ccbe8e8b6fe3e8561b289e43 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Tue, 8 Jul 2025 16:58:04 +0100 Subject: [PATCH 23/31] fix: set default values for `topP`, `topK` and `maxOutputTokens` --- .../src/components/settings/providers/Gemini.tsx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 394490699f..a8aedbffc4 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -97,11 +97,11 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode min={0} max={1} step={0.01} - value={[apiConfiguration.topP ?? 0]} + value={[apiConfiguration.topP ?? 0.95]} onValueChange={(values: number[]) => setApiConfigurationField("topP", values[0])} className="flex-grow" /> - {(apiConfiguration.topP ?? 0).toFixed(2)} + {(apiConfiguration.topP ?? 0.95).toFixed(2)}
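
The gemini.ts hunks in [PATCH 21/31] and [PATCH 22/31] above turn Gemini's grounding metadata into inline citation links: once for one-shot completePrompt calls, and again for streamed responses, where the accumulated text is held back until the grounding metadata has arrived. A minimal standalone sketch of that insertion step follows; the addCitations name, the abbreviated interfaces, and the sample data are illustrative only and not part of the patch.

```typescript
interface GroundingChunk {
	web?: { uri?: string }
}

interface GroundingSupport {
	segment?: { endIndex?: number }
	groundingChunkIndices?: number[]
}

function addCitations(text: string, supports: GroundingSupport[], chunks: GroundingChunk[]): string {
	// Process the highest endIndex first so earlier insertions do not shift
	// the offsets of spans that appear later in the string.
	const sorted = [...supports].sort((a, b) => (b.segment?.endIndex ?? 0) - (a.segment?.endIndex ?? 0))

	for (const support of sorted) {
		const endIndex = support.segment?.endIndex
		if (endIndex === undefined || !support.groundingChunkIndices?.length) {
			continue
		}

		// Build markdown-style links for every grounding chunk backing this span.
		const links = support.groundingChunkIndices
			.map((i) => {
				const uri = chunks[i]?.web?.uri
				return uri ? `[${i + 1}](${uri})` : null
			})
			.filter((link): link is string => link !== null)

		if (links.length > 0) {
			text = text.slice(0, endIndex) + links.join(", ") + text.slice(endIndex)
		}
	}

	return text
}

// Hypothetical data: one grounded span ending at character 12, backed by chunk 0.
const cited = addCitations(
	"Rust is fast and memory-safe.",
	[{ segment: { endIndex: 12 }, groundingChunkIndices: [0] }],
	[{ web: { uri: "https://example.com/source" } }],
)
// cited === "Rust is fast[1](https://example.com/source) and memory-safe."
```

In the streaming path this step runs once at the end of the stream, so enabling grounding trades incremental text rendering for citations placed at the correct offsets.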
{t("settings:providers.geminiParameters.topP.description")} @@ -118,11 +118,11 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode min={0} max={100} step={1} - value={[apiConfiguration.topK ?? 0]} + value={[apiConfiguration.topK ?? 64]} onValueChange={(values: number[]) => setApiConfigurationField("topK", values[0])} className="flex-grow" /> - {apiConfiguration.topK ?? 0} + {apiConfiguration.topK ?? 64}
{t("settings:providers.geminiParameters.topK.description")} @@ -139,12 +139,12 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode min={3000} max={modelInfo.maxTokens} step={1} - value={[apiConfiguration.maxOutputTokens ?? 0]} + value={[apiConfiguration.maxOutputTokens ?? modelInfo.maxTokens]} onValueChange={(values: number[]) => setApiConfigurationField("maxOutputTokens", values[0])} className="flex-grow" /> { From 4200cff28ad217a1e640e73b5c704343f95e4212 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Wed, 9 Jul 2025 17:53:25 +0100 Subject: [PATCH 24/31] fix: changing UI/UX according to the review/feedback from `daniel-lxs` --- .../components/settings/providers/Gemini.tsx | 181 ++++++++++-------- webview-ui/src/i18n/locales/ca/settings.json | 5 +- webview-ui/src/i18n/locales/de/settings.json | 5 +- webview-ui/src/i18n/locales/en/settings.json | 5 +- webview-ui/src/i18n/locales/es/settings.json | 5 +- webview-ui/src/i18n/locales/fr/settings.json | 5 +- webview-ui/src/i18n/locales/hi/settings.json | 5 +- webview-ui/src/i18n/locales/id/settings.json | 5 +- webview-ui/src/i18n/locales/it/settings.json | 5 +- webview-ui/src/i18n/locales/ja/settings.json | 5 +- webview-ui/src/i18n/locales/ko/settings.json | 5 +- webview-ui/src/i18n/locales/nl/settings.json | 5 +- webview-ui/src/i18n/locales/pl/settings.json | 5 +- .../src/i18n/locales/pt-BR/settings.json | 5 +- webview-ui/src/i18n/locales/ru/settings.json | 5 +- webview-ui/src/i18n/locales/tr/settings.json | 5 +- webview-ui/src/i18n/locales/vi/settings.json | 5 +- .../src/i18n/locales/zh-CN/settings.json | 5 +- .../src/i18n/locales/zh-TW/settings.json | 5 +- 19 files changed, 175 insertions(+), 96 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index a8aedbffc4..13243dab4c 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -1,7 +1,9 @@ import { useCallback, useState, useMemo } from "react" import { Checkbox } from "vscrui" import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" +import { Collapsible, CollapsibleContent, CollapsibleTrigger } from "@src/components/ui/collapsible" import { Slider } from "@src/components/ui" +import { ChevronRight } from "lucide-react" import type { ProviderSettings } from "@roo-code/types" import { geminiModels, geminiDefaultModelId, type GeminiModelId } from "@roo-code/types" @@ -23,6 +25,7 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode const [googleGeminiBaseUrlSelected, setGoogleGeminiBaseUrlSelected] = useState( !!apiConfiguration?.googleGeminiBaseUrl, ) + const [isModelParametersOpen, setIsModelParametersOpen] = useState(false) const modelInfo = useMemo(() => { const modelId = ( @@ -84,84 +87,8 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode )}
-
-

{t("settings:providers.geminiSections.modelParameters")}

- -
- -
- setApiConfigurationField("topP", values[0])} - className="flex-grow" - /> - {(apiConfiguration.topP ?? 0.95).toFixed(2)} -
-
- {t("settings:providers.geminiParameters.topP.description")} -
-
- -
- -
- setApiConfigurationField("topK", values[0])} - className="flex-grow" - /> - {apiConfiguration.topK ?? 64} -
-
- {t("settings:providers.geminiParameters.topK.description")} -
-
- -
- -
- setApiConfigurationField("maxOutputTokens", values[0])} - className="flex-grow" - /> - { - const val = parseInt((e as any).target.value, 10) - return Number.isNaN(val) ? 0 : Math.min(val, modelInfo.maxTokens) - })} - className="w-16" - /> -
-
- {t("settings:providers.geminiParameters.maxOutputTokens.description")} -
-
-
- -
-

{t("settings:providers.geminiSections.tools")}

+
+

{t("settings:providers.geminiSections.tools")}

+ +
+ + +
+
+

+ {t("settings:providers.geminiSections.modelParameters.title")} +

+

+ {t("settings:providers.geminiSections.modelParameters.description")} +

+
+ +
+
+ +
+ +
+ setApiConfigurationField("topP", values[0])} + className="flex-grow" + /> + {(apiConfiguration.topP ?? 0.95).toFixed(2)} +
+
+ {t("settings:providers.geminiParameters.topP.description")} +
+
+ +
+ +
+ setApiConfigurationField("topK", values[0])} + className="flex-grow" + /> + {apiConfiguration.topK ?? 64} +
+
+ {t("settings:providers.geminiParameters.topK.description")} +
+
+ +
+ +
+ + setApiConfigurationField("maxOutputTokens", values[0]) + } + className="flex-grow" + /> + { + const val = parseInt((e as any).target.value, 10) + return Number.isNaN(val) ? 0 : Math.min(val, modelInfo.maxTokens) + })} + className="w-16" + /> +
+
+ {t("settings:providers.geminiParameters.maxOutputTokens.description")}_{" "} +
+
+
+
+
) } diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 72b8df4892..f5bea43246 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -303,7 +303,10 @@ } }, "geminiSections": { - "modelParameters": "Paràmetres del model", + "modelParameters": { + "title": "Paràmetres del model", + "description": "Ajusta la temperatura, top-p i altres paràmetres avançats." + }, "tools": "Eines" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 30f47021d3..a337dce074 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Model Parameter", + "modelParameters": { + "title": "Modellparameter", + "description": "Feinabstimmung von Temperatur, Top-P und anderen erweiterten Einstellungen." + }, "tools": "Werkzeuge" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index aa2a39044d..491f89184f 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -277,7 +277,10 @@ "vscodeLmModel": "Language Model", "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. If you get an error about a model not being supported, that's an issue on the provider's end.", "geminiSections": { - "modelParameters": "Model Parameters", + "modelParameters": { + "title": "Model Parameters", + "description": "Fine-tune topP, topK and maxOutputTokens" + }, "tools": "Tools" }, "geminiParameters": { diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 7205ff90ba..3b7e522784 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Parámetros del modelo", + "modelParameters": { + "title": "Parámetros del modelo", + "description": "Ajusta la temperatura, top-p y otros parámetros avanzados." + }, "tools": "Herramientas" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 5a18d7f55c..525a7866ec 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Paramètres du modèle", + "modelParameters": { + "title": "Paramètres du modèle", + "description": "Ajustez la température, top-p et d'autres paramètres avancés." 
+ }, "tools": "Outils" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index e59c5e0059..2ac60df5a9 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "मॉडल पैरामीटर", + "modelParameters": { + "title": "मॉडल पैरामीटर", + "description": "टेम्परेचर, टॉप-पी और अन्य उन्नत सेटिंग्स को फाइन-ट्यून करें।" + }, "tools": "उपकरण" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 12755ef6fa..78572f30de 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -303,7 +303,10 @@ } }, "geminiSections": { - "modelParameters": "Parameter Model", + "modelParameters": { + "title": "Parameter Model", + "description": "Menyesuaikan suhu, top-p, dan pengaturan lanjutan lainnya." + }, "tools": "Alat" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 3f27152aa3..b40ef2a059 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Parametri del modello", + "modelParameters": { + "title": "Parametri del modello", + "description": "Regola la temperatura, top-p e altre impostazioni avanzate." + }, "tools": "Strumenti" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 5843fd03b9..9568ff624b 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "モデルパラメータ", + "modelParameters": { + "title": "モデルパラメータ", + "description": "温度、top-p、およびその他の詳細設定を調整します。" + }, "tools": "ツール" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 92dd864233..dd18394e74 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "모델 매개변수", + "modelParameters": { + "title": "모델 매개변수", + "description": "온도, top-p 및 기타 고급 설정을 조정합니다." + }, "tools": "도구" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index b56a40ed39..c310f03b01 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Modelparameters", + "modelParameters": { + "title": "Modelparameters", + "description": "Pas de temperatuur, top-p en andere geavanceerde instellingen aan." + }, "tools": "Gereedschap" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 7277e19842..3e879138d3 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Parametry modelu", + "modelParameters": { + "title": "Parametry modelu", + "description": "Dostosuj temperaturę, top-p i inne zaawansowane ustawienia." 
+ }, "tools": "Narzędzia" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index ec7f54f06a..f3ed8d0d95 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Parâmetros do modelo", + "modelParameters": { + "title": "Parâmetros do modelo", + "description": "Ajuste a temperatura, top-p e outras configurações avançadas." + }, "tools": "Ferramentas" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 806204ae0c..6d8646efb9 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Параметры модели", + "modelParameters": { + "title": "Параметры модели", + "description": "Настройте температуру, top-p и другие расширенные параметры." + }, "tools": "Инструменты" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 3b053fba80..a5565d00e6 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Model Parametreleri", + "modelParameters": { + "title": "Model Parametreleri", + "description": "Sıcaklık, top-p ve diğer gelişmiş ayarları yapın." + }, "tools": "Araçlar" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 6c9f2d0d8f..cc673c3897 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "Tham số mô hình", + "modelParameters": { + "title": "Tham số mô hình", + "description": "Điều chỉnh nhiệt độ, top-p và các cài đặt nâng cao khác." 
+ }, "tools": "Công cụ" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 97b78e7d0c..b516caebe0 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "模型参数", + "modelParameters": { + "title": "模型参数", + "description": "调整温度、top-p 和其他高级设置。" + }, "tools": "工具" }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index ec4b41eb74..9775bf9908 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -299,7 +299,10 @@ } }, "geminiSections": { - "modelParameters": "模型參數", + "modelParameters": { + "title": "模型參數", + "description": "調整溫度、top-p 和其他進階設定。" + }, "tools": "工具" }, "googleCloudSetup": { From 0d72f089461e5f080843e811308eb521ecc5bb58 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Wed, 9 Jul 2025 18:27:01 +0100 Subject: [PATCH 25/31] fix: updating the `Gemini.spec.tsx` unit test - testing when it is hidden - testing when users click on the collapsible trigger and model configuration appears --- .../settings/providers/__tests__/Gemini.spec.tsx | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx index 2a420c6c02..725cdf9fd4 100644 --- a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx +++ b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx @@ -1,4 +1,5 @@ import { render, screen } from "@testing-library/react" +import userEvent from "@testing-library/user-event" import { Gemini } from "../Gemini" import type { ProviderSettings } from "@roo-code/types" @@ -43,10 +44,19 @@ vi.mock("@src/components/common/VSCodeButtonLink", () => ({ })) describe("Gemini provider settings", () => { - it("renders sliders for topP, topK and maxOutputTokens", () => { + it("renders sliders for topP, topK and maxOutputTokens after expanding", async () => { + const user = userEvent.setup() const setApiField = vi.fn() const config: ProviderSettings = {} render() + + expect(screen.queryByTestId("slider-top-p")).not.toBeInTheDocument() + expect(screen.queryByTestId("slider-top-k")).not.toBeInTheDocument() + expect(screen.queryByTestId("slider-max-output-tokens")).not.toBeInTheDocument() + + const trigger = screen.getByText("settings:providers.geminiSections.modelParameters.title") + await user.click(trigger) + expect(screen.getByTestId("slider-top-p")).toBeInTheDocument() expect(screen.getByTestId("slider-top-k")).toBeInTheDocument() expect(screen.getByTestId("slider-max-output-tokens")).toBeInTheDocument() From d18b143f41c45b25437efc189f17bab84b880a54 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Wed, 9 Jul 2025 22:30:36 +0100 Subject: [PATCH 26/31] fix: more changes from the feedback/review from `daniel-lxs` --- .../components/settings/providers/Gemini.tsx | 191 +++++++++--------- webview-ui/src/i18n/locales/ca/settings.json | 5 +- webview-ui/src/i18n/locales/de/settings.json | 5 +- webview-ui/src/i18n/locales/en/settings.json | 5 +- webview-ui/src/i18n/locales/es/settings.json | 5 +- webview-ui/src/i18n/locales/fr/settings.json | 5 
+- webview-ui/src/i18n/locales/hi/settings.json | 5 +- webview-ui/src/i18n/locales/id/settings.json | 5 +- webview-ui/src/i18n/locales/it/settings.json | 5 +- webview-ui/src/i18n/locales/ja/settings.json | 5 +- webview-ui/src/i18n/locales/ko/settings.json | 5 +- webview-ui/src/i18n/locales/nl/settings.json | 5 +- webview-ui/src/i18n/locales/pl/settings.json | 5 +- .../src/i18n/locales/pt-BR/settings.json | 5 +- webview-ui/src/i18n/locales/ru/settings.json | 5 +- webview-ui/src/i18n/locales/tr/settings.json | 5 +- webview-ui/src/i18n/locales/vi/settings.json | 5 +- .../src/i18n/locales/zh-CN/settings.json | 5 +- .../src/i18n/locales/zh-TW/settings.json | 5 +- 19 files changed, 133 insertions(+), 148 deletions(-) diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 13243dab4c..05d083e381 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -85,12 +85,9 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode className="w-full mt-1" /> )} -
- -
-

{t("settings:providers.geminiSections.tools")}

setApiConfigurationField("enableUrlContext", checked)}> @@ -109,104 +106,110 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentMode
{t("settings:providers.geminiParameters.groundingSearch.description")}
-
-
- - -
-
-

- {t("settings:providers.geminiSections.modelParameters.title")} -

-

- {t("settings:providers.geminiSections.modelParameters.description")} -

-
- -
-
- -
- -
- setApiConfigurationField("topP", values[0])} - className="flex-grow" +
+ + +
+
+

+ {t("settings:providers.geminiSections.modelParameters.title")} +

+

+ {t("settings:providers.geminiSections.modelParameters.description")} +

+
+ - {(apiConfiguration.topP ?? 0.95).toFixed(2)}
-
- {t("settings:providers.geminiParameters.topP.description")} + + +
+ +
+ + setApiConfigurationField("topP", values[0]) + } + className="flex-grow" + /> + + {(apiConfiguration.topP ?? 0.95).toFixed(2)} + +
+
+ {t("settings:providers.geminiParameters.topP.description")} +
-
-
- -
- setApiConfigurationField("topK", values[0])} - className="flex-grow" - /> - {apiConfiguration.topK ?? 64} +
+ +
+ + setApiConfigurationField("topK", values[0]) + } + className="flex-grow" + /> + {apiConfiguration.topK ?? 64} +
+
+ {t("settings:providers.geminiParameters.topK.description")} +
-
- {t("settings:providers.geminiParameters.topK.description")} -
-
-
- -
- - setApiConfigurationField("maxOutputTokens", values[0]) - } - className="flex-grow" - /> - { - const val = parseInt((e as any).target.value, 10) - return Number.isNaN(val) ? 0 : Math.min(val, modelInfo.maxTokens) - })} - className="w-16" - /> +
+ +
+ + setApiConfigurationField("maxOutputTokens", values[0]) + } + className="flex-grow" + /> + { + const val = parseInt((e as any).target.value, 10) + return Number.isNaN(val) ? 0 : Math.min(val, modelInfo.maxTokens) + })} + className="w-16" + /> +
+
+ {t("settings:providers.geminiParameters.maxOutputTokens.description")}_{" "} +
-
- {t("settings:providers.geminiParameters.maxOutputTokens.description")}_{" "} -
-
- - + + +
) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index f5bea43246..e4bebe3dc8 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -304,10 +304,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Paràmetres del model", + "title": "Configuració avançada", "description": "Ajusta la temperatura, top-p i altres paràmetres avançats." - }, - "tools": "Eines" + } }, "googleCloudSetup": { "title": "Per utilitzar Google Cloud Vertex AI, necessiteu:", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index a337dce074..c5d362598a 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Modellparameter", + "title": "Erweiterte Einstellungen", "description": "Feinabstimmung von Temperatur, Top-P und anderen erweiterten Einstellungen." - }, - "tools": "Werkzeuge" + } }, "googleCloudSetup": { "title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 491f89184f..91cce5dccd 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -278,10 +278,9 @@ "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. If you get an error about a model not being supported, that's an issue on the provider's end.", "geminiSections": { "modelParameters": { - "title": "Model Parameters", + "title": "Advanced Settings", "description": "Fine-tune topP, topK and maxOutputTokens" - }, - "tools": "Tools" + } }, "geminiParameters": { "topK": { diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 3b7e522784..0ee11ef249 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Parámetros del modelo", + "title": "Configuración avanzada", "description": "Ajusta la temperatura, top-p y otros parámetros avanzados." - }, - "tools": "Herramientas" + } }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 525a7866ec..44944833b4 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Paramètres du modèle", + "title": "Paramètres avancés", "description": "Ajustez la température, top-p et d'autres paramètres avancés." 
- }, - "tools": "Outils" + } }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 2ac60df5a9..dc78cb147f 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "मॉडल पैरामीटर", + "title": "उन्नत सेटिंग्स", "description": "टेम्परेचर, टॉप-पी और अन्य उन्नत सेटिंग्स को फाइन-ट्यून करें।" - }, - "tools": "उपकरण" + } }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 78572f30de..117fab0df9 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -304,10 +304,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Parameter Model", + "title": "Pengaturan Lanjutan", "description": "Menyesuaikan suhu, top-p, dan pengaturan lanjutan lainnya." - }, - "tools": "Alat" + } }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index b40ef2a059..b8e4934ee2 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Parametri del modello", + "title": "Impostazioni avanzate", "description": "Regola la temperatura, top-p e altre impostazioni avanzate." - }, - "tools": "Strumenti" + } }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 9568ff624b..55d31147f7 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "モデルパラメータ", + "title": "詳細設定", "description": "温度、top-p、およびその他の詳細設定を調整します。" - }, - "tools": "ツール" + } }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index dd18394e74..167c42e78e 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "모델 매개변수", + "title": "고급 설정", "description": "온도, top-p 및 기타 고급 설정을 조정합니다." - }, - "tools": "도구" + } }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index c310f03b01..70f8ede8c7 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Modelparameters", + "title": "Geavanceerde instellingen", "description": "Pas de temperatuur, top-p en andere geavanceerde instellingen aan." 
- }, - "tools": "Gereedschap" + } }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 3e879138d3..9b483a93fc 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Parametry modelu", + "title": "Ustawienia zaawansowane", "description": "Dostosuj temperaturę, top-p i inne zaawansowane ustawienia." - }, - "tools": "Narzędzia" + } }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index f3ed8d0d95..c682543da3 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Parâmetros do modelo", + "title": "Configurações Avançadas", "description": "Ajuste a temperatura, top-p e outras configurações avançadas." - }, - "tools": "Ferramentas" + } }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 6d8646efb9..cecb2da747 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Параметры модели", + "title": "Расширенные настройки", "description": "Настройте температуру, top-p и другие расширенные параметры." - }, - "tools": "Инструменты" + } }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index a5565d00e6..303fe94c52 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Model Parametreleri", + "title": "Gelişmiş Ayarlar", "description": "Sıcaklık, top-p ve diğer gelişmiş ayarları yapın." - }, - "tools": "Araçlar" + } }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index cc673c3897..52dd0e00a2 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "Tham số mô hình", + "title": "Cài đặt nâng cao", "description": "Điều chỉnh nhiệt độ, top-p và các cài đặt nâng cao khác." 
- }, - "tools": "Công cụ" + } }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index b516caebe0..5fb49d9a5a 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "模型参数", + "title": "高级设置", "description": "调整温度、top-p 和其他高级设置。" - }, - "tools": "工具" + } }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 9775bf9908..56eb8f5644 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -300,10 +300,9 @@ }, "geminiSections": { "modelParameters": { - "title": "模型參數", + "title": "進階設定", "description": "調整溫度、top-p 和其他進階設定。" - }, - "tools": "工具" + } }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", From 7e9d2527f5100b5776ee083422573ac60842f683 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Mon, 14 Jul 2025 21:21:56 +0100 Subject: [PATCH 27/31] fix: adding sources at the end of the stream to preserve --- src/api/providers/gemini.ts | 71 +++++++++++++------------------------ 1 file changed, 25 insertions(+), 46 deletions(-) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index 49d4c0e653..53d361a2ac 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -93,9 +93,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl const result = await this.client.models.generateContentStream(params) let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined - let accumulatedText = "" let pendingGroundingMetadata: GroundingMetadata | undefined - let hasGroundingEnabled = this.options.enableGrounding for await (const chunk of result) { // Process candidates and their parts to separate thoughts from content @@ -116,11 +114,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl } else { // This is regular content if (part.text) { - accumulatedText += part.text - - if (!hasGroundingEnabled) { - yield { type: "text", text: part.text } - } + yield { type: "text", text: part.text } } } } @@ -129,11 +123,7 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl // Fallback to the original text property if no candidates structure else if (chunk.text) { - accumulatedText += chunk.text - - if (!hasGroundingEnabled) { - yield { type: "text", text: chunk.text } - } + yield { type: "text", text: chunk.text } } if (chunk.usageMetadata) { @@ -141,14 +131,11 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl } } - if (hasGroundingEnabled && accumulatedText) { - let finalText = accumulatedText - - if (pendingGroundingMetadata) { - finalText = this.processGroundingCitations(accumulatedText, pendingGroundingMetadata) + if (pendingGroundingMetadata) { + const citations = this.extractCitationsOnly(pendingGroundingMetadata) + if (citations) { + yield { type: "text", text: `\n\nSources: ${citations}` } } - - yield { type: "text", text: finalText } } if (lastUsageMetadata) { @@ -188,39 +175,28 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl return { id: id.endsWith(":thinking") ? 
id.replace(":thinking", "") : id, info, ...params } } - private processGroundingCitations(text: string, groundingMetadata?: GroundingMetadata): string { - const supports = groundingMetadata?.groundingSupports + private extractCitationsOnly(groundingMetadata?: GroundingMetadata): string | null { const chunks = groundingMetadata?.groundingChunks - if (!supports || !chunks) { - return text + if (!chunks) { + return null } - const sortedSupports = [...supports].sort((a, b) => (b.segment?.endIndex ?? 0) - (a.segment?.endIndex ?? 0)) - - for (const support of sortedSupports) { - const endIndex = support.segment?.endIndex - if (endIndex === undefined || !support.groundingChunkIndices?.length) { - continue - } - - const citationLinks = support.groundingChunkIndices - .map((i) => { - const uri = chunks[i]?.web?.uri - if (uri) { - return `[${i + 1}](${uri})` - } - return null - }) - .filter(Boolean) + const citationLinks = chunks + .map((chunk, i) => { + const uri = chunk.web?.uri + if (uri) { + return `[${i + 1}](${uri})` + } + return null + }) + .filter((link): link is string => link !== null) - if (citationLinks.length > 0) { - const citationString = citationLinks.join(", ") - text = text.slice(0, endIndex) + citationString + text.slice(endIndex) - } + if (citationLinks.length > 0) { + return citationLinks.join(", ") } - return text + return null } async completePrompt(prompt: string): Promise { @@ -256,7 +232,10 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl const candidate = result.candidates?.[0] if (candidate?.groundingMetadata) { - text = this.processGroundingCitations(text, candidate.groundingMetadata) + const citations = this.extractCitationsOnly(candidate.groundingMetadata) + if (citations) { + text += `\n\nSources: ${citations}` + } } return text From 8ca442e9003df21c2597aab534ada92e17f34e06 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 17 Jul 2025 22:05:52 +0100 Subject: [PATCH 28/31] fix: change the description for grounding with google search and url context --- webview-ui/src/i18n/locales/en/settings.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 674b6b81b4..d0bf38d539 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -301,11 +301,11 @@ }, "urlContext": { "title": "Enable URL Context", - "description": "Allows Gemini to access and process URLs for additional context when generating responses. Useful for tasks requiring web content analysis." + "description": "Lets Gemini read linked pages to extract, compare, and synthesize their content into informed responses." }, "groundingSearch": { "title": "Enable Grounding with Google Search", - "description": "Enables Gemini to search Google for current information and ground responses in real-time data. Useful for queries requiring up-to-date information." + "description": "Connects Gemini to real‑time web data for accurate, up‑to‑date answers with verifiable citations." 
} }, "googleCloudSetup": { From 1c2aa36dd1ec66d2bf8993fd16bc477d2c3e20c8 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Thu, 17 Jul 2025 23:09:39 +0100 Subject: [PATCH 29/31] fix: adding translations --- webview-ui/src/i18n/locales/de/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/en/settings.json | 8 +++--- webview-ui/src/i18n/locales/es/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/fr/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/hi/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/id/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/it/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/ja/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/ko/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/nl/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/pl/settings.json | 28 +++++++++++++++++++ .../src/i18n/locales/pt-BR/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/ru/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/tr/settings.json | 28 +++++++++++++++++++ webview-ui/src/i18n/locales/vi/settings.json | 28 +++++++++++++++++++ .../src/i18n/locales/zh-CN/settings.json | 28 +++++++++++++++++++ .../src/i18n/locales/zh-TW/settings.json | 28 +++++++++++++++++++ 17 files changed, 452 insertions(+), 4 deletions(-) diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 3b10fe2dd0..4440afdb28 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -311,6 +311,34 @@ "description": "Feinabstimmung von Temperatur, Top-P und anderen erweiterten Einstellungen." } }, + "geminiSections": { + "modelParameters": { + "title": "Ausgabesteuerung", + "description": "Feinabstimmung von topP, topK und maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Ein niedriger Wert macht den Text vorhersagbarer, während ein hoher Wert ihn kreativer macht." + }, + "topP": { + "title": "Top P", + "description": "Ein niedriger Wert führt zu fokussierterem Text, während ein hoher Wert zu vielfältigeren Ergebnissen führt." + }, + "maxOutputTokens": { + "title": "Maximale Ausgabe-Tokens", + "description": "Dieser Parameter legt die maximale Länge der Antwort fest, die das Modell generieren darf." + }, + "urlContext": { + "title": "URL-Kontext aktivieren", + "description": "Ermöglicht Gemini, verlinkte Seiten zu lesen, um deren Inhalt zu extrahieren, zu vergleichen und in fundierte Antworten zu synthetisieren." + }, + "groundingSearch": { + "title": "Grounding mit Google Suche aktivieren", + "description": "Verbindet Gemini mit Echtzeit-Webdaten für genaue, aktuelle Antworten mit überprüfbaren Zitaten." + } + }, "googleCloudSetup": { "title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:", "step1": "1. Ein Google Cloud-Konto erstellen, die Vertex AI API aktivieren & die gewünschten Claude-Modelle aktivieren.", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index c60d780bbc..30ccd11ba8 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -285,22 +285,22 @@ "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. 
If you get an error about a model not being supported, that's an issue on the provider's end.", "geminiSections": { "modelParameters": { - "title": "Advanced Settings", + "title": "Output Controls", "description": "Fine-tune topP, topK and maxOutputTokens" } }, "geminiParameters": { "topK": { "title": "Top K", - "description": "Controls the number of highest probability tokens to consider for each step. Higher values increase diversity, lower values make output more focused and deterministic." + "description": "A low value makes the text more predictable, while a high value makes it more creative." }, "topP": { "title": "Top P", - "description": "Controls the cumulative probability of tokens to consider (nucleus sampling). Values closer to 1.0 increase diversity, while lower values make output more focused." + "description": "A low value leads to more focused text, while a high value results in more diverse outcomes." }, "maxOutputTokens": { "title": "Max Output Tokens", - "description": "Maximum number of tokens the model can generate in a single response. Higher values allow longer responses but increase token usage and costs." + "description": "This parameter sets the maximum length of the response the model is allowed to generate." }, "urlContext": { "title": "Enable URL Context", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 224b423e4c..dc01756e63 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -311,6 +311,34 @@ "description": "Ajusta la temperatura, top-p y otros parámetros avanzados." } }, + "geminiSections": { + "modelParameters": { + "title": "Controles de Salida", + "description": "Ajusta topP, topK y maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Un valor bajo hace que el texto sea más predecible, mientras que un valor alto lo hace más creativo." + }, + "topP": { + "title": "Top P", + "description": "Un valor bajo conduce a un texto más enfocado, mientras que un valor alto da como resultado resultados más diversos." + }, + "maxOutputTokens": { + "title": "Máximo de Tokens de Salida", + "description": "Este parámetro establece la longitud máxima de la respuesta que el modelo puede generar." + }, + "urlContext": { + "title": "Habilitar Contexto de URL", + "description": "Permite a Gemini leer páginas enlazadas para extraer, comparar y sintetizar su contenido en respuestas informadas." + }, + "groundingSearch": { + "title": "Habilitar Grounding con Búsqueda de Google", + "description": "Conecta a Gemini con datos web en tiempo real para obtener respuestas precisas y actualizadas con citas verificables." + } + }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", "step1": "1. Crear una cuenta de Google Cloud, habilitar la API de Vertex AI y habilitar los modelos Claude deseados.", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index e0176f4d6c..52542f1667 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -311,6 +311,34 @@ "description": "Ajustez la température, top-p et d'autres paramètres avancés." 
} }, + "geminiSections": { + "modelParameters": { + "title": "Contrôles de sortie", + "description": "Ajustez topP, topK et maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Une valeur faible rend le texte plus prévisible, tandis qu'une valeur élevée le rend plus créatif." + }, + "topP": { + "title": "Top P", + "description": "Une valeur faible conduit à un texte plus ciblé, tandis qu'une valeur élevée donne des résultats plus diversifiés." + }, + "maxOutputTokens": { + "title": "Jetons de sortie max", + "description": "Ce paramètre définit la longueur maximale de la réponse que le modèle est autorisé à générer." + }, + "urlContext": { + "title": "Activer le contexte d'URL", + "description": "Permet à Gemini de lire les pages liées pour extraire, comparer et synthétiser leur contenu en réponses éclairées." + }, + "groundingSearch": { + "title": "Activer l'ancrage avec la recherche Google", + "description": "Connecte Gemini aux données Web en temps réel pour des réponses précises et à jour avec des citations vérifiables." + } + }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", "step1": "1. Créer un compte Google Cloud, activer l'API Vertex AI et activer les modèles Claude souhaités.", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 47474840ef..7389103ba7 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -311,6 +311,34 @@ "description": "टेम्परेचर, टॉप-पी और अन्य उन्नत सेटिंग्स को फाइन-ट्यून करें।" } }, + "geminiSections": { + "modelParameters": { + "title": "आउटपुट नियंत्रण", + "description": "topP, topK और maxOutputTokens को ठीक करें" + } + }, + "geminiParameters": { + "topK": { + "title": "शीर्ष K", + "description": "कम मान पाठ को अधिक पूर्वानुमानित बनाता है, जबकि उच्च मान इसे अधिक रचनात्मक बनाता है।" + }, + "topP": { + "title": "शीर्ष P", + "description": "कम मान अधिक केंद्रित पाठ की ओर ले जाता है, जबकि उच्च मान के परिणामस्वरूप अधिक विविध परिणाम होते हैं।" + }, + "maxOutputTokens": { + "title": "अधिकतम आउटपुट टोकन", + "description": "यह पैरामीटर प्रतिक्रिया की अधिकतम लंबाई निर्धारित करता है जिसे मॉडल उत्पन्न करने की अनुमति है।" + }, + "urlContext": { + "title": "URL प्रसंग सक्षम करें", + "description": "जेमिनी को लिंक किए गए पृष्ठों को पढ़ने, तुलना करने और उनकी सामग्री को सूचित प्रतिक्रियाओं में संश्लेषित करने देता है।" + }, + "groundingSearch": { + "title": "Google खोज के साथ ग्राउंडिंग सक्षम करें", + "description": "सटीक, अद्यतित उत्तरों के लिए जेमिनी को रीयल-टाइम वेब डेटा से जोड़ता है जिसमें सत्यापन योग्य उद्धरण होते हैं।" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", "step1": "1. Google Cloud खाता बनाएं, Vertex AI API सक्षम करें और वांछित Claude मॉडल सक्षम करें।", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index c7135ef32d..e1b230612e 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -315,6 +315,34 @@ "description": "Menyesuaikan suhu, top-p, dan pengaturan lanjutan lainnya." 
} }, + "geminiSections": { + "modelParameters": { + "title": "Kontrol Output", + "description": "Menyesuaikan topP, topK, dan maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Nilai rendah membuat teks lebih dapat diprediksi, sedangkan nilai tinggi membuatnya lebih kreatif." + }, + "topP": { + "title": "Top P", + "description": "Nilai rendah menghasilkan teks yang lebih terfokus, sedangkan nilai tinggi menghasilkan hasil yang lebih beragam." + }, + "maxOutputTokens": { + "title": "Token Output Maks", + "description": "Parameter ini menetapkan panjang maksimum respons yang diizinkan untuk dibuat oleh model." + }, + "urlContext": { + "title": "Aktifkan Konteks URL", + "description": "Memungkinkan Gemini membaca halaman tertaut untuk mengekstrak, membandingkan, dan mensintesis kontennya menjadi respons yang terinformasi." + }, + "groundingSearch": { + "title": "Aktifkan Grounding dengan Google Search", + "description": "Menghubungkan Gemini ke data web real-time untuk jawaban yang akurat dan terkini dengan kutipan yang dapat diverifikasi." + } + }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", "step1": "1. Buat akun Google Cloud, aktifkan Vertex AI API & aktifkan model Claude yang diinginkan.", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index a0170c2948..13460bfa21 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -311,6 +311,34 @@ "description": "Regola la temperatura, top-p e altre impostazioni avanzate." } }, + "geminiSections": { + "modelParameters": { + "title": "Controlli di output", + "description": "Perfeziona topP, topK e maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Un valore basso rende il testo più prevedibile, mentre un valore alto lo rende più creativo." + }, + "topP": { + "title": "Top P", + "description": "Un valore basso porta a un testo più mirato, mentre un valore alto si traduce in risultati più diversi." + }, + "maxOutputTokens": { + "title": "Token di output massimi", + "description": "Questo parametro imposta la lunghezza massima della risposta che il modello è autorizzato a generare." + }, + "urlContext": { + "title": "Abilita contesto URL", + "description": "Consente a Gemini di leggere le pagine collegate per estrarre, confrontare e sintetizzare il loro contenuto in risposte informate." + }, + "groundingSearch": { + "title": "Abilita il grounding con la Ricerca Google", + "description": "Collega Gemini ai dati web in tempo reale per risposte accurate e aggiornate con citazioni verificabili." + } + }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", "step1": "1. 
Creare un account Google Cloud, abilitare l'API Vertex AI e abilitare i modelli Claude desiderati.", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 92d734eafe..91d2bac289 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -311,6 +311,34 @@ "description": "温度、top-p、およびその他の詳細設定を調整します。" } }, + "geminiSections": { + "modelParameters": { + "title": "出力制御", + "description": "topP、topK、maxOutputTokensを微調整します" + } + }, + "geminiParameters": { + "topK": { + "title": "トップK", + "description": "値を低くするとテキストの予測可能性が高まり、値を高くするとより創造的になります。" + }, + "topP": { + "title": "トップP", + "description": "値を低くするとより焦点の合ったテキストになり、値を高くするとより多様な結果になります。" + }, + "maxOutputTokens": { + "title": "最大出力トークン", + "description": "このパラメータは、モデルが生成できる応答の最大長を設定します。" + }, + "urlContext": { + "title": "URLコンテキストを有効にする", + "description": "Geminiがリンクされたページを読み取り、そのコンテンツを抽出、比較、統合して、情報に基づいた応答を生成できるようにします。" + }, + "groundingSearch": { + "title": "Google検索によるグラウンディングを有効にする", + "description": "GeminiをリアルタイムのWebデータに接続して、検証可能な引用付きの正確で最新の回答を提供します。" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", "step1": "1. Google Cloudアカウントを作成し、Vertex AI APIを有効にして、希望するClaudeモデルを有効にします。", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 46ebc3e4d0..ae41c03c94 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -311,6 +311,34 @@ "description": "온도, top-p 및 기타 고급 설정을 조정합니다." } }, + "geminiSections": { + "modelParameters": { + "title": "출력 제어", + "description": "topP, topK 및 maxOutputTokens 미세 조정" + } + }, + "geminiParameters": { + "topK": { + "title": "상위 K", + "description": "값이 낮으면 텍스트를 더 예측하기 쉬워지고, 값이 높으면 더 창의적으로 만듭니다." + }, + "topP": { + "title": "상위 P", + "description": "값이 낮으면 더 집중된 텍스트가 되고, 값이 높으면 더 다양한 결과가 나옵니다." + }, + "maxOutputTokens": { + "title": "최대 출력 토큰", + "description": "이 매개변수는 모델이 생성할 수 있는 응답의 최대 길이를 설정합니다." + }, + "urlContext": { + "title": "URL 컨텍스트 활성화", + "description": "Gemini가 연결된 페이지를 읽고 콘텐츠를 추출, 비교 및 종합하여 정보에 입각한 응답을 생성하도록 합니다." + }, + "groundingSearch": { + "title": "Google 검색으로 그라운딩 활성화", + "description": "Gemini를 실시간 웹 데이터에 연결하여 검증 가능한 인용과 함께 정확하고 최신 답변을 제공합니다." + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", "step1": "1. Google Cloud 계정을 만들고, Vertex AI API를 활성화하고, 원하는 Claude 모델을 활성화하세요.", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 2f7c799e23..d5ef688808 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -311,6 +311,34 @@ "description": "Pas de temperatuur, top-p en andere geavanceerde instellingen aan." } }, + "geminiSections": { + "modelParameters": { + "title": "Uitvoerregelaars", + "description": "Verfijn topP, topK en maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Een lage waarde maakt de tekst voorspelbaarder, terwijl een hoge waarde deze creatiever maakt." + }, + "topP": { + "title": "Top P", + "description": "Een lage waarde leidt tot meer gerichte tekst, terwijl een hoge waarde resulteert in meer diverse resultaten." + }, + "maxOutputTokens": { + "title": "Max. uitvoertokens", + "description": "Deze parameter stelt de maximale lengte in van de respons die het model mag genereren." 
+ }, + "urlContext": { + "title": "URL-context inschakelen", + "description": "Laat Gemini gelinkte pagina's lezen om hun inhoud te extraheren, te vergelijken en te synthetiseren tot geïnformeerde antwoorden." + }, + "groundingSearch": { + "title": "Aarding met Google Zoeken inschakelen", + "description": "Verbindt Gemini met realtime webgegevens voor nauwkeurige, up-to-date antwoorden met verifieerbare citaten." + } + }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", "step1": "1. Maak een Google Cloud-account aan, schakel de Vertex AI API in en activeer de gewenste Claude-modellen.", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 11163b1666..981b6394b7 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -311,6 +311,34 @@ "description": "Dostosuj temperaturę, top-p i inne zaawansowane ustawienia." } }, + "geminiSections": { + "modelParameters": { + "title": "Kontrola wyjścia", + "description": "Dostosuj topP, topK i maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Niska wartość sprawia, że tekst jest bardziej przewidywalny, a wysoka wartość czyni go bardziej kreatywnym." + }, + "topP": { + "title": "Top P", + "description": "Niska wartość prowadzi do bardziej skoncentrowanego tekstu, a wysoka wartość skutkuje bardziej zróżnicowanymi wynikami." + }, + "maxOutputTokens": { + "title": "Maksymalna liczba tokenów wyjściowych", + "description": "Ten parametr ustawia maksymalną długość odpowiedzi, jaką model może wygenerować." + }, + "urlContext": { + "title": "Włącz kontekst adresu URL", + "description": "Pozwala Gemini czytać połączone strony w celu wyodrębnienia, porównania i syntezy ich treści w świadome odpowiedzi." + }, + "groundingSearch": { + "title": "Włącz uziemienie za pomocą wyszukiwarki Google", + "description": "Łączy Gemini z danymi internetowymi w czasie rzeczywistym w celu uzyskania dokładnych, aktualnych odpowiedzi z weryfikowalnymi cytatami." + } + }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", "step1": "1. Utworzyć konto Google Cloud, włączyć API Vertex AI i włączyć żądane modele Claude.", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index dd5955d488..b320342d49 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -311,6 +311,34 @@ "description": "Ajuste a temperatura, top-p e outras configurações avançadas." } }, + "geminiSections": { + "modelParameters": { + "title": "Controles de saída", + "description": "Ajuste fino de topP, topK e maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Um valor baixo torna o texto mais previsível, enquanto um valor alto o torna mais criativo." + }, + "topP": { + "title": "Top P", + "description": "Um valor baixo leva a um texto mais focado, enquanto um valor alto resulta em resultados mais diversos." + }, + "maxOutputTokens": { + "title": "Tokens de saída máximos", + "description": "Este parâmetro define o comprimento máximo da resposta que o modelo tem permissão para gerar." + }, + "urlContext": { + "title": "Ativar contexto de URL", + "description": "Permite que o Gemini leia páginas vinculadas para extrair, comparar e sintetizar seu conteúdo em respostas informadas." 
+ }, + "groundingSearch": { + "title": "Ativar o aterramento com a Pesquisa Google", + "description": "Conecta o Gemini a dados da web em tempo real para respostas precisas e atualizadas com citações verificáveis." + } + }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", "step1": "1. Criar uma conta Google Cloud, ativar a API Vertex AI e ativar os modelos Claude desejados.", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index ec4a3861f4..1ae05b22bf 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -311,6 +311,34 @@ "description": "Настройте температуру, top-p и другие расширенные параметры." } }, + "geminiSections": { + "modelParameters": { + "title": "Элементы управления выводом", + "description": "Точная настройка topP, topK и maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Топ K", + "description": "Низкое значение делает текст более предсказуемым, а высокое — более креативным." + }, + "topP": { + "title": "Топ P", + "description": "Низкое значение приводит к более сфокусированному тексту, а высокое — к более разнообразным результатам." + }, + "maxOutputTokens": { + "title": "Максимальное количество выходных токенов", + "description": "Этот параметр устанавливает максимальную длину ответа, который разрешено генерировать модели." + }, + "urlContext": { + "title": "Включить контекст URL", + "description": "Позволяет Gemini читать связанные страницы для извлечения, сравнения и обобщения их содержимого в обоснованные ответы." + }, + "groundingSearch": { + "title": "Включить заземление с помощью поиска Google", + "description": "Подключает Gemini к веб-данным в реальном времени для получения точных и актуальных ответов с проверяемыми цитатами." + } + }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", "step1": "1. Создайте аккаунт Google Cloud, включите Vertex AI API и нужные модели Claude.", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 27fcb1d682..850d79e1f3 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -311,6 +311,34 @@ "description": "Sıcaklık, top-p ve diğer gelişmiş ayarları yapın." } }, + "geminiSections": { + "modelParameters": { + "title": "Çıktı Kontrolleri", + "description": "topP, topK ve maxOutputTokens'e ince ayar yapın" + } + }, + "geminiParameters": { + "topK": { + "title": "En K", + "description": "Düşük bir değer metni daha öngörülebilir hale getirirken, yüksek bir değer onu daha yaratıcı hale getirir." + }, + "topP": { + "title": "En P", + "description": "Düşük bir değer daha odaklanmış metne yol açarken, yüksek bir değer daha çeşitli sonuçlara neden olur." + }, + "maxOutputTokens": { + "title": "Maksimum Çıkış Jetonu", + "description": "Bu parametre, modelin oluşturmasına izin verilen yanıtın maksimum uzunluğunu ayarlar." + }, + "urlContext": { + "title": "URL Bağlamını Etkinleştir", + "description": "Gemini'nin bağlantılı sayfaları okuyarak içeriklerini çıkarmasına, karşılaştırmasına ve bilgili yanıtlara sentezlemesine olanak tanır." + }, + "groundingSearch": { + "title": "Google Arama ile Topraklamayı Etkinleştir", + "description": "Doğrulanabilir alıntılarla doğru, güncel yanıtlar için Gemini'yi gerçek zamanlı web verilerine bağlar." 
+ } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", "step1": "1. Google Cloud hesabı oluşturun, Vertex AI API'sini etkinleştirin ve istediğiniz Claude modellerini etkinleştirin.", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index c83a10d4a5..77be601bbb 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -311,6 +311,34 @@ "description": "Điều chỉnh nhiệt độ, top-p và các cài đặt nâng cao khác." } }, + "geminiSections": { + "modelParameters": { + "title": "Điều khiển đầu ra", + "description": "Tinh chỉnh topP, topK và maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Giá trị thấp làm cho văn bản dễ đoán hơn, trong khi giá trị cao làm cho nó sáng tạo hơn." + }, + "topP": { + "title": "Top P", + "description": "Giá trị thấp dẫn đến văn bản tập trung hơn, trong khi giá trị cao dẫn đến kết quả đa dạng hơn." + }, + "maxOutputTokens": { + "title": "Mã thông báo đầu ra tối đa", + "description": "Tham số này đặt độ dài tối đa của phản hồi mà mô hình được phép tạo." + }, + "urlContext": { + "title": "Bật ngữ cảnh URL", + "description": "Cho phép Gemini đọc các trang được liên kết để trích xuất, so sánh và tổng hợp nội dung của chúng thành các câu trả lời có đầy đủ thông tin." + }, + "groundingSearch": { + "title": "Bật tính năng tiếp đất với Google Tìm kiếm", + "description": "Kết nối Gemini với dữ liệu web thời gian thực để có câu trả lời chính xác, cập nhật với các trích dẫn có thể xác minh." + } + }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", "step1": "1. Tạo tài khoản Google Cloud, kích hoạt Vertex AI API và kích hoạt các mô hình Claude mong muốn.", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index ae37c11e6a..724a3129f3 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -311,6 +311,34 @@ "description": "调整温度、top-p 和其他高级设置。" } }, + "geminiSections": { + "modelParameters": { + "title": "输出控制", + "description": "微调 topP、topK 和 maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "较低的值使文本更具可预测性,而较高的值使其更具创造性。" + }, + "topP": { + "title": "Top P", + "description": "较低的值可以使文本更集中,而较高的值可以产生更多样化的结果。" + }, + "maxOutputTokens": { + "title": "最大输出 Token", + "description": "此参数设置模型允许生成的响应的最大长度。" + }, + "urlContext": { + "title": "启用 URL 上下文", + "description": "让 Gemini 读取链接的页面以提取、比较和综合其内容,从而提供明智的答复。" + }, + "groundingSearch": { + "title": "启用 Google 搜索基础", + "description": "将 Gemini 连接到实时网络数据,以获得包含可验证引用的准确、最新的答案。" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 
注册Google Cloud账号并启用Vertex AI API", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 96d1cae645..8e3fca95f6 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -311,6 +311,34 @@ "description": "調整溫度、top-p 和其他進階設定。" } }, + "geminiSections": { + "modelParameters": { + "title": "輸出控制", + "description": "微調 topP、topK 和 maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "較低的值使文字更具可預測性,而較高的值使其更具創造性。" + }, + "topP": { + "title": "Top P", + "description": "較低的值可以使文字更集中,而較高的值可以產生更多樣化的結果。" + }, + "maxOutputTokens": { + "title": "最大輸出 Token", + "description": "此參數設定模型允許產生的回應的最大長度。" + }, + "urlContext": { + "title": "啟用 URL 上下文", + "description": "讓 Gemini 讀取連結的頁面以提取、比較和綜合其內容,從而提供明智的答覆。" + }, + "groundingSearch": { + "title": "啟用 Google 搜尋基礎", + "description": "將 Gemini 連接到即時網路數據,以獲得包含可驗證引用的準確、最新的答案。" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 建立 Google Cloud 帳戶,啟用 Vertex AI API 並啟用所需的 Claude 模型。", From 88a7eb466cc7f607de294ac582a9b733c2211c08 Mon Sep 17 00:00:00 2001 From: "Ton Hoang Nguyen (Bill)" <32552798+HahaBill@users.noreply.github.com> Date: Fri, 18 Jul 2025 01:22:35 +0100 Subject: [PATCH 30/31] fix: removing redundant extra translations - a mistake made by the agent --- webview-ui/src/i18n/locales/ca/settings.json | 32 ++++++++----------- webview-ui/src/i18n/locales/de/settings.json | 28 ---------------- .../src/i18n/locales/zh-CN/settings.json | 28 ---------------- .../src/i18n/locales/zh-TW/settings.json | 28 ---------------- 4 files changed, 14 insertions(+), 102 deletions(-) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index a5ce520446..7a59d0fb6d 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -283,36 +283,32 @@ "cacheUsageNote": "Nota: Si no veieu l'ús de la caché, proveu de seleccionar un model diferent i després tornar a seleccionar el model desitjat.", "vscodeLmModel": "Model de llenguatge", "vscodeLmWarning": "Nota: Aquesta és una integració molt experimental i el suport del proveïdor variarà. Si rebeu un error sobre un model no compatible, és un problema del proveïdor.", + "geminiSections": { + "modelParameters": { + "title": "Controls de sortida", + "description": "Ajusta amb precisió topP, topK i maxOutputTokens" + } + }, "geminiParameters": { "topK": { "title": "Top K", - "description": "Controla el nombre de tokens amb més probabilitat a considerar en cada pas. Valors més alts augmenten la diversitat, valors més baixos fan que la sortida sigui més enfocada i determinista." + "description": "Un valor baix fa que el text sigui més predictible, mentre que un valor alt el fa més creatiu." }, "topP": { "title": "Top P", - "description": "Controla la probabilitat acumulada dels tokens a considerar (mètode nucleus). Valors més propers a 1,0 augmenten la diversitat, mentre que valors més baixos fan que la sortida sigui més enfocada." + "description": "Un valor baix condueix a un text més enfocat, mentre que un valor alt resulta en resultats més diversos." }, "maxOutputTokens": { - "title": "Tokens màxims de sortida", - "description": "Nombre màxim de tokens que el model pot generar en una sola resposta. Valors més alts permeten respostes més llargues, però augmenten l’ús de tokens i els costos." 
+ "title": "Màxim de Tokens de Sortida", + "description": "Aquest paràmetre estableix la longitud màxima de la resposta que el model pot generar." }, "urlContext": { - "title": "Activa context d’URL", - "description": "Permet a Gemini accedir i processar URLs per obtenir context addicional durant la generació de respostes. Útil per a tasques que requereixen anàlisi de contingut web." + "title": "Activa el Context d'URL", + "description": "Permet a Gemini llegir pàgines enllaçades per extreure, comparar i sintetitzar el seu contingut en respostes informades." }, "groundingSearch": { - "title": "Activa grounding amb cerca de Google", - "description": "Permet a Gemini cercar informació actual a Google i fonamentar les respostes en dades en temps real. Útil per a consultes que requereixen informació actualitzada." - }, - "contextLimit": { - "title": "Límit de context", - "description": "Nombre màxim de missatges anteriors a incloure en el context. Valors més baixos redueixen l’ús de tokens i els costos, però poden limitar la continuïtat de la conversa." - } - }, - "geminiSections": { - "modelParameters": { - "title": "Configuració avançada", - "description": "Ajusta la temperatura, top-p i altres paràmetres avançats." + "title": "Activa la Fonamentació amb la Cerca de Google", + "description": "Connecta Gemini a dades web en temps real per a respostes precises i actualitzades amb citacions verificables." } }, "googleCloudSetup": { diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 4440afdb28..ab142a7876 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -283,34 +283,6 @@ "cacheUsageNote": "Hinweis: Wenn Sie keine Cache-Nutzung sehen, versuchen Sie ein anderes Modell auszuwählen und dann Ihr gewünschtes Modell erneut auszuwählen.", "vscodeLmModel": "Sprachmodell", "vscodeLmWarning": "Hinweis: Dies ist eine sehr experimentelle Integration und die Anbieterunterstützung variiert. Wenn Sie einen Fehler über ein nicht unterstütztes Modell erhalten, liegt das Problem auf Anbieterseite.", - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Steuert die Anzahl der wahrscheinlichsten Tokens, die in jedem Schritt berücksichtigt werden. Höhere Werte erhöhen die Vielfalt, niedrigere Werte sorgen für fokussiertere und deterministischere Ausgaben." - }, - "topP": { - "title": "Top P", - "description": "Steuert die kumulative Wahrscheinlichkeit der in Betracht zu ziehenden Tokens (Nucleus Sampling). Werte näher bei 1,0 erhöhen die Vielfalt, während niedrigere Werte die Ausgabe fokussierter machen." - }, - "maxOutputTokens": { - "title": "Maximale Ausgabetokens", - "description": "Maximale Anzahl der Tokens, die das Modell in einer einzigen Antwort generieren kann. Höhere Werte ermöglichen längere Antworten, erhöhen jedoch den Tokenverbrauch und die Kosten." - }, - "urlContext": { - "title": "URL-Kontext aktivieren", - "description": "Ermöglicht es Gemini, URLs für zusätzlichen Kontext bei der Generierung von Antworten zu verwenden und zu verarbeiten. Nützlich für Aufgaben, die eine Webinhaltsanalyse erfordern." - }, - "groundingSearch": { - "title": "Grounding mit Google-Suche aktivieren", - "description": "Ermöglicht es Gemini, Google nach aktuellen Informationen zu durchsuchen und Antworten auf Echtzeitdaten zu stützen. Nützlich für Abfragen, die aktuelle Informationen erfordern." 
- } - }, - "geminiSections": { - "modelParameters": { - "title": "Erweiterte Einstellungen", - "description": "Feinabstimmung von Temperatur, Top-P und anderen erweiterten Einstellungen." - } - }, "geminiSections": { "modelParameters": { "title": "Ausgabesteuerung", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 724a3129f3..57c966ad77 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -283,34 +283,6 @@ "cacheUsageNote": "提示:若未显示缓存使用情况,请切换模型后重新选择", "vscodeLmModel": "VSCode LM 模型", "vscodeLmWarning": "注意:这是一个非常实验性的集成,提供商支持会有所不同。如果您收到有关不支持模型的错误,则这是提供商方面的问题。", - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "控制每个步骤要考虑的最高概率令牌数量。值越高,多样性越高;值越低,输出越集中和确定。" - }, - "topP": { - "title": "Top P", - "description": "控制要考虑的令牌的累积概率(核采样)。接近1.0的值增加多样性,而较低的值使输出更集中。" - }, - "maxOutputTokens": { - "title": "最大输出令牌", - "description": "模型在单个响应中可以生成的最大令牌数。值越高,响应越长,但会增加令牌使用量和成本。" - }, - "urlContext": { - "title": "启用URL上下文", - "description": "允许Gemini在生成响应时访问和处理URL以获取额外上下文。适用于需要网络内容分析的任务。" - }, - "groundingSearch": { - "title": "启用Google搜索落地", - "description": "允许Gemini在Google中搜索最新信息,并在实时数据的基础上生成响应。适用于需要最新信息的查询。" - } - }, - "geminiSections": { - "modelParameters": { - "title": "高级设置", - "description": "调整温度、top-p 和其他高级设置。" - } - }, "geminiSections": { "modelParameters": { "title": "输出控制", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 8e3fca95f6..313c87f473 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -283,34 +283,6 @@ "cacheUsageNote": "注意:如果您沒有看到快取使用情況,請嘗試選擇其他模型,然後重新選擇您想要的模型。", "vscodeLmModel": "語言模型", "vscodeLmWarning": "注意:此整合功能仍處於實驗階段,各供應商的支援程度可能不同。如果出現模型不支援的錯誤,通常是供應商方面的問題。", - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "控制要考慮的最高機率代幣數量。" - }, - "topP": { - "title": "Top P", - "description": "控制生成時累積機率閾值。" - }, - "maxOutputTokens": { - "title": "最大輸出代幣", - "description": "控制模型可返回的最大代幣數量。" - }, - "urlContext": { - "title": "啟用 URL 上下文", - "description": "允許在生成期間從提供的 URL 獲取頁面內容並將其包含在上下文中。" - }, - "groundingSearch": { - "title": "啟用使用 Google 搜索進行基礎支持", - "description": "在生成期間使用 Google 搜索以獲取最新資訊並將其包含在上下文中。" - } - }, - "geminiSections": { - "modelParameters": { - "title": "進階設定", - "description": "調整溫度、top-p 和其他進階設定。" - } - }, "geminiSections": { "modelParameters": { "title": "輸出控制", From 847756c573b1247afbd6ac7b58c3ad6175ed1d4d Mon Sep 17 00:00:00 2001 From: Roo Code Date: Fri, 18 Jul 2025 20:48:02 +0000 Subject: [PATCH 31/31] fix: remove duplicate translation keys in geminiSections and geminiParameters - Fixed duplicate keys in 13 localization files (es, fr, hi, id, it, ja, ko, nl, pl, pt-BR, ru, tr, vi) - Removed second occurrence of geminiSections and geminiParameters keys - Kept first occurrence which contains more comprehensive descriptions - All JSON files validated for syntax correctness - Translation completeness verified with missing translations script Resolves duplicate key issue identified in PR #4895 --- webview-ui/src/i18n/locales/es/settings.json | 28 ------------------- webview-ui/src/i18n/locales/fr/settings.json | 28 ------------------- webview-ui/src/i18n/locales/hi/settings.json | 28 ------------------- webview-ui/src/i18n/locales/id/settings.json | 28 ------------------- webview-ui/src/i18n/locales/it/settings.json | 28 ------------------- 
webview-ui/src/i18n/locales/ja/settings.json | 28 ------------------- webview-ui/src/i18n/locales/ko/settings.json | 28 ------------------- webview-ui/src/i18n/locales/nl/settings.json | 28 ------------------- webview-ui/src/i18n/locales/pl/settings.json | 28 ------------------- .../src/i18n/locales/pt-BR/settings.json | 28 ------------------- webview-ui/src/i18n/locales/ru/settings.json | 28 ------------------- webview-ui/src/i18n/locales/tr/settings.json | 28 ------------------- webview-ui/src/i18n/locales/vi/settings.json | 28 ------------------- 13 files changed, 364 deletions(-) diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index dc01756e63..224b423e4c 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -311,34 +311,6 @@ "description": "Ajusta la temperatura, top-p y otros parámetros avanzados." } }, - "geminiSections": { - "modelParameters": { - "title": "Controles de Salida", - "description": "Ajusta topP, topK y maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Un valor bajo hace que el texto sea más predecible, mientras que un valor alto lo hace más creativo." - }, - "topP": { - "title": "Top P", - "description": "Un valor bajo conduce a un texto más enfocado, mientras que un valor alto da como resultado resultados más diversos." - }, - "maxOutputTokens": { - "title": "Máximo de Tokens de Salida", - "description": "Este parámetro establece la longitud máxima de la respuesta que el modelo puede generar." - }, - "urlContext": { - "title": "Habilitar Contexto de URL", - "description": "Permite a Gemini leer páginas enlazadas para extraer, comparar y sintetizar su contenido en respuestas informadas." - }, - "groundingSearch": { - "title": "Habilitar Grounding con Búsqueda de Google", - "description": "Conecta a Gemini con datos web en tiempo real para obtener respuestas precisas y actualizadas con citas verificables." - } - }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", "step1": "1. Crear una cuenta de Google Cloud, habilitar la API de Vertex AI y habilitar los modelos Claude deseados.", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 52542f1667..e0176f4d6c 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -311,34 +311,6 @@ "description": "Ajustez la température, top-p et d'autres paramètres avancés." } }, - "geminiSections": { - "modelParameters": { - "title": "Contrôles de sortie", - "description": "Ajustez topP, topK et maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Une valeur faible rend le texte plus prévisible, tandis qu'une valeur élevée le rend plus créatif." - }, - "topP": { - "title": "Top P", - "description": "Une valeur faible conduit à un texte plus ciblé, tandis qu'une valeur élevée donne des résultats plus diversifiés." - }, - "maxOutputTokens": { - "title": "Jetons de sortie max", - "description": "Ce paramètre définit la longueur maximale de la réponse que le modèle est autorisé à générer." - }, - "urlContext": { - "title": "Activer le contexte d'URL", - "description": "Permet à Gemini de lire les pages liées pour extraire, comparer et synthétiser leur contenu en réponses éclairées." 
- }, - "groundingSearch": { - "title": "Activer l'ancrage avec la recherche Google", - "description": "Connecte Gemini aux données Web en temps réel pour des réponses précises et à jour avec des citations vérifiables." - } - }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", "step1": "1. Créer un compte Google Cloud, activer l'API Vertex AI et activer les modèles Claude souhaités.", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 7389103ba7..47474840ef 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -311,34 +311,6 @@ "description": "टेम्परेचर, टॉप-पी और अन्य उन्नत सेटिंग्स को फाइन-ट्यून करें।" } }, - "geminiSections": { - "modelParameters": { - "title": "आउटपुट नियंत्रण", - "description": "topP, topK और maxOutputTokens को ठीक करें" - } - }, - "geminiParameters": { - "topK": { - "title": "शीर्ष K", - "description": "कम मान पाठ को अधिक पूर्वानुमानित बनाता है, जबकि उच्च मान इसे अधिक रचनात्मक बनाता है।" - }, - "topP": { - "title": "शीर्ष P", - "description": "कम मान अधिक केंद्रित पाठ की ओर ले जाता है, जबकि उच्च मान के परिणामस्वरूप अधिक विविध परिणाम होते हैं।" - }, - "maxOutputTokens": { - "title": "अधिकतम आउटपुट टोकन", - "description": "यह पैरामीटर प्रतिक्रिया की अधिकतम लंबाई निर्धारित करता है जिसे मॉडल उत्पन्न करने की अनुमति है।" - }, - "urlContext": { - "title": "URL प्रसंग सक्षम करें", - "description": "जेमिनी को लिंक किए गए पृष्ठों को पढ़ने, तुलना करने और उनकी सामग्री को सूचित प्रतिक्रियाओं में संश्लेषित करने देता है।" - }, - "groundingSearch": { - "title": "Google खोज के साथ ग्राउंडिंग सक्षम करें", - "description": "सटीक, अद्यतित उत्तरों के लिए जेमिनी को रीयल-टाइम वेब डेटा से जोड़ता है जिसमें सत्यापन योग्य उद्धरण होते हैं।" - } - }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", "step1": "1. Google Cloud खाता बनाएं, Vertex AI API सक्षम करें और वांछित Claude मॉडल सक्षम करें।", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index e1b230612e..c7135ef32d 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -315,34 +315,6 @@ "description": "Menyesuaikan suhu, top-p, dan pengaturan lanjutan lainnya." } }, - "geminiSections": { - "modelParameters": { - "title": "Kontrol Output", - "description": "Menyesuaikan topP, topK, dan maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Nilai rendah membuat teks lebih dapat diprediksi, sedangkan nilai tinggi membuatnya lebih kreatif." - }, - "topP": { - "title": "Top P", - "description": "Nilai rendah menghasilkan teks yang lebih terfokus, sedangkan nilai tinggi menghasilkan hasil yang lebih beragam." - }, - "maxOutputTokens": { - "title": "Token Output Maks", - "description": "Parameter ini menetapkan panjang maksimum respons yang diizinkan untuk dibuat oleh model." - }, - "urlContext": { - "title": "Aktifkan Konteks URL", - "description": "Memungkinkan Gemini membaca halaman tertaut untuk mengekstrak, membandingkan, dan mensintesis kontennya menjadi respons yang terinformasi." - }, - "groundingSearch": { - "title": "Aktifkan Grounding dengan Google Search", - "description": "Menghubungkan Gemini ke data web real-time untuk jawaban yang akurat dan terkini dengan kutipan yang dapat diverifikasi." 
- } - }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", "step1": "1. Buat akun Google Cloud, aktifkan Vertex AI API & aktifkan model Claude yang diinginkan.", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 13460bfa21..a0170c2948 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -311,34 +311,6 @@ "description": "Regola la temperatura, top-p e altre impostazioni avanzate." } }, - "geminiSections": { - "modelParameters": { - "title": "Controlli di output", - "description": "Perfeziona topP, topK e maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Un valore basso rende il testo più prevedibile, mentre un valore alto lo rende più creativo." - }, - "topP": { - "title": "Top P", - "description": "Un valore basso porta a un testo più mirato, mentre un valore alto si traduce in risultati più diversi." - }, - "maxOutputTokens": { - "title": "Token di output massimi", - "description": "Questo parametro imposta la lunghezza massima della risposta che il modello è autorizzato a generare." - }, - "urlContext": { - "title": "Abilita contesto URL", - "description": "Consente a Gemini di leggere le pagine collegate per estrarre, confrontare e sintetizzare il loro contenuto in risposte informate." - }, - "groundingSearch": { - "title": "Abilita il grounding con la Ricerca Google", - "description": "Collega Gemini ai dati web in tempo reale per risposte accurate e aggiornate con citazioni verificabili." - } - }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", "step1": "1. Creare un account Google Cloud, abilitare l'API Vertex AI e abilitare i modelli Claude desiderati.", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 91d2bac289..92d734eafe 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -311,34 +311,6 @@ "description": "温度、top-p、およびその他の詳細設定を調整します。" } }, - "geminiSections": { - "modelParameters": { - "title": "出力制御", - "description": "topP、topK、maxOutputTokensを微調整します" - } - }, - "geminiParameters": { - "topK": { - "title": "トップK", - "description": "値を低くするとテキストの予測可能性が高まり、値を高くするとより創造的になります。" - }, - "topP": { - "title": "トップP", - "description": "値を低くするとより焦点の合ったテキストになり、値を高くするとより多様な結果になります。" - }, - "maxOutputTokens": { - "title": "最大出力トークン", - "description": "このパラメータは、モデルが生成できる応答の最大長を設定します。" - }, - "urlContext": { - "title": "URLコンテキストを有効にする", - "description": "Geminiがリンクされたページを読み取り、そのコンテンツを抽出、比較、統合して、情報に基づいた応答を生成できるようにします。" - }, - "groundingSearch": { - "title": "Google検索によるグラウンディングを有効にする", - "description": "GeminiをリアルタイムのWebデータに接続して、検証可能な引用付きの正確で最新の回答を提供します。" - } - }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", "step1": "1. Google Cloudアカウントを作成し、Vertex AI APIを有効にして、希望するClaudeモデルを有効にします。", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index ae41c03c94..46ebc3e4d0 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -311,34 +311,6 @@ "description": "온도, top-p 및 기타 고급 설정을 조정합니다." 
} }, - "geminiSections": { - "modelParameters": { - "title": "출력 제어", - "description": "topP, topK 및 maxOutputTokens 미세 조정" - } - }, - "geminiParameters": { - "topK": { - "title": "상위 K", - "description": "값이 낮으면 텍스트를 더 예측하기 쉬워지고, 값이 높으면 더 창의적으로 만듭니다." - }, - "topP": { - "title": "상위 P", - "description": "값이 낮으면 더 집중된 텍스트가 되고, 값이 높으면 더 다양한 결과가 나옵니다." - }, - "maxOutputTokens": { - "title": "최대 출력 토큰", - "description": "이 매개변수는 모델이 생성할 수 있는 응답의 최대 길이를 설정합니다." - }, - "urlContext": { - "title": "URL 컨텍스트 활성화", - "description": "Gemini가 연결된 페이지를 읽고 콘텐츠를 추출, 비교 및 종합하여 정보에 입각한 응답을 생성하도록 합니다." - }, - "groundingSearch": { - "title": "Google 검색으로 그라운딩 활성화", - "description": "Gemini를 실시간 웹 데이터에 연결하여 검증 가능한 인용과 함께 정확하고 최신 답변을 제공합니다." - } - }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", "step1": "1. Google Cloud 계정을 만들고, Vertex AI API를 활성화하고, 원하는 Claude 모델을 활성화하세요.", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index d5ef688808..2f7c799e23 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -311,34 +311,6 @@ "description": "Pas de temperatuur, top-p en andere geavanceerde instellingen aan." } }, - "geminiSections": { - "modelParameters": { - "title": "Uitvoerregelaars", - "description": "Verfijn topP, topK en maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Een lage waarde maakt de tekst voorspelbaarder, terwijl een hoge waarde deze creatiever maakt." - }, - "topP": { - "title": "Top P", - "description": "Een lage waarde leidt tot meer gerichte tekst, terwijl een hoge waarde resulteert in meer diverse resultaten." - }, - "maxOutputTokens": { - "title": "Max. uitvoertokens", - "description": "Deze parameter stelt de maximale lengte in van de respons die het model mag genereren." - }, - "urlContext": { - "title": "URL-context inschakelen", - "description": "Laat Gemini gelinkte pagina's lezen om hun inhoud te extraheren, te vergelijken en te synthetiseren tot geïnformeerde antwoorden." - }, - "groundingSearch": { - "title": "Aarding met Google Zoeken inschakelen", - "description": "Verbindt Gemini met realtime webgegevens voor nauwkeurige, up-to-date antwoorden met verifieerbare citaten." - } - }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", "step1": "1. Maak een Google Cloud-account aan, schakel de Vertex AI API in en activeer de gewenste Claude-modellen.", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 981b6394b7..11163b1666 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -311,34 +311,6 @@ "description": "Dostosuj temperaturę, top-p i inne zaawansowane ustawienia." } }, - "geminiSections": { - "modelParameters": { - "title": "Kontrola wyjścia", - "description": "Dostosuj topP, topK i maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Niska wartość sprawia, że tekst jest bardziej przewidywalny, a wysoka wartość czyni go bardziej kreatywnym." - }, - "topP": { - "title": "Top P", - "description": "Niska wartość prowadzi do bardziej skoncentrowanego tekstu, a wysoka wartość skutkuje bardziej zróżnicowanymi wynikami." - }, - "maxOutputTokens": { - "title": "Maksymalna liczba tokenów wyjściowych", - "description": "Ten parametr ustawia maksymalną długość odpowiedzi, jaką model może wygenerować." 
- }, - "urlContext": { - "title": "Włącz kontekst adresu URL", - "description": "Pozwala Gemini czytać połączone strony w celu wyodrębnienia, porównania i syntezy ich treści w świadome odpowiedzi." - }, - "groundingSearch": { - "title": "Włącz uziemienie za pomocą wyszukiwarki Google", - "description": "Łączy Gemini z danymi internetowymi w czasie rzeczywistym w celu uzyskania dokładnych, aktualnych odpowiedzi z weryfikowalnymi cytatami." - } - }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", "step1": "1. Utworzyć konto Google Cloud, włączyć API Vertex AI i włączyć żądane modele Claude.", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index b320342d49..dd5955d488 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -311,34 +311,6 @@ "description": "Ajuste a temperatura, top-p e outras configurações avançadas." } }, - "geminiSections": { - "modelParameters": { - "title": "Controles de saída", - "description": "Ajuste fino de topP, topK e maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Um valor baixo torna o texto mais previsível, enquanto um valor alto o torna mais criativo." - }, - "topP": { - "title": "Top P", - "description": "Um valor baixo leva a um texto mais focado, enquanto um valor alto resulta em resultados mais diversos." - }, - "maxOutputTokens": { - "title": "Tokens de saída máximos", - "description": "Este parâmetro define o comprimento máximo da resposta que o modelo tem permissão para gerar." - }, - "urlContext": { - "title": "Ativar contexto de URL", - "description": "Permite que o Gemini leia páginas vinculadas para extrair, comparar e sintetizar seu conteúdo em respostas informadas." - }, - "groundingSearch": { - "title": "Ativar o aterramento com a Pesquisa Google", - "description": "Conecta o Gemini a dados da web em tempo real para respostas precisas e atualizadas com citações verificáveis." - } - }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", "step1": "1. Criar uma conta Google Cloud, ativar a API Vertex AI e ativar os modelos Claude desejados.", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 1ae05b22bf..ec4a3861f4 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -311,34 +311,6 @@ "description": "Настройте температуру, top-p и другие расширенные параметры." } }, - "geminiSections": { - "modelParameters": { - "title": "Элементы управления выводом", - "description": "Точная настройка topP, topK и maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Топ K", - "description": "Низкое значение делает текст более предсказуемым, а высокое — более креативным." - }, - "topP": { - "title": "Топ P", - "description": "Низкое значение приводит к более сфокусированному тексту, а высокое — к более разнообразным результатам." - }, - "maxOutputTokens": { - "title": "Максимальное количество выходных токенов", - "description": "Этот параметр устанавливает максимальную длину ответа, который разрешено генерировать модели." - }, - "urlContext": { - "title": "Включить контекст URL", - "description": "Позволяет Gemini читать связанные страницы для извлечения, сравнения и обобщения их содержимого в обоснованные ответы." 
- }, - "groundingSearch": { - "title": "Включить заземление с помощью поиска Google", - "description": "Подключает Gemini к веб-данным в реальном времени для получения точных и актуальных ответов с проверяемыми цитатами." - } - }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", "step1": "1. Создайте аккаунт Google Cloud, включите Vertex AI API и нужные модели Claude.", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 850d79e1f3..27fcb1d682 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -311,34 +311,6 @@ "description": "Sıcaklık, top-p ve diğer gelişmiş ayarları yapın." } }, - "geminiSections": { - "modelParameters": { - "title": "Çıktı Kontrolleri", - "description": "topP, topK ve maxOutputTokens'e ince ayar yapın" - } - }, - "geminiParameters": { - "topK": { - "title": "En K", - "description": "Düşük bir değer metni daha öngörülebilir hale getirirken, yüksek bir değer onu daha yaratıcı hale getirir." - }, - "topP": { - "title": "En P", - "description": "Düşük bir değer daha odaklanmış metne yol açarken, yüksek bir değer daha çeşitli sonuçlara neden olur." - }, - "maxOutputTokens": { - "title": "Maksimum Çıkış Jetonu", - "description": "Bu parametre, modelin oluşturmasına izin verilen yanıtın maksimum uzunluğunu ayarlar." - }, - "urlContext": { - "title": "URL Bağlamını Etkinleştir", - "description": "Gemini'nin bağlantılı sayfaları okuyarak içeriklerini çıkarmasına, karşılaştırmasına ve bilgili yanıtlara sentezlemesine olanak tanır." - }, - "groundingSearch": { - "title": "Google Arama ile Topraklamayı Etkinleştir", - "description": "Doğrulanabilir alıntılarla doğru, güncel yanıtlar için Gemini'yi gerçek zamanlı web verilerine bağlar." - } - }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", "step1": "1. Google Cloud hesabı oluşturun, Vertex AI API'sini etkinleştirin ve istediğiniz Claude modellerini etkinleştirin.", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 77be601bbb..c83a10d4a5 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -311,34 +311,6 @@ "description": "Điều chỉnh nhiệt độ, top-p và các cài đặt nâng cao khác." } }, - "geminiSections": { - "modelParameters": { - "title": "Điều khiển đầu ra", - "description": "Tinh chỉnh topP, topK và maxOutputTokens" - } - }, - "geminiParameters": { - "topK": { - "title": "Top K", - "description": "Giá trị thấp làm cho văn bản dễ đoán hơn, trong khi giá trị cao làm cho nó sáng tạo hơn." - }, - "topP": { - "title": "Top P", - "description": "Giá trị thấp dẫn đến văn bản tập trung hơn, trong khi giá trị cao dẫn đến kết quả đa dạng hơn." - }, - "maxOutputTokens": { - "title": "Mã thông báo đầu ra tối đa", - "description": "Tham số này đặt độ dài tối đa của phản hồi mà mô hình được phép tạo." - }, - "urlContext": { - "title": "Bật ngữ cảnh URL", - "description": "Cho phép Gemini đọc các trang được liên kết để trích xuất, so sánh và tổng hợp nội dung của chúng thành các câu trả lời có đầy đủ thông tin." - }, - "groundingSearch": { - "title": "Bật tính năng tiếp đất với Google Tìm kiếm", - "description": "Kết nối Gemini với dữ liệu web thời gian thực để có câu trả lời chính xác, cập nhật với các trích dẫn có thể xác minh." 
- } - }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", "step1": "1. Tạo tài khoản Google Cloud, kích hoạt Vertex AI API và kích hoạt các mô hình Claude mong muốn.",