diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 3b53627295..c2112c597b 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -164,6 +164,11 @@ const lmStudioSchema = baseProviderSettingsSchema.extend({ const geminiSchema = apiModelIdProviderModelSchema.extend({ geminiApiKey: z.string().optional(), googleGeminiBaseUrl: z.string().optional(), + topP: z.number().optional(), + topK: z.number().optional(), + maxOutputTokens: z.number().optional(), + enableUrlContext: z.boolean().optional(), + enableGrounding: z.boolean().optional(), }) const geminiCliSchema = apiModelIdProviderModelSchema.extend({ diff --git a/src/api/providers/__tests__/gemini-handler.spec.ts b/src/api/providers/__tests__/gemini-handler.spec.ts new file mode 100644 index 0000000000..5f9e088ff2 --- /dev/null +++ b/src/api/providers/__tests__/gemini-handler.spec.ts @@ -0,0 +1,54 @@ +import { describe, it, expect, vi } from "vitest" +import { GeminiHandler } from "../gemini" +import type { ApiHandlerOptions } from "../../../shared/api" + +describe("GeminiHandler backend support", () => { + it("passes maxOutputTokens, topP, topK, and tools for URL context and grounding in config", async () => { + const options = { + apiProvider: "gemini", + maxOutputTokens: 5, + topP: 0.5, + topK: 10, + enableUrlContext: true, + enableGrounding: true, + } as ApiHandlerOptions + const handler = new GeminiHandler(options) + const stub = vi.fn().mockReturnValue((async function* () {})()) + // @ts-ignore access private client + handler["client"].models.generateContentStream = stub + await handler.createMessage("instr", [] as any).next() + const config = stub.mock.calls[0][0].config + expect(config.maxOutputTokens).toBe(5) + expect(config.topP).toBe(0.5) + expect(config.topK).toBe(10) + expect(config.tools).toEqual([{ urlContext: {} }, { googleSearch: {} }]) + }) + + it("completePrompt passes config overrides without tools when 
URL context and grounding disabled", async () => { + const options = { + apiProvider: "gemini", + maxOutputTokens: 7, + topP: 0.7, + topK: 3, + enableUrlContext: false, + enableGrounding: false, + } as ApiHandlerOptions + const handler = new GeminiHandler(options) + const stub = vi.fn().mockResolvedValue({ text: "ok" }) + // @ts-ignore access private client + handler["client"].models.generateContent = stub + const res = await handler.completePrompt("hi") + expect(res).toBe("ok") + expect(stub).toHaveBeenCalledWith( + expect.objectContaining({ + config: expect.objectContaining({ + maxOutputTokens: 7, + topP: 0.7, + topK: 3, + }), + }), + ) + const promptConfig = stub.mock.calls[0][0].config + expect(promptConfig.tools).toBeUndefined() + }) +}) diff --git a/src/api/providers/gemini.ts b/src/api/providers/gemini.ts index 6765c8676d..53d361a2ac 100644 --- a/src/api/providers/gemini.ts +++ b/src/api/providers/gemini.ts @@ -4,6 +4,7 @@ import { type GenerateContentResponseUsageMetadata, type GenerateContentParameters, type GenerateContentConfig, + type GroundingMetadata, } from "@google/genai" import type { JWTInput } from "google-auth-library" @@ -67,72 +68,104 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl const contents = messages.map(convertAnthropicMessageToGemini) - const config: GenerateContentConfig = { + const tools: GenerateContentConfig["tools"] = [] + if (this.options.enableUrlContext) { + tools.push({ urlContext: {} }) + } + if (this.options.enableGrounding) { + tools.push({ googleSearch: {} }) + } + const rawConfig = { systemInstruction, httpOptions: this.options.googleGeminiBaseUrl ? { baseUrl: this.options.googleGeminiBaseUrl } : undefined, thinkingConfig, - maxOutputTokens: this.options.modelMaxTokens ?? maxTokens ?? undefined, + maxOutputTokens: this.options.maxOutputTokens ?? this.options.modelMaxTokens ?? maxTokens ?? undefined, temperature: this.options.modelTemperature ?? 
0, + topP: this.options.topP, + topK: this.options.topK, + ...(tools.length > 0 ? { tools } : {}), } + const config = rawConfig as unknown as GenerateContentConfig const params: GenerateContentParameters = { model, contents, config } - const result = await this.client.models.generateContentStream(params) + try { + const result = await this.client.models.generateContentStream(params) - let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined + let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined + let pendingGroundingMetadata: GroundingMetadata | undefined - for await (const chunk of result) { - // Process candidates and their parts to separate thoughts from content - if (chunk.candidates && chunk.candidates.length > 0) { - const candidate = chunk.candidates[0] - if (candidate.content && candidate.content.parts) { - for (const part of candidate.content.parts) { - if (part.thought) { - // This is a thinking/reasoning part - if (part.text) { - yield { type: "reasoning", text: part.text } - } - } else { - // This is regular content - if (part.text) { - yield { type: "text", text: part.text } + for await (const chunk of result) { + // Process candidates and their parts to separate thoughts from content + if (chunk.candidates && chunk.candidates.length > 0) { + const candidate = chunk.candidates[0] + + if (candidate.groundingMetadata) { + pendingGroundingMetadata = candidate.groundingMetadata + } + + if (candidate.content && candidate.content.parts) { + for (const part of candidate.content.parts) { + if (part.thought) { + // This is a thinking/reasoning part + if (part.text) { + yield { type: "reasoning", text: part.text } + } + } else { + // This is regular content + if (part.text) { + yield { type: "text", text: part.text } + } } } } } - } - // Fallback to the original text property if no candidates structure - else if (chunk.text) { - yield { type: "text", text: chunk.text } + // Fallback to the original text property if no candidates 
structure + else if (chunk.text) { + yield { type: "text", text: chunk.text } + } + + if (chunk.usageMetadata) { + lastUsageMetadata = chunk.usageMetadata + } } - if (chunk.usageMetadata) { - lastUsageMetadata = chunk.usageMetadata + if (pendingGroundingMetadata) { + const citations = this.extractCitationsOnly(pendingGroundingMetadata) + if (citations) { + yield { type: "text", text: `\n\nSources: ${citations}` } + } } - } - if (lastUsageMetadata) { - const inputTokens = lastUsageMetadata.promptTokenCount ?? 0 - const outputTokens = lastUsageMetadata.candidatesTokenCount ?? 0 - const cacheReadTokens = lastUsageMetadata.cachedContentTokenCount - const reasoningTokens = lastUsageMetadata.thoughtsTokenCount - - yield { - type: "usage", - inputTokens, - outputTokens, - cacheReadTokens, - reasoningTokens, - totalCost: this.calculateCost({ info, inputTokens, outputTokens, cacheReadTokens }), + if (lastUsageMetadata) { + const inputTokens = lastUsageMetadata.promptTokenCount ?? 0 + const outputTokens = lastUsageMetadata.candidatesTokenCount ?? 0 + const cacheReadTokens = lastUsageMetadata.cachedContentTokenCount + const reasoningTokens = lastUsageMetadata.thoughtsTokenCount + + yield { + type: "usage", + inputTokens, + outputTokens, + cacheReadTokens, + reasoningTokens, + totalCost: this.calculateCost({ info, inputTokens, outputTokens, cacheReadTokens }), + } } + } catch (error) { + if (error instanceof Error) { + throw new Error(`Gemini Generate Context Stream error: ${error.message}`) + } + + throw error } } override getModel() { const modelId = this.options.apiModelId let id = modelId && modelId in geminiModels ? 
(modelId as GeminiModelId) : geminiDefaultModelId - const info: ModelInfo = geminiModels[id] + let info: ModelInfo = geminiModels[id] const params = getModelParams({ format: "gemini", modelId: id, model: info, settings: this.options }) // The `:thinking` suffix indicates that the model is a "Hybrid" @@ -142,22 +175,70 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl return { id: id.endsWith(":thinking") ? id.replace(":thinking", "") : id, info, ...params } } + private extractCitationsOnly(groundingMetadata?: GroundingMetadata): string | null { + const chunks = groundingMetadata?.groundingChunks + + if (!chunks) { + return null + } + + const citationLinks = chunks + .map((chunk, i) => { + const uri = chunk.web?.uri + if (uri) { + return `[${i + 1}](${uri})` + } + return null + }) + .filter((link): link is string => link !== null) + + if (citationLinks.length > 0) { + return citationLinks.join(", ") + } + + return null + } + async completePrompt(prompt: string): Promise { try { const { id: model } = this.getModel() + const tools: GenerateContentConfig["tools"] = [] + if (this.options.enableUrlContext) { + tools.push({ urlContext: {} }) + } + if (this.options.enableGrounding) { + tools.push({ googleSearch: {} }) + } + const rawPromptConfig = { + httpOptions: this.options.googleGeminiBaseUrl + ? { baseUrl: this.options.googleGeminiBaseUrl } + : undefined, + temperature: this.options.modelTemperature ?? 0, + maxOutputTokens: this.options.maxOutputTokens ?? this.options.modelMaxTokens, + topP: this.options.topP, + topK: this.options.topK, + ...(tools.length > 0 ? { tools } : {}), + } + const promptConfig = rawPromptConfig as unknown as GenerateContentConfig + const result = await this.client.models.generateContent({ model, contents: [{ role: "user", parts: [{ text: prompt }] }], - config: { - httpOptions: this.options.googleGeminiBaseUrl - ? 
{ baseUrl: this.options.googleGeminiBaseUrl } - : undefined, - temperature: this.options.modelTemperature ?? 0, - }, + config: promptConfig, }) - return result.text ?? "" + let text = result.text ?? "" + + const candidate = result.candidates?.[0] + if (candidate?.groundingMetadata) { + const citations = this.extractCitationsOnly(candidate.groundingMetadata) + if (citations) { + text += `\n\nSources: ${citations}` + } + } + + return text } catch (error) { if (error instanceof Error) { throw new Error(`Gemini completion error: ${error.message}`) diff --git a/src/core/sliding-window/__tests__/sliding-window.spec.ts b/src/core/sliding-window/__tests__/sliding-window.spec.ts index 3bda5351d4..b6f09125ac 100644 --- a/src/core/sliding-window/__tests__/sliding-window.spec.ts +++ b/src/core/sliding-window/__tests__/sliding-window.spec.ts @@ -250,7 +250,6 @@ describe("Sliding Window", () => { { role: "assistant", content: "Fourth message" }, { role: "user", content: "Fifth message" }, ] - it("should not truncate if tokens are below max tokens threshold", async () => { const modelInfo = createModelInfo(100000, 30000) const dynamicBuffer = modelInfo.contextWindow * TOKEN_BUFFER_PERCENTAGE // 10000 diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 06994b16b9..44383eec86 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -443,7 +443,11 @@ const ApiOptions = ({ )} {selectedProvider === "gemini" && ( - + )} {selectedProvider === "openai" && ( diff --git a/webview-ui/src/components/settings/providers/Gemini.tsx b/webview-ui/src/components/settings/providers/Gemini.tsx index 21056f12d5..05d083e381 100644 --- a/webview-ui/src/components/settings/providers/Gemini.tsx +++ b/webview-ui/src/components/settings/providers/Gemini.tsx @@ -1,8 +1,12 @@ -import { useCallback, useState } from "react" +import { useCallback, useState, useMemo } from "react" 
import { Checkbox } from "vscrui" import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" +import { Collapsible, CollapsibleContent, CollapsibleTrigger } from "@src/components/ui/collapsible" +import { Slider } from "@src/components/ui" +import { ChevronRight } from "lucide-react" import type { ProviderSettings } from "@roo-code/types" +import { geminiModels, geminiDefaultModelId, type GeminiModelId } from "@roo-code/types" import { useAppTranslation } from "@src/i18n/TranslationContext" import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" @@ -12,14 +16,23 @@ import { inputEventTransform } from "../transforms" type GeminiProps = { apiConfiguration: ProviderSettings setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void + currentModelId?: string } -export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiProps) => { +export const Gemini = ({ apiConfiguration, setApiConfigurationField, currentModelId }: GeminiProps) => { const { t } = useAppTranslation() const [googleGeminiBaseUrlSelected, setGoogleGeminiBaseUrlSelected] = useState( !!apiConfiguration?.googleGeminiBaseUrl, ) + const [isModelParametersOpen, setIsModelParametersOpen] = useState(false) + + const modelInfo = useMemo(() => { + const modelId = ( + currentModelId && currentModelId in geminiModels ? currentModelId : geminiDefaultModelId + ) as GeminiModelId + return geminiModels[modelId] + }, [currentModelId]) const handleInputChange = useCallback( ( @@ -50,12 +63,13 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiPro {t("settings:providers.getGeminiApiKey")} )} +
{ setGoogleGeminiBaseUrlSelected(checked) - if (!checked) { setApiConfigurationField("googleGeminiBaseUrl", "") } @@ -71,6 +85,131 @@ export const Gemini = ({ apiConfiguration, setApiConfigurationField }: GeminiPro className="w-full mt-1" /> )} + + setApiConfigurationField("enableUrlContext", checked)}> + {t("settings:providers.geminiParameters.urlContext.title")} + +
+ {t("settings:providers.geminiParameters.urlContext.description")} +
+ + setApiConfigurationField("enableGrounding", checked)}> + {t("settings:providers.geminiParameters.groundingSearch.title")} + +
+ {t("settings:providers.geminiParameters.groundingSearch.description")} +
+ +
+ + +
+
+

+ {t("settings:providers.geminiSections.modelParameters.title")} +

+

+ {t("settings:providers.geminiSections.modelParameters.description")} +

+
+ +
+
+ +
+ +
+ + setApiConfigurationField("topP", values[0]) + } + className="flex-grow" + /> + + {(apiConfiguration.topP ?? 0.95).toFixed(2)} + +
+
+ {t("settings:providers.geminiParameters.topP.description")} +
+
+ +
+ +
+ + setApiConfigurationField("topK", values[0]) + } + className="flex-grow" + /> + {apiConfiguration.topK ?? 64} +
+
+ {t("settings:providers.geminiParameters.topK.description")} +
+
+ +
+ +
+ + setApiConfigurationField("maxOutputTokens", values[0]) + } + className="flex-grow" + /> + { + const val = parseInt((e as any).target.value, 10) + return Number.isNaN(val) ? 0 : Math.min(val, modelInfo.maxTokens) + })} + className="w-16" + /> +
+
+ {t("settings:providers.geminiParameters.maxOutputTokens.description")}_{" "} +
+
+
+
+
) diff --git a/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx new file mode 100644 index 0000000000..725cdf9fd4 --- /dev/null +++ b/webview-ui/src/components/settings/providers/__tests__/Gemini.spec.tsx @@ -0,0 +1,64 @@ +import { render, screen } from "@testing-library/react" +import userEvent from "@testing-library/user-event" +import { Gemini } from "../Gemini" +import type { ProviderSettings } from "@roo-code/types" + +vi.mock("@vscode/webview-ui-toolkit/react", () => ({ + VSCodeTextField: ({ children, value, onInput, type }: any) => ( +
+ {children} + onInput(e)} /> +
+ ), +})) + +vi.mock("vscrui", () => ({ + Checkbox: ({ children, checked, onChange, "data-testid": testId, _ }: any) => ( + + ), +})) + +vi.mock("@src/components/ui", () => ({ + Slider: ({ min, max, step, value, onValueChange, "data-testid": testId, _ }: any) => ( + onValueChange([Number(e.target.value)])} + /> + ), +})) + +vi.mock("@src/i18n/TranslationContext", () => ({ + useAppTranslation: () => ({ t: (key: string) => key }), +})) + +vi.mock("@src/components/common/VSCodeButtonLink", () => ({ + VSCodeButtonLink: ({ children, href }: any) => {children}, +})) + +describe("Gemini provider settings", () => { + it("renders sliders for topP, topK and maxOutputTokens after expanding", async () => { + const user = userEvent.setup() + const setApiField = vi.fn() + const config: ProviderSettings = {} + render() + + expect(screen.queryByTestId("slider-top-p")).not.toBeInTheDocument() + expect(screen.queryByTestId("slider-top-k")).not.toBeInTheDocument() + expect(screen.queryByTestId("slider-max-output-tokens")).not.toBeInTheDocument() + + const trigger = screen.getByText("settings:providers.geminiSections.modelParameters.title") + await user.click(trigger) + + expect(screen.getByTestId("slider-top-p")).toBeInTheDocument() + expect(screen.getByTestId("slider-top-k")).toBeInTheDocument() + expect(screen.getByTestId("slider-max-output-tokens")).toBeInTheDocument() + }) +}) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 15018e64ab..7a59d0fb6d 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Nota: Si no veieu l'ús de la caché, proveu de seleccionar un model diferent i després tornar a seleccionar el model desitjat.", "vscodeLmModel": "Model de llenguatge", "vscodeLmWarning": "Nota: Aquesta és una integració molt experimental i el suport del proveïdor variarà. 
Si rebeu un error sobre un model no compatible, és un problema del proveïdor.", + "geminiSections": { + "modelParameters": { + "title": "Controls de sortida", + "description": "Ajusta amb precisió topP, topK i maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Un valor baix fa que el text sigui més predictible, mentre que un valor alt el fa més creatiu." + }, + "topP": { + "title": "Top P", + "description": "Un valor baix condueix a un text més enfocat, mentre que un valor alt resulta en resultats més diversos." + }, + "maxOutputTokens": { + "title": "Màxim de Tokens de Sortida", + "description": "Aquest paràmetre estableix la longitud màxima de la resposta que el model pot generar." + }, + "urlContext": { + "title": "Activa el Context d'URL", + "description": "Permet a Gemini llegir pàgines enllaçades per extreure, comparar i sintetitzar el seu contingut en respostes informades." + }, + "groundingSearch": { + "title": "Activa la Fonamentació amb la Cerca de Google", + "description": "Connecta Gemini a dades web en temps real per a respostes precises i actualitzades amb citacions verificables." + } + }, "googleCloudSetup": { "title": "Per utilitzar Google Cloud Vertex AI, necessiteu:", "step1": "1. Crear un compte de Google Cloud, habilitar l'API de Vertex AI i habilitar els models Claude necessaris.", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index bb5ed1146b..ab142a7876 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Hinweis: Wenn Sie keine Cache-Nutzung sehen, versuchen Sie ein anderes Modell auszuwählen und dann Ihr gewünschtes Modell erneut auszuwählen.", "vscodeLmModel": "Sprachmodell", "vscodeLmWarning": "Hinweis: Dies ist eine sehr experimentelle Integration und die Anbieterunterstützung variiert. 
Wenn Sie einen Fehler über ein nicht unterstütztes Modell erhalten, liegt das Problem auf Anbieterseite.", + "geminiSections": { + "modelParameters": { + "title": "Ausgabesteuerung", + "description": "Feinabstimmung von topP, topK und maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Ein niedriger Wert macht den Text vorhersagbarer, während ein hoher Wert ihn kreativer macht." + }, + "topP": { + "title": "Top P", + "description": "Ein niedriger Wert führt zu fokussierterem Text, während ein hoher Wert zu vielfältigeren Ergebnissen führt." + }, + "maxOutputTokens": { + "title": "Maximale Ausgabe-Tokens", + "description": "Dieser Parameter legt die maximale Länge der Antwort fest, die das Modell generieren darf." + }, + "urlContext": { + "title": "URL-Kontext aktivieren", + "description": "Ermöglicht Gemini, verlinkte Seiten zu lesen, um deren Inhalt zu extrahieren, zu vergleichen und in fundierte Antworten zu synthetisieren." + }, + "groundingSearch": { + "title": "Grounding mit Google Suche aktivieren", + "description": "Verbindet Gemini mit Echtzeit-Webdaten für genaue, aktuelle Antworten mit überprüfbaren Zitaten." + } + }, "googleCloudSetup": { "title": "Um Google Cloud Vertex AI zu verwenden, müssen Sie:", "step1": "1. Ein Google Cloud-Konto erstellen, die Vertex AI API aktivieren & die gewünschten Claude-Modelle aktivieren.", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 728e856502..30ccd11ba8 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Note: If you don't see cache usage, try selecting a different model and then selecting your desired model again.", "vscodeLmModel": "Language Model", "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. 
If you get an error about a model not being supported, that's an issue on the provider's end.", + "geminiSections": { + "modelParameters": { + "title": "Output Controls", + "description": "Fine-tune topP, topK and maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "A low value makes the text more predictable, while a high value makes it more creative." + }, + "topP": { + "title": "Top P", + "description": "A low value leads to more focused text, while a high value results in more diverse outcomes." + }, + "maxOutputTokens": { + "title": "Max Output Tokens", + "description": "This parameter sets the maximum length of the response the model is allowed to generate." + }, + "urlContext": { + "title": "Enable URL Context", + "description": "Lets Gemini read linked pages to extract, compare, and synthesize their content into informed responses." + }, + "groundingSearch": { + "title": "Enable Grounding with Google Search", + "description": "Connects Gemini to real‑time web data for accurate, up‑to‑date answers with verifiable citations." + } + }, "googleCloudSetup": { "title": "To use Google Cloud Vertex AI, you need to:", "step1": "1. Create a Google Cloud account, enable the Vertex AI API & enable the desired Claude models.", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 5836933b46..224b423e4c 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Nota: Si no ve el uso del caché, intente seleccionar un modelo diferente y luego seleccionar nuevamente su modelo deseado.", "vscodeLmModel": "Modelo de lenguaje", "vscodeLmWarning": "Nota: Esta es una integración muy experimental y el soporte del proveedor variará. 
Si recibe un error sobre un modelo no compatible, es un problema del proveedor.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controla el número de tokens con mayor probabilidad a considerar en cada paso. Valores más altos aumentan la diversidad, valores más bajos hacen que la salida sea más enfocada y determinista." + }, + "topP": { + "title": "Top P", + "description": "Controla la probabilidad acumulada de tokens a considerar (muestra de núcleo). Valores cercanos a 1.0 aumentan la diversidad, mientras que valores más bajos hacen que la salida sea más enfocada." + }, + "maxOutputTokens": { + "title": "Tokens máximos de salida", + "description": "Número máximo de tokens que el modelo puede generar en una sola respuesta. Valores más altos permiten respuestas más largas, pero aumentan el uso de tokens y los costos." + }, + "urlContext": { + "title": "Habilitar contexto de URL", + "description": "Permite que Gemini acceda y procese URLs para contexto adicional al generar respuestas. Útil para tareas que requieren análisis de contenido web." + }, + "groundingSearch": { + "title": "Habilitar grounding con búsqueda en Google", + "description": "Permite que Gemini busque en Google información actual y fundamente las respuestas en datos en tiempo real. Útil para consultas que requieren información actualizada." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Configuración avanzada", + "description": "Ajusta la temperatura, top-p y otros parámetros avanzados." + } + }, "googleCloudSetup": { "title": "Para usar Google Cloud Vertex AI, necesita:", "step1": "1. 
Crear una cuenta de Google Cloud, habilitar la API de Vertex AI y habilitar los modelos Claude deseados.", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 833a789e5a..e0176f4d6c 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Remarque : Si vous ne voyez pas l'utilisation du cache, essayez de sélectionner un modèle différent puis de sélectionner à nouveau votre modèle souhaité.", "vscodeLmModel": "Modèle de langage", "vscodeLmWarning": "Remarque : Il s'agit d'une intégration très expérimentale et le support des fournisseurs variera. Si vous recevez une erreur concernant un modèle non pris en charge, c'est un problème du côté du fournisseur.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Contrôle le nombre de tokens les plus probables à considérer à chaque étape. Des valeurs plus élevées augmentent la diversité, des valeurs plus faibles rendent la sortie plus ciblée et déterministe." + }, + "topP": { + "title": "Top P", + "description": "Contrôle la probabilité cumulée des tokens à considérer (échantillonnage nucleus). Des valeurs proches de 1,0 augmentent la diversité, tandis que des valeurs plus faibles rendent la sortie plus ciblée." + }, + "maxOutputTokens": { + "title": "Nombre maximal de tokens de sortie", + "description": "Nombre maximal de tokens que le modèle peut générer dans une seule réponse. Des valeurs plus élevées permettent des réponses plus longues mais augmentent l'utilisation des tokens et les coûts." + }, + "urlContext": { + "title": "Activer le contexte d'URL", + "description": "Permet à Gemini d'accéder et de traiter les URL pour un contexte supplémentaire lors de la génération des réponses. Utile pour les tâches nécessitant l'analyse de contenu web." 
+ }, + "groundingSearch": { + "title": "Activer la mise en contexte via la recherche Google", + "description": "Permet à Gemini d'effectuer des recherches sur Google pour obtenir des informations actuelles et fonder les réponses sur des données en temps réel. Utile pour les requêtes nécessitant des informations à jour." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Paramètres avancés", + "description": "Ajustez la température, top-p et d'autres paramètres avancés." + } + }, "googleCloudSetup": { "title": "Pour utiliser Google Cloud Vertex AI, vous devez :", "step1": "1. Créer un compte Google Cloud, activer l'API Vertex AI et activer les modèles Claude souhaités.", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 0749943508..47474840ef 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "नोट: यदि आप कैश उपयोग नहीं देखते हैं, तो एक अलग मॉडल चुनने का प्रयास करें और फिर अपने वांछित मॉडल को पुनः चुनें।", "vscodeLmModel": "भाषा मॉडल", "vscodeLmWarning": "नोट: यह एक बहुत ही प्रायोगिक एकीकरण है और प्रदाता समर्थन भिन्न होगा। यदि आपको किसी मॉडल के समर्थित न होने की त्रुटि मिलती है, तो यह प्रदाता की ओर से एक समस्या है।", + "geminiParameters": { + "topK": { + "title": "शीर्ष K", + "description": "प्रत्येक चरण के लिए विचार करने के लिए उच्चतम संभावना वाले टोकनों की संख्या को नियंत्रित करता है। उच्च मान विविधता बढ़ाते हैं, निम्न मान आउटपुट को अधिक केंद्रित और निर्धार्त बनाते हैं।" + }, + "topP": { + "title": "शीर्ष P", + "description": "समूह नमूना (न्यूक्लियस सैंपलिंग) में विचार करने के लिए टोकनों की संचयी संभावना को नियंत्रित करता है। 1.0 के करीब मान विविधता बढ़ाते हैं, जबकि निम्न मान आउटपुट को अधिक केंद्रित बनाते हैं।" + }, + "maxOutputTokens": { + "title": "अधिकतम आउटपुट टोकन", + "description": "मॉडल एकल प्रतिक्रिया में उत्पन्न कर सकने वाले अधिकतम टोकनों की संख्या। उच्च मान लंबी प्रतिक्रियाएं सक्षम 
करते हैं लेकिन टोकन उपयोग और लागत बढ़ाते हैं।" + }, + "urlContext": { + "title": "URL संदर्भ सक्षम करें", + "description": "जब प्रतिक्रियाएं उत्पन्न करता है, अतिरिक्त संदर्भ के लिए Gemini को URL तक पहुंचने और संसाधित करने की अनुमति देता है। वेब सामग्री विश्लेषण वाली कार्यों के लिए उपयोगी।" + }, + "groundingSearch": { + "title": "Google खोज के साथ ग्राउंडिंग सक्षम करें", + "description": "Gemini को वास्तविक समय के डेटा पर आधारित उत्तर प्रदान करने के लिए Google पर जानकारी खोजने और उत्तरों को ग्राउंड करने की अनुमति देता है। अद्यतित जानकारी की आवश्यकता वाली क्वेरीज़ के लिए उपयोगी।" + } + }, + "geminiSections": { + "modelParameters": { + "title": "उन्नत सेटिंग्स", + "description": "टेम्परेचर, टॉप-पी और अन्य उन्नत सेटिंग्स को फाइन-ट्यून करें।" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI का उपयोग करने के लिए, आपको आवश्यकता है:", "step1": "1. Google Cloud खाता बनाएं, Vertex AI API सक्षम करें और वांछित Claude मॉडल सक्षम करें।", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 4a0c51d39d..c7135ef32d 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -287,6 +287,34 @@ "cacheUsageNote": "Catatan: Jika kamu tidak melihat penggunaan cache, coba pilih model yang berbeda lalu pilih model yang kamu inginkan lagi.", "vscodeLmModel": "Model Bahasa", "vscodeLmWarning": "Catatan: Ini adalah integrasi yang sangat eksperimental dan dukungan provider akan bervariasi. Jika kamu mendapat error tentang model yang tidak didukung, itu adalah masalah di sisi provider.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Mengontrol jumlah token dengan probabilitas tertinggi yang dipertimbangkan pada setiap langkah. Nilai lebih tinggi meningkatkan keberagaman, nilai lebih rendah membuat keluaran lebih terfokus dan deterministik." 
+ }, + "topP": { + "title": "Top P", + "description": "Mengontrol probabilitas kumulatif token yang dipertimbangkan (pengambilan sampel nucleus). Nilai mendekati 1.0 meningkatkan keberagaman, sedangkan nilai lebih rendah membuat keluaran lebih terfokus." + }, + "maxOutputTokens": { + "title": "Token Output Maksimum", + "description": "Jumlah maksimum token yang dapat dihasilkan model dalam satu respons. Nilai lebih tinggi memungkinkan respons lebih panjang tetapi meningkatkan penggunaan token dan biaya." + }, + "urlContext": { + "title": "Aktifkan Konteks URL", + "description": "Memungkinkan Gemini mengakses dan memproses URL untuk konteks tambahan saat menghasilkan respons. Berguna untuk tugas yang memerlukan analisis konten web." + }, + "groundingSearch": { + "title": "Aktifkan Grounding dengan Pencarian Google", + "description": "Memungkinkan Gemini mencari informasi terkini di Google dan mendasarkan respons pada data waktu nyata. Berguna untuk kueri yang memerlukan informasi terkini." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Pengaturan Lanjutan", + "description": "Menyesuaikan suhu, top-p, dan pengaturan lanjutan lainnya." + } + }, "googleCloudSetup": { "title": "Untuk menggunakan Google Cloud Vertex AI, kamu perlu:", "step1": "1. Buat akun Google Cloud, aktifkan Vertex AI API & aktifkan model Claude yang diinginkan.", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 9d9be82868..a0170c2948 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Nota: Se non vedi l'utilizzo della cache, prova a selezionare un modello diverso e poi seleziona nuovamente il modello desiderato.", "vscodeLmModel": "Modello linguistico", "vscodeLmWarning": "Nota: Questa è un'integrazione molto sperimentale e il supporto del fornitore varierà. 
Se ricevi un errore relativo a un modello non supportato, si tratta di un problema del fornitore.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controlla il numero di token con la massima probabilità da considerare ad ogni passaggio. Valori più alti aumentano la diversità, valori più bassi rendono l'output più focalizzato e deterministico." + }, + "topP": { + "title": "Top P", + "description": "Controlla la probabilità cumulativa dei token da considerare (campionamento nucleare). Valori prossimi a 1,0 aumentano la diversità, mentre valori più bassi rendono l'output più focalizzato." + }, + "maxOutputTokens": { + "title": "Token massimi di output", + "description": "Numero massimo di token che il modello può generare in una singola risposta. Valori più alti consentono risposte più lunghe ma aumentano l'utilizzo dei token e i costi." + }, + "urlContext": { + "title": "Abilita contesto URL", + "description": "Consente a Gemini di accedere e processare URL per contesto aggiuntivo durante la generazione delle risposte. Utile per attività che richiedono analisi di contenuti web." + }, + "groundingSearch": { + "title": "Abilita grounding con ricerca Google", + "description": "Consente a Gemini di cercare informazioni aggiornate su Google e basare le risposte su dati in tempo reale. Utile per query che richiedono informazioni aggiornate." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Impostazioni avanzate", + "description": "Regola la temperatura, top-p e altre impostazioni avanzate." + } + }, "googleCloudSetup": { "title": "Per utilizzare Google Cloud Vertex AI, è necessario:", "step1": "1. 
Creare un account Google Cloud, abilitare l'API Vertex AI e abilitare i modelli Claude desiderati.", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 9fc03cbfb1..92d734eafe 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "注意:キャッシュの使用が表示されない場合は、別のモデルを選択してから希望のモデルを再度選択してみてください。", "vscodeLmModel": "言語モデル", "vscodeLmWarning": "注意:これは非常に実験的な統合であり、プロバイダーのサポートは異なります。モデルがサポートされていないというエラーが表示された場合、それはプロバイダー側の問題です。", + "geminiParameters": { + "topK": { + "title": "トップK", + "description": "各ステップで考慮する最も確率の高いトークンの数を制御します。値を大きくすると多様性が増し、値を小さくすると出力がより集中して決定的になります。" + }, + "topP": { + "title": "トップP", + "description": "考慮するトークンの累積確率を制御します(ニュークリアスサンプリング)。1.0に近い値は多様性を高め、低い値は出力をより集中させます。" + }, + "maxOutputTokens": { + "title": "最大出力トークン数", + "description": "モデルが1つの応答で生成できる最大トークン数。値が大きいほど長い応答が可能になりますが、トークン使用量とコストが増加します。" + }, + "urlContext": { + "title": "URLコンテキストを有効にする", + "description": "応答を生成する際に、追加のコンテキストとしてGeminiがURLにアクセスして処理できるようにします。Webコンテンツの分析を必要とするタスクに役立ちます。" + }, + "groundingSearch": { + "title": "Google検索でのグラウンディングを有効にする", + "description": "GeminiがGoogleを検索して最新情報を取得し、リアルタイムデータに基づいて応答をグラウンディングできるようにします。最新情報が必要なクエリに便利です。" + } + }, + "geminiSections": { + "modelParameters": { + "title": "詳細設定", + "description": "温度、top-p、およびその他の詳細設定を調整します。" + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AIを使用するには:", "step1": "1. Google Cloudアカウントを作成し、Vertex AI APIを有効にして、希望するClaudeモデルを有効にします。", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 219daa05a4..46ebc3e4d0 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "참고: 캐시 사용이 표시되지 않는 경우, 다른 모델을 선택한 다음 원하는 모델을 다시 선택해 보세요.", "vscodeLmModel": "언어 모델", "vscodeLmWarning": "참고: 이는 매우 실험적인 통합이며, 공급자 지원은 다를 수 있습니다. 
모델이 지원되지 않는다는 오류가 발생하면, 이는 공급자 측의 문제입니다.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "각 단계에서 고려할 최고 확률 토큰의 수를 제어합니다. 값이 높을수록 다양성이 증가하고, 값이 낮을수록 출력이 더 집중되고 결정적이 됩니다." + }, + "topP": { + "title": "Top P", + "description": "고려할 토큰의 누적 확률을 제어합니다(뉴클리어스 샘플링). 1.0에 가까운 값은 다양성을 높이고, 낮은 값은 출력을 더 집중되게 합니다." + }, + "maxOutputTokens": { + "title": "최대 출력 토큰", + "description": "모델이 하나의 응답에서 생성할 수 있는 최대 토큰 수입니다. 높은 값은 더 긴 응답을 허용하지만 토큰 사용량과 비용을 증가시킵니다." + }, + "urlContext": { + "title": "URL 컨텍스트 활성화", + "description": "응답을 생성할 때 추가 컨텍스트를 위해 Gemini가 URL에 액세스하고 처리할 수 있도록 합니다. 웹 콘텐츠 분석이 필요한 작업에 유용합니다." + }, + "groundingSearch": { + "title": "Google 검색과 함께 근거 지정 활성화", + "description": "Gemini가 최신 정보를 얻기 위해 Google을 검색하고 응답을 실시간 데이터에 근거하도록 합니다. 최신 정보가 필요한 쿼리에 유용합니다." + } + }, + "geminiSections": { + "modelParameters": { + "title": "고급 설정", + "description": "온도, top-p 및 기타 고급 설정을 조정합니다." + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI를 사용하려면:", "step1": "1. Google Cloud 계정을 만들고, Vertex AI API를 활성화하고, 원하는 Claude 모델을 활성화하세요.", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index e184f4d85e..2f7c799e23 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Let op: als je geen cachegebruik ziet, probeer dan een ander model te selecteren en vervolgens weer je gewenste model.", "vscodeLmModel": "Taalmodel", "vscodeLmWarning": "Let op: dit is een zeer experimentele integratie en ondersteuning door providers kan variëren. Krijg je een foutmelding dat een model niet wordt ondersteund, dan ligt dat aan de provider.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Beheert het aantal tokens met de hoogste waarschijnlijkheid dat in elke stap wordt overwogen. Hogere waarden vergroten de diversiteit, lagere waarden maken de uitvoer meer gefocust en deterministisch." 
+ }, + "topP": { + "title": "Top P", + "description": "Beheert de cumulatieve kans van tokens om te overwegen (nucleus-sampling). Waarden dicht bij 1,0 vergroten de diversiteit, terwijl lagere waarden de uitvoer meer gefocust maken." + }, + "maxOutputTokens": { + "title": "Maximale uitvoertokens", + "description": "Maximaal aantal tokens dat het model in één antwoord kan genereren. Hogere waarden maken langere antwoorden mogelijk, maar verhogen het tokengebruik en de kosten." + }, + "urlContext": { + "title": "URL-context inschakelen", + "description": "Staat Gemini toe om URL's te openen en te verwerken voor extra context bij het genereren van antwoorden. Handig voor taken die webinhoudsanalyse vereisen." + }, + "groundingSearch": { + "title": "Grounding met Google-zoekopdracht inschakelen", + "description": "Staat Gemini toe om Google te doorzoeken voor actuele informatie en antwoorden op realtime gegevens te baseren. Handig voor vragen die actuele informatie vereisen." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Geavanceerde instellingen", + "description": "Pas de temperatuur, top-p en andere geavanceerde instellingen aan." + } + }, "googleCloudSetup": { "title": "Om Google Cloud Vertex AI te gebruiken, moet je:", "step1": "1. Maak een Google Cloud-account aan, schakel de Vertex AI API in en activeer de gewenste Claude-modellen.", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 23d3ce707d..11163b1666 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Uwaga: Jeśli nie widzisz użycia bufora, spróbuj wybrać inny model, a następnie ponownie wybrać żądany model.", "vscodeLmModel": "Model językowy", "vscodeLmWarning": "Uwaga: To bardzo eksperymentalna integracja, a wsparcie dostawcy może się różnić. 
Jeśli otrzymasz błąd dotyczący nieobsługiwanego modelu, jest to problem po stronie dostawcy.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Kontroluje liczbę tokenów o najwyższym prawdopodobieństwie rozważanych na każdym kroku. Wyższe wartości zwiększają różnorodność, niższe wartości powodują bardziej skupione i deterministyczne wyniki." + }, + "topP": { + "title": "Top P", + "description": "Kontroluje skumulowane prawdopodobieństwo tokenów do rozważenia (nucleus sampling). Wartości bliskie 1.0 zwiększają różnorodność, a niższe wartości sprawiają, że wyjście jest bardziej skupione." + }, + "maxOutputTokens": { + "title": "Maksymalna liczba tokenów wyjściowych", + "description": "Maksymalna liczba tokenów, które model może wygenerować w jednej odpowiedzi. Wyższe wartości umożliwiają dłuższe odpowiedzi, ale zwiększają użycie tokenów i koszty." + }, + "urlContext": { + "title": "Włącz kontekst URL", + "description": "Pozwala Gemini uzyskiwać dostęp i przetwarzać adresy URL w celu uzyskania dodatkowego kontekstu podczas generowania odpowiedzi. Przydatne w zadaniach wymagających analizy zawartości sieci Web." + }, + "groundingSearch": { + "title": "Włącz grounding przy użyciu wyszukiwarki Google", + "description": "Pozwala Gemini przeszukiwać Google w celu uzyskania aktualnych informacji i opierać odpowiedzi na danych w czasie rzeczywistym. Przydatne w zapytaniach wymagających najnowszych informacji." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Ustawienia zaawansowane", + "description": "Dostosuj temperaturę, top-p i inne zaawansowane ustawienia." + } + }, "googleCloudSetup": { "title": "Aby korzystać z Google Cloud Vertex AI, potrzebujesz:", "step1": "1. 
Utworzyć konto Google Cloud, włączyć API Vertex AI i włączyć żądane modele Claude.", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 102036622c..dd5955d488 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Nota: Se você não vir o uso do cache, tente selecionar um modelo diferente e depois selecionar novamente o modelo desejado.", "vscodeLmModel": "Modelo de Linguagem", "vscodeLmWarning": "Nota: Esta é uma integração muito experimental e o suporte do provedor pode variar. Se você receber um erro sobre um modelo não ser suportado, isso é um problema do lado do provedor.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Controla o número de tokens com maior probabilidade a considerar em cada etapa. Valores mais altos aumentam a diversidade, valores mais baixos tornam a saída mais focada e determinística." + }, + "topP": { + "title": "Top P", + "description": "Controla a probabilidade cumulativa de tokens a considerar (amostragem de núcleo). Valores próximos a 1,0 aumentam a diversidade, enquanto valores mais baixos tornam a saída mais focada." + }, + "maxOutputTokens": { + "title": "Tokens máximos de saída", + "description": "Número máximo de tokens que o modelo pode gerar em uma única resposta. Valores mais altos permitem respostas mais longas, mas aumentam o uso de tokens e os custos." + }, + "urlContext": { + "title": "Ativar contexto de URL", + "description": "Permite que o Gemini acesse e processe URLs para contexto adicional ao gerar respostas. Útil para tarefas que exijam análise de conteúdo da web." + }, + "groundingSearch": { + "title": "Ativar grounding com pesquisa no Google", + "description": "Permite que o Gemini pesquise informações atuais no Google e fundamente as respostas em dados em tempo real. Útil para consultas que requerem informações atualizadas." 
+ } + }, + "geminiSections": { + "modelParameters": { + "title": "Configurações Avançadas", + "description": "Ajuste a temperatura, top-p e outras configurações avançadas." + } + }, "googleCloudSetup": { "title": "Para usar o Google Cloud Vertex AI, você precisa:", "step1": "1. Criar uma conta Google Cloud, ativar a API Vertex AI e ativar os modelos Claude desejados.", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index 5952dd8c89..ec4a3861f4 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Примечание: если вы не видите использование кэша, попробуйте выбрать другую модель, а затем вернуться к нужной.", "vscodeLmModel": "Языковая модель", "vscodeLmWarning": "Внимание: это очень экспериментальная интеграция, поддержка провайдера может отличаться. Если возникает ошибка о неподдерживаемой модели — проблема на стороне провайдера.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Управляет количеством токенов с наивысшей вероятностью, которые учитываются на каждом шаге. Более высокие значения увеличивают разнообразие, более низкие делают вывод более сфокусированным и детерминированным." + }, + "topP": { + "title": "Top P", + "description": "Управляет накопительной вероятностью токенов для рассмотрения (nucleus sampling). Значения ближе к 1,0 повышают разнообразие, в то время как более низкие значения делают вывод более сфокусированным." + }, + "maxOutputTokens": { + "title": "Максимальное количество токенов вывода", + "description": "Максимальное количество токенов, которое модель может сгенерировать в одном ответе. Более высокие значения позволяют генерировать более длинные ответы, но увеличивают использование токенов и стоимость." 
+ }, + "urlContext": { + "title": "Включить контекст URL", + "description": "Позволяет Gemini получать доступ к URL-адресам и обрабатывать их для дополнительного контекста при генерации ответов. Полезно для задач, требующих анализа веб-контента." + }, + "groundingSearch": { + "title": "Включить grounding через поиск Google", + "description": "Позволяет Gemini искать актуальную информацию в Google и основывать ответы на данных в реальном времени. Полезно для запросов, требующих актуальной информации." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Расширенные настройки", + "description": "Настройте температуру, top-p и другие расширенные параметры." + } + }, "googleCloudSetup": { "title": "Для использования Google Cloud Vertex AI необходимо:", "step1": "1. Создайте аккаунт Google Cloud, включите Vertex AI API и нужные модели Claude.", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 625ca4d5ea..27fcb1d682 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Not: Önbellek kullanımını görmüyorsanız, farklı bir model seçip ardından istediğiniz modeli tekrar seçmeyi deneyin.", "vscodeLmModel": "Dil Modeli", "vscodeLmWarning": "Not: Bu çok deneysel bir entegrasyondur ve sağlayıcı desteği değişebilir. Bir modelin desteklenmediğine dair bir hata alırsanız, bu sağlayıcı tarafındaki bir sorundur.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Her adım için dikkate alınacak en yüksek olasılıklı token sayısını kontrol eder. Daha yüksek değerler çeşitliliği artırır, daha düşük değerler çıktıyı daha odaklı ve belirleyici yapar." + }, + "topP": { + "title": "Top P", + "description": "Düşünülecek token'ların kümülatif olasılığını kontrol eder (nucleus sampling). 1,0'a yakın değerler çeşitliliği artırırken, daha düşük değerler çıktıyı daha odaklı hale getirir." 
+ }, + "maxOutputTokens": { + "title": "Maksimum Çıkış Tokenleri", + "description": "Modelin tek bir yanıtta oluşturabileceği maksimum token sayısını kontrol eder. Daha yüksek değerler daha uzun yanıtlar sağlar, ancak token kullanımı ve maliyetleri artırır." + }, + "urlContext": { + "title": "URL Bağlamını Etkinleştir", + "description": "Yanıtlar oluşturulurken ek bağlam için Gemini'nin URL'lere erişmesine ve işlemesine izin verir. Web içeriği analizi gerektiren görevler için faydalıdır." + }, + "groundingSearch": { + "title": "Google Aramasıyla Grounding Etkinleştir", + "description": "Gemini'nin güncel bilgileri almak için Google'da arama yapmasına ve yanıtları gerçek zamanlı verilere dayandırmasına izin verir. Güncel bilgi gerektiren sorgular için kullanışlıdır." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Gelişmiş Ayarlar", + "description": "Sıcaklık, top-p ve diğer gelişmiş ayarları yapın." + } + }, "googleCloudSetup": { "title": "Google Cloud Vertex AI'yi kullanmak için şunları yapmanız gerekir:", "step1": "1. Google Cloud hesabı oluşturun, Vertex AI API'sini etkinleştirin ve istediğiniz Claude modellerini etkinleştirin.", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 52a9db5b93..c83a10d4a5 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "Lưu ý: Nếu bạn không thấy việc sử dụng bộ nhớ đệm, hãy thử chọn một mô hình khác và sau đó chọn lại mô hình mong muốn của bạn.", "vscodeLmModel": "Mô hình ngôn ngữ", "vscodeLmWarning": "Lưu ý: Đây là tích hợp thử nghiệm và hỗ trợ nhà cung cấp có thể khác nhau. Nếu bạn nhận được lỗi về mô hình không được hỗ trợ, đó là vấn đề từ phía nhà cung cấp.", + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "Điều chỉnh số lượng token có xác suất cao nhất được xem xét cho mỗi bước. 
Giá trị cao hơn tăng tính đa dạng, giá trị thấp hơn khiến đầu ra tập trung hơn và mang tính xác định." + }, + "topP": { + "title": "Top P", + "description": "Điều chỉnh xác suất tích lũy của các token được xem xét (lấy mẫu nucleus). Giá trị gần 1.0 tăng tính đa dạng, trong khi giá trị thấp hơn khiến đầu ra tập trung hơn." + }, + "maxOutputTokens": { + "title": "Token đầu ra tối đa", + "description": "Số lượng token tối đa mà mô hình có thể tạo ra trong một phản hồi. Giá trị cao hơn cho phép phản hồi dài hơn nhưng tăng mức sử dụng token và chi phí." + }, + "urlContext": { + "title": "Bật ngữ cảnh URL", + "description": "Cho phép Gemini truy cập và xử lý URL để có thêm ngữ cảnh khi tạo phản hồi. Hữu ích cho các tác vụ yêu cầu phân tích nội dung web." + }, + "groundingSearch": { + "title": "Bật grounding với tìm kiếm Google", + "description": "Cho phép Gemini tìm kiếm trên Google để lấy thông tin mới nhất và căn cứ phản hồi dựa trên dữ liệu thời gian thực. Hữu ích cho các truy vấn yêu cầu thông tin cập nhật." + } + }, + "geminiSections": { + "modelParameters": { + "title": "Cài đặt nâng cao", + "description": "Điều chỉnh nhiệt độ, top-p và các cài đặt nâng cao khác." + } + }, "googleCloudSetup": { "title": "Để sử dụng Google Cloud Vertex AI, bạn cần:", "step1": "1. 
Tạo tài khoản Google Cloud, kích hoạt Vertex AI API và kích hoạt các mô hình Claude mong muốn.", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 151ee9e744..57c966ad77 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "提示:若未显示缓存使用情况,请切换模型后重新选择", "vscodeLmModel": "VSCode LM 模型", "vscodeLmWarning": "注意:这是一个非常实验性的集成,提供商支持会有所不同。如果您收到有关不支持模型的错误,则这是提供商方面的问题。", + "geminiSections": { + "modelParameters": { + "title": "输出控制", + "description": "微调 topP、topK 和 maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "较低的值使文本更具可预测性,而较高的值使其更具创造性。" + }, + "topP": { + "title": "Top P", + "description": "较低的值可以使文本更集中,而较高的值可以产生更多样化的结果。" + }, + "maxOutputTokens": { + "title": "最大输出 Token", + "description": "此参数设置模型允许生成的响应的最大长度。" + }, + "urlContext": { + "title": "启用 URL 上下文", + "description": "让 Gemini 读取链接的页面以提取、比较和综合其内容,从而提供明智的答复。" + }, + "groundingSearch": { + "title": "启用 Google 搜索基础", + "description": "将 Gemini 连接到实时网络数据,以获得包含可验证引用的准确、最新的答案。" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 
注册Google Cloud账号并启用Vertex AI API", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index ab4caeea5b..313c87f473 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -283,6 +283,34 @@ "cacheUsageNote": "注意:如果您沒有看到快取使用情況,請嘗試選擇其他模型,然後重新選擇您想要的模型。", "vscodeLmModel": "語言模型", "vscodeLmWarning": "注意:此整合功能仍處於實驗階段,各供應商的支援程度可能不同。如果出現模型不支援的錯誤,通常是供應商方面的問題。", + "geminiSections": { + "modelParameters": { + "title": "輸出控制", + "description": "微調 topP、topK 和 maxOutputTokens" + } + }, + "geminiParameters": { + "topK": { + "title": "Top K", + "description": "較低的值使文字更具可預測性,而較高的值使其更具創造性。" + }, + "topP": { + "title": "Top P", + "description": "較低的值可以使文字更集中,而較高的值可以產生更多樣化的結果。" + }, + "maxOutputTokens": { + "title": "最大輸出 Token", + "description": "此參數設定模型允許產生的回應的最大長度。" + }, + "urlContext": { + "title": "啟用 URL 上下文", + "description": "讓 Gemini 讀取連結的頁面以提取、比較和綜合其內容,從而提供明智的答覆。" + }, + "groundingSearch": { + "title": "啟用 Google 搜尋基礎", + "description": "將 Gemini 連接到即時網路數據,以獲得包含可驗證引用的準確、最新的答案。" + } + }, "googleCloudSetup": { "title": "要使用 Google Cloud Vertex AI,您需要:", "step1": "1. 建立 Google Cloud 帳戶,啟用 Vertex AI API 並啟用所需的 Claude 模型。",