diff --git a/src/api/index.ts b/src/api/index.ts
index c6d2b07cd2..ef8f99b7e7 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -21,6 +21,7 @@ import { UnboundHandler } from "./providers/unbound"
 import { RequestyHandler } from "./providers/requesty"
 import { HumanRelayHandler } from "./providers/human-relay"
 import { FakeAIHandler } from "./providers/fake-ai"
+import { XAIHandler } from "./providers/xai"

 export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
@@ -78,6 +79,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 			return new HumanRelayHandler(options)
 		case "fake-ai":
 			return new FakeAIHandler(options)
+		case "xai":
+			return new XAIHandler(options)
 		default:
 			return new AnthropicHandler(options)
 	}
diff --git a/src/api/providers/__tests__/xai.test.ts b/src/api/providers/__tests__/xai.test.ts
new file mode 100644
index 0000000000..f17e75277c
--- /dev/null
+++ b/src/api/providers/__tests__/xai.test.ts
@@ -0,0 +1,292 @@
+import { XAIHandler } from "../xai"
+import { xaiDefaultModelId, xaiModels } from "../../../shared/api"
+import OpenAI from "openai"
+import { Anthropic } from "@anthropic-ai/sdk"
+
+// Mock OpenAI client
+jest.mock("openai", () => {
+	const createMock = jest.fn()
+	return jest.fn(() => ({
+		chat: {
+			completions: {
+				create: createMock,
+			},
+		},
+	}))
+})
+
+describe("XAIHandler", () => {
+	let handler: XAIHandler
+	let mockCreate: jest.Mock
+
+	beforeEach(() => {
+		// Reset all mocks
+		jest.clearAllMocks()
+
+		// Get the mock create function
+		mockCreate = (OpenAI as unknown as jest.Mock)().chat.completions.create
+
+		// Create handler with mock
+		handler = new XAIHandler({})
+	})
+
+	test("should use the correct X.AI base URL", () => {
+		expect(OpenAI).toHaveBeenCalledWith(
+			expect.objectContaining({
+				baseURL: "https://api.x.ai/v1",
+			}),
+		)
+	})
+
+	test("should use the provided API key", () => {
+		// Clear mocks before this specific test
+		jest.clearAllMocks()
+
+		// Create a handler with our API key
+		const xaiApiKey = "test-api-key"
+		new XAIHandler({ xaiApiKey })
+
+		// Verify the OpenAI constructor was called with our API key
+		expect(OpenAI).toHaveBeenCalledWith(
+			expect.objectContaining({
+				apiKey: xaiApiKey,
+			}),
+		)
+	})
+
+	test("should return default model when no model is specified", () => {
+		const model = handler.getModel()
+		expect(model.id).toBe(xaiDefaultModelId)
+		expect(model.info).toEqual(xaiModels[xaiDefaultModelId])
+	})
+
+	test("should return specified model when valid model is provided", () => {
+		const testModelId = "grok-2-latest"
+		const handlerWithModel = new XAIHandler({ apiModelId: testModelId })
+		const model = handlerWithModel.getModel()
+
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(xaiModels[testModelId])
+	})
+
+	test("should include reasoning_effort parameter for mini models", async () => {
+		const miniModelHandler = new XAIHandler({
+			apiModelId: "grok-3-mini-beta",
+			reasoningEffort: "high",
+		})
+
+		// Setup mock for streaming response
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					async next() {
+						return { done: true }
+					},
+				}),
+			}
+		})
+
+		// Start generating a message
+		const messageGenerator = miniModelHandler.createMessage("test prompt", [])
+		await messageGenerator.next() // Start the generator
+
+		// Check that reasoning_effort was included
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				reasoning_effort: "high",
+			}),
+		)
+	})
+
+	test("should not include reasoning_effort parameter for non-mini models", async () => {
+		const regularModelHandler = new XAIHandler({
+			apiModelId: "grok-2-latest",
+			reasoningEffort: "high",
+		})
+
+		// Setup mock for streaming response
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					async next() {
+						return { done: true }
+					},
+				}),
+			}
+		})
+
+		// Start generating a message
+		const messageGenerator = regularModelHandler.createMessage("test prompt", [])
+		await messageGenerator.next() // Start the generator
+
+		// Check call args for reasoning_effort
+		const calls = mockCreate.mock.calls
+		const lastCall = calls[calls.length - 1][0]
+		expect(lastCall).not.toHaveProperty("reasoning_effort")
+	})
+
+	test("completePrompt method should return text from OpenAI API", async () => {
+		const expectedResponse = "This is a test response"
+
+		mockCreate.mockResolvedValueOnce({
+			choices: [
+				{
+					message: {
+						content: expectedResponse,
+					},
+				},
+			],
+		})
+
+		const result = await handler.completePrompt("test prompt")
+		expect(result).toBe(expectedResponse)
+	})
+
+	test("should handle errors in completePrompt", async () => {
+		const errorMessage = "API error"
+		mockCreate.mockRejectedValueOnce(new Error(errorMessage))
+
+		await expect(handler.completePrompt("test prompt")).rejects.toThrow(`xAI completion error: ${errorMessage}`)
+	})
+
+	test("createMessage should yield text content from stream", async () => {
+		const testContent = "This is test content"
+
+		// Setup mock for streaming response
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					next: jest
+						.fn()
+						.mockResolvedValueOnce({
+							done: false,
+							value: {
+								choices: [{ delta: { content: testContent } }],
+							},
+						})
+						.mockResolvedValueOnce({ done: true }),
+				}),
+			}
+		})
+
+		// Create and consume the stream
+		const stream = handler.createMessage("system prompt", [])
+		const firstChunk = await stream.next()
+
+		// Verify the content
+		expect(firstChunk.done).toBe(false)
+		expect(firstChunk.value).toEqual({
+			type: "text",
+			text: testContent,
+		})
+	})
+
+	test("createMessage should yield reasoning content from stream", async () => {
+		const testReasoning = "Test reasoning content"
+
+		// Setup mock for streaming response
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					next: jest
+						.fn()
+						.mockResolvedValueOnce({
+							done: false,
+							value: {
+								choices: [{ delta: { reasoning_content: testReasoning } }],
+							},
+						})
+						.mockResolvedValueOnce({ done: true }),
+				}),
+			}
+		})
+
+		// Create and consume the stream
+		const stream = handler.createMessage("system prompt", [])
+		const firstChunk = await stream.next()
+
+		// Verify the reasoning content
+		expect(firstChunk.done).toBe(false)
+		expect(firstChunk.value).toEqual({
+			type: "reasoning",
+			text: testReasoning,
+		})
+	})
+
+	test("createMessage should yield usage data from stream", async () => {
+		// Setup mock for streaming response that includes usage data
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					next: jest
+						.fn()
+						.mockResolvedValueOnce({
+							done: false,
+							value: {
+								choices: [{ delta: {} }], // Needs to have choices array to avoid error
+								usage: {
+									prompt_tokens: 10,
+									completion_tokens: 20,
+									cache_read_input_tokens: 5,
+									cache_creation_input_tokens: 15,
+								},
+							},
+						})
+						.mockResolvedValueOnce({ done: true }),
+				}),
+			}
+		})
+
+		// Create and consume the stream
+		const stream = handler.createMessage("system prompt", [])
+		const firstChunk = await stream.next()
+
+		// Verify the usage data
+		expect(firstChunk.done).toBe(false)
+		expect(firstChunk.value).toEqual({
+			type: "usage",
+			inputTokens: 10,
+			outputTokens: 20,
+			cacheReadTokens: 5,
+			cacheWriteTokens: 15,
+		})
+	})
+
+	test("createMessage should pass correct parameters to OpenAI client", async () => {
+		// Setup a handler with specific model
+		const modelId = "grok-2-latest"
+		const modelInfo = xaiModels[modelId]
+		const handlerWithModel = new XAIHandler({ apiModelId: modelId })
+
+		// Setup mock for streaming response
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					async next() {
+						return { done: true }
+					},
+				}),
+			}
+		})
+
+		// System prompt and messages
+		const systemPrompt = "Test system prompt"
+		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }]
+
+		// Start generating a message
+		const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
+		await messageGenerator.next() // Start the generator
+
+		// Check that all parameters were passed correctly
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				model: modelId,
+				max_tokens: modelInfo.maxTokens,
+				temperature: 0,
+				messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
+				stream: true,
+				stream_options: { include_usage: true },
+			}),
+		)
+	})
+})
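All of the streaming tests above fake the SDK the same way: the mocked `chat.completions.create` returns a plain object whose `Symbol.asyncIterator` drives the handler's `for await` loop, so no real SSE plumbing is needed. A standalone distillation of that pattern (the `streamOf` helper is illustrative, not part of this diff):

```ts
// Build a fake OpenAI-style stream from a fixed list of chunks.
// Anything implementing Symbol.asyncIterator satisfies `for await`.
function streamOf<T>(chunks: T[]): AsyncIterable<T> {
	return {
		[Symbol.asyncIterator]: () => {
			let i = 0
			return {
				// Yield the next chunk until the list is exhausted.
				next: async (): Promise<IteratorResult<T>> =>
					i < chunks.length ? { done: false, value: chunks[i++] } : { done: true, value: undefined },
			}
		},
	}
}

// Usage mirroring the tests: one text delta, then a usage-only chunk.
// mockCreate.mockImplementationOnce(() =>
// 	streamOf([
// 		{ choices: [{ delta: { content: "Hello" } }] },
// 		{ choices: [{ delta: {} }], usage: { prompt_tokens: 10, completion_tokens: 20 } },
// 	]),
// )
```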
diff --git a/src/api/providers/constants.ts b/src/api/providers/constants.ts
index 86ca71746e..bda1706728 100644
--- a/src/api/providers/constants.ts
+++ b/src/api/providers/constants.ts
@@ -1,3 +1,12 @@
+export const DEFAULT_HEADERS = {
+	"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
+	"X-Title": "Roo Code",
+}
+
 export const ANTHROPIC_DEFAULT_MAX_TOKENS = 8192

 export const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
+
+export const AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
+
+export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta", "grok-3-mini-beta", "grok-3-mini-fast-beta"])
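Note that the relocated `REASONING_MODELS` set now carries OpenRouter's `x-ai/grok-3-mini-beta` alongside the bare xAI model ids, so a single membership check can gate the `reasoning_effort` request parameter for either provider. A minimal sketch of that gate (`buildRequest` is illustrative, not part of this diff):

```ts
import { REASONING_MODELS } from "./constants"

type ReasoningEffort = "low" | "medium" | "high"

// reasoning_effort only applies to models in REASONING_MODELS; for any other
// model the key must be omitted entirely rather than sent as undefined.
function buildRequest(modelId: string, reasoningEffort?: ReasoningEffort) {
	return {
		model: modelId,
		...(REASONING_MODELS.has(modelId) && reasoningEffort ? { reasoning_effort: reasoningEffort } : {}),
	}
}

buildRequest("grok-3-mini-beta", "high") // { model: "grok-3-mini-beta", reasoning_effort: "high" }
buildRequest("grok-2-latest", "high") // { model: "grok-2-latest" }
```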
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 96984d90c1..ab9897c8b0 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -15,17 +15,10 @@ import { convertToSimpleMessages } from "../transform/simple-format"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import { XmlMatcher } from "../../utils/xml-matcher"
-import { DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
-
-export const defaultHeaders = {
-	"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
-	"X-Title": "Roo Code",
-}
+import { DEEP_SEEK_DEFAULT_TEMPERATURE, DEFAULT_HEADERS, AZURE_AI_INFERENCE_PATH } from "./constants"

 export interface OpenAiHandlerOptions extends ApiHandlerOptions {}

-const AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
-
 export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: OpenAiHandlerOptions
 	private client: OpenAI
@@ -45,7 +38,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			this.client = new OpenAI({
 				baseURL,
 				apiKey,
-				defaultHeaders,
+				defaultHeaders: DEFAULT_HEADERS,
 				defaultQuery: { "api-version": this.options.azureApiVersion || "2024-05-01-preview" },
 			})
 		} else if (isAzureOpenAi) {
@@ -56,7 +49,7 @@
 				apiKey,
 				apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
 				defaultHeaders: {
-					...defaultHeaders,
+					...DEFAULT_HEADERS,
 					...(this.options.openAiHostHeader ? { Host: this.options.openAiHostHeader } : {}),
 				},
 			})
@@ -65,7 +58,7 @@
 				baseURL,
 				apiKey,
 				defaultHeaders: {
-					...defaultHeaders,
+					...DEFAULT_HEADERS,
 					...(this.options.openAiHostHeader ? { Host: this.options.openAiHostHeader } : {}),
 				},
 			})
diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts
index 2a279d09a1..665d87542b 100644
--- a/src/api/providers/openrouter.ts
+++ b/src/api/providers/openrouter.ts
@@ -9,10 +9,9 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
 import { convertToR1Format } from "../transform/r1-format"

-import { DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
+import { DEFAULT_HEADERS, DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
 import { getModelParams, SingleCompletionHandler } from ".."
 import { BaseProvider } from "./base-provider"
-import { defaultHeaders } from "./openai"

 const OPENROUTER_DEFAULT_PROVIDER_NAME = "[default]"

@@ -40,7 +39,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 		const baseURL = this.options.openRouterBaseUrl || "https://openrouter.ai/api/v1"
 		const apiKey = this.options.openRouterApiKey ?? "not-provided"

-		this.client = new OpenAI({ baseURL, apiKey, defaultHeaders })
+		this.client = new OpenAI({ baseURL, apiKey, defaultHeaders: DEFAULT_HEADERS })
 	}

 	override async *createMessage(
diff --git a/src/api/providers/xai.ts b/src/api/providers/xai.ts
new file mode 100644
index 0000000000..9da02330e9
--- /dev/null
+++ b/src/api/providers/xai.ts
@@ -0,0 +1,110 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import { ApiHandlerOptions, XAIModelId, ModelInfo, xaiDefaultModelId, xaiModels } from "../../shared/api"
+import { ApiStream } from "../transform/stream"
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { DEFAULT_HEADERS, REASONING_MODELS } from "./constants"
+import { BaseProvider } from "./base-provider"
+import { SingleCompletionHandler } from ".."
+
+const XAI_DEFAULT_TEMPERATURE = 0
+
+export class XAIHandler extends BaseProvider implements SingleCompletionHandler {
+	protected options: ApiHandlerOptions
+	private client: OpenAI
+
+	constructor(options: ApiHandlerOptions) {
+		super()
+		this.options = options
+		this.client = new OpenAI({
+			baseURL: "https://api.x.ai/v1",
+			apiKey: this.options.xaiApiKey ?? "not-provided",
+			defaultHeaders: DEFAULT_HEADERS,
+		})
+	}
+
+	override getModel() {
+		// Determine which model ID to use (specified or default)
+		const id =
+			this.options.apiModelId && this.options.apiModelId in xaiModels
+				? (this.options.apiModelId as XAIModelId)
+				: xaiDefaultModelId
+
+		// Check if reasoning effort applies to this model
+		const supportsReasoning = REASONING_MODELS.has(id)
+
+		return {
+			id,
+			info: xaiModels[id],
+			reasoningEffort: supportsReasoning ? this.options.reasoningEffort : undefined,
+		}
+	}
+
+	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+		const { id: modelId, info: modelInfo, reasoningEffort } = this.getModel()
+
+		// Use the OpenAI-compatible API.
+		const stream = await this.client.chat.completions.create({
+			model: modelId,
+			max_tokens: modelInfo.maxTokens,
+			temperature: this.options.modelTemperature ?? XAI_DEFAULT_TEMPERATURE,
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+			stream: true,
+			stream_options: { include_usage: true },
+			...(reasoningEffort ? { reasoning_effort: reasoningEffort } : {}),
+		})
+
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (delta && "reasoning_content" in delta && delta.reasoning_content) {
+				yield {
+					type: "reasoning",
+					text: delta.reasoning_content as string,
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+					// X.AI might include these fields in the future, handle them if present.
+					cacheReadTokens:
+						"cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0,
+					cacheWriteTokens:
+						"cache_creation_input_tokens" in chunk.usage
+							? (chunk.usage as any).cache_creation_input_tokens
+							: 0,
+				}
+			}
+		}
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		const { id: modelId, reasoningEffort } = this.getModel()
+
+		try {
+			const response = await this.client.chat.completions.create({
+				model: modelId,
+				messages: [{ role: "user", content: prompt }],
+				...(reasoningEffort ? { reasoning_effort: reasoningEffort } : {}),
+			})
+
+			return response.choices[0]?.message.content || ""
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`xAI completion error: ${error.message}`)
+			}
+
+			throw error
+		}
+	}
+}
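With the `buildApiHandler` case from the first hunk wired up, the new handler is consumed through the same `ApiStream` chunk protocol as every other provider. A rough usage sketch (the import path and API key are placeholders; in the extension the key comes from VS Code secret storage):

```ts
import { buildApiHandler } from "../api" // adjust the path to the calling module

async function demo() {
	const handler = buildApiHandler({
		apiProvider: "xai",
		xaiApiKey: "YOUR_KEY", // placeholder
		apiModelId: "grok-3-mini-beta",
		reasoningEffort: "high", // forwarded only for models in REASONING_MODELS
	})

	// createMessage yields "reasoning", "text", and "usage" chunks in stream order.
	for await (const chunk of handler.createMessage("You are helpful.", [{ role: "user", content: "Hi" }])) {
		if (chunk.type === "reasoning") process.stdout.write(`[thinking] ${chunk.text}`)
		else if (chunk.type === "text") process.stdout.write(chunk.text)
		else if (chunk.type === "usage") console.log(`\n${chunk.inputTokens} in, ${chunk.outputTokens} out`)
	}
}
```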
diff --git a/src/exports/roo-code.d.ts b/src/exports/roo-code.d.ts
index 8a62e412f6..85f7e8733e 100644
--- a/src/exports/roo-code.d.ts
+++ b/src/exports/roo-code.d.ts
@@ -20,6 +20,7 @@ type ProviderSettings = {
 				| "requesty"
 				| "human-relay"
 				| "fake-ai"
+				| "xai"
 		  )
 		| undefined
 	apiModelId?: string | undefined
@@ -176,6 +177,7 @@ type ProviderSettings = {
 				cachableFields?: string[] | undefined
 		  } | null)
 		| undefined
+	xaiApiKey?: string | undefined
 	modelMaxTokens?: number | undefined
 	modelMaxThinkingTokens?: number | undefined
 	includeMaxTokens?: boolean | undefined
@@ -212,6 +214,7 @@ type GlobalSettings = {
 					| "requesty"
 					| "human-relay"
 					| "fake-ai"
+					| "xai"
 			  )
 			| undefined
 	  }[]
diff --git a/src/exports/types.ts b/src/exports/types.ts
index ba3f82b26b..e301f7bfd0 100644
--- a/src/exports/types.ts
+++ b/src/exports/types.ts
@@ -21,6 +21,7 @@ type ProviderSettings = {
 				| "requesty"
 				| "human-relay"
 				| "fake-ai"
+				| "xai"
 		  )
 		| undefined
 	apiModelId?: string | undefined
@@ -177,6 +178,7 @@ type ProviderSettings = {
 				cachableFields?: string[] | undefined
 		  } | null)
 		| undefined
+	xaiApiKey?: string | undefined
 	modelMaxTokens?: number | undefined
 	modelMaxThinkingTokens?: number | undefined
 	includeMaxTokens?: boolean | undefined
@@ -215,6 +217,7 @@ type GlobalSettings = {
 					| "requesty"
 					| "human-relay"
 					| "fake-ai"
+					| "xai"
 			  )
 			| undefined
 	  }[]
diff --git a/src/schemas/index.ts b/src/schemas/index.ts
index 2d71df0533..aeab4f0703 100644
--- a/src/schemas/index.ts
+++ b/src/schemas/index.ts
@@ -28,6 +28,7 @@ export const providerNames = [
 	"requesty",
 	"human-relay",
 	"fake-ai",
+	"xai",
 ] as const

 export const providerNamesSchema = z.enum(providerNames)
@@ -380,6 +381,8 @@ export const providerSettingsSchema = z.object({
 	requestyApiKey: z.string().optional(),
 	requestyModelId: z.string().optional(),
 	requestyModelInfo: modelInfoSchema.nullish(),
+	// X.AI (Grok)
+	xaiApiKey: z.string().optional(),
 	// Claude 3.7 Sonnet Thinking
 	modelMaxTokens: z.number().optional(),
 	modelMaxThinkingTokens: z.number().optional(),
@@ -483,6 +486,8 @@ const providerSettingsRecord: ProviderSettingsRecord = {
 	fuzzyMatchThreshold: undefined,
 	// Fake AI
 	fakeAi: undefined,
+	// X.AI (Grok)
+	xaiApiKey: undefined,
 }

 export const PROVIDER_SETTINGS_KEYS = Object.keys(providerSettingsRecord) as Keys<ProviderSettings>[]
@@ -672,6 +677,7 @@ export type SecretState = Pick<
 	| "mistralApiKey"
 	| "unboundApiKey"
 	| "requestyApiKey"
+	| "xaiApiKey"
 >

 type SecretStateRecord = Record<Keys<SecretState>, undefined>
@@ -690,6 +696,7 @@ const secretStateRecord: SecretStateRecord = {
 	mistralApiKey: undefined,
 	unboundApiKey: undefined,
 	requestyApiKey: undefined,
+	xaiApiKey: undefined,
 }

 export const SECRET_STATE_KEYS = Object.keys(secretStateRecord) as Keys<SecretState>[]
diff --git a/src/shared/api.ts b/src/shared/api.ts
index a262c12abb..0284f2bca4 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -1,4 +1,7 @@
 import { ModelInfo, ProviderName, ProviderSettings } from "../schemas"
+import { REASONING_MODELS } from "../api/providers/constants"
+
+export { REASONING_MODELS }

 export type { ModelInfo, ProviderName as ApiProvider }
@@ -77,6 +80,7 @@
 		cacheReadsPrice: 0.03,
 	},
 } as const satisfies Record<string, ModelInfo> // as const assertion makes the object deeply readonly
+
 // Amazon Bedrock
 // https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
 export interface MessageContent {
@@ -950,6 +954,7 @@
 } as const satisfies Record<string, ModelInfo>

 // Unbound Security
+// https://www.unboundsecurity.ai/ai-gateway
 export const unboundDefaultModelId = "anthropic/claude-3-5-sonnet-20241022"
 export const unboundDefaultModelInfo: ModelInfo = {
 	maxTokens: 8192,
@@ -961,3 +966,118 @@
 	cacheWritesPrice: 3.75,
 	cacheReadsPrice: 0.3,
 }
+
+// xAI
+// https://docs.x.ai/docs/api-reference
+export type XAIModelId = keyof typeof xaiModels
+export const xaiDefaultModelId: XAIModelId = "grok-3-beta"
+export const xaiModels = {
+	"grok-3-beta": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 3.0,
+		outputPrice: 15.0,
+		description: "xAI's Grok-3 beta model with 131K context window",
+	},
+	"grok-3-fast-beta": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 5.0,
+		outputPrice: 25.0,
+		description: "xAI's Grok-3 fast beta model with 131K context window",
+	},
+	"grok-3-mini-beta": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.3,
+		outputPrice: 0.5,
+		description: "xAI's Grok-3 mini beta model with 131K context window",
+	},
+	"grok-3-mini-fast-beta": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.6,
+		outputPrice: 4.0,
+		description: "xAI's Grok-3 mini fast beta model with 131K context window",
+	},
+	"grok-2-latest": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 2.0,
+		outputPrice: 10.0,
+		description: "xAI's Grok-2 model - latest version with 131K context window",
+	},
+	"grok-2": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 2.0,
+		outputPrice: 10.0,
+		description: "xAI's Grok-2 model with 131K context window",
+	},
+	"grok-2-1212": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 2.0,
+		outputPrice: 10.0,
+		description: "xAI's Grok-2 model (version 1212) with 131K context window",
+	},
+	"grok-2-vision-latest": {
+		maxTokens: 8192,
+		contextWindow: 32768,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 2.0,
+		outputPrice: 10.0,
+		description: "xAI's Grok-2 Vision model - latest version with image support and 32K context window",
+	},
+	"grok-2-vision": {
+		maxTokens: 8192,
+		contextWindow: 32768,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 2.0,
+		outputPrice: 10.0,
+		description: "xAI's Grok-2 Vision model with image support and 32K context window",
+	},
+	"grok-2-vision-1212": {
+		maxTokens: 8192,
+		contextWindow: 32768,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 2.0,
+		outputPrice: 10.0,
+		description: "xAI's Grok-2 Vision model (version 1212) with image support and 32K context window",
+	},
+	"grok-vision-beta": {
+		maxTokens: 8192,
+		contextWindow: 8192,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 5.0,
+		outputPrice: 15.0,
+		description: "xAI's Grok Vision Beta model with image support and 8K context window",
+	},
+	"grok-beta": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 5.0,
+		outputPrice: 15.0,
+		description: "xAI's Grok Beta model (legacy) with 131K context window",
+	},
+} as const satisfies Record<string, ModelInfo>
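The `inputPrice`/`outputPrice` figures above follow the same convention as the other model tables in this file, which we read as USD per million tokens. Under that assumption, a per-request cost estimate is a one-liner (`estimateCostUSD` is illustrative, not part of this diff):

```ts
import { xaiModels, type XAIModelId } from "../shared/api" // adjust the path to the calling module

// Assumes prices are USD per 1M tokens, matching the file's other tables.
function estimateCostUSD(modelId: XAIModelId, inputTokens: number, outputTokens: number): number {
	const info = xaiModels[modelId]
	return (info.inputPrice * inputTokens + info.outputPrice * outputTokens) / 1_000_000
}

// grok-3-beta at $3/M input and $15/M output:
estimateCostUSD("grok-3-beta", 10_000, 2_000) // (3 * 10_000 + 15 * 2_000) / 1e6 = $0.06
```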
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index 2d9525a9f2..9eca64e862 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -35,6 +35,8 @@ import {
 	unboundDefaultModelInfo,
 	requestyDefaultModelId,
 	requestyDefaultModelInfo,
+	xaiDefaultModelId,
+	xaiModels,
 	ApiProvider,
 } from "../../../../src/shared/api"
 import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
@@ -1444,6 +1446,27 @@
 				)}

+				{selectedProvider === "xai" && (
+					<>
+						<VSCodeTextField
+							value={apiConfiguration?.xaiApiKey || ""}
+							type="password"
+							onInput={handleInputChange("xaiApiKey")}
+							placeholder={t("settings:placeholders.apiKey")}
+							className="w-full">
+							<label className="block font-medium mb-1">{t("settings:providers.xaiApiKey")}</label>
+						</VSCodeTextField>
+						<div className="text-sm text-vscode-descriptionForeground -mt-2">
+							{t("settings:providers.apiKeyStorageNotice")}
+						</div>
+						{!apiConfiguration?.xaiApiKey && (
+							<VSCodeButtonLink href="https://api.x.ai/docs" appearance="secondary">
+								{t("settings:providers.getXaiApiKey")}
+							</VSCodeButtonLink>
+						)}
+					</>
+				)}
+
 				{selectedProvider === "unbound" && (
 					<>
-				{selectedProvider === "openrouter" && REASONING_MODELS.has(selectedModelId) && (
-					<ReasoningEffort
-						apiConfiguration={apiConfiguration}
-						setApiConfigurationField={setApiConfigurationField}
-					/>
-				)}
-
 				{selectedProvider === "glama" && (
 				)}

+				{REASONING_MODELS.has(selectedModelId) && (
+					<ReasoningEffort
+						apiConfiguration={apiConfiguration}
+						setApiConfigurationField={setApiConfigurationField}
+					/>
+				)}
+
 				{!fromWelcomeView && (
 					<>
diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts
--- a/webview-ui/src/components/settings/constants.ts
+++ b/webview-ui/src/components/settings/constants.ts
 export const MODELS_BY_PROVIDER: Partial<Record<ProviderName, Record<string, ModelInfo>>> = {
 	anthropic: anthropicModels,
 	bedrock: bedrockModels,
@@ -18,6 +22,7 @@ export const MODELS_BY_PROVIDER: Partial<Record<ProviderName, Record<string, Mo
+	xai: xaiModels,
 }

 ].sort((a, b) => a.label.localeCompare(b.label))

 export const VERTEX_REGIONS = [
@@ -46,5 +52,3 @@
 	{ value: "europe-west4", label: "europe-west4" },
 	{ value: "asia-southeast1", label: "asia-southeast1" },
 ]
-
-export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta"])
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json
index 00fb251eab..a4795a2239 100644
--- a/webview-ui/src/i18n/locales/ca/settings.json
+++ b/webview-ui/src/i18n/locales/ca/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Obtenir clau API de Mistral / Codestral",
 	"codestralBaseUrl": "URL base de Codestral (opcional)",
 	"codestralBaseUrlDesc": "Establir una URL alternativa per al model Codestral.",
+	"xaiApiKey": "Clau API de xAI",
+	"getXaiApiKey": "Obtenir clau API de xAI",
 	"awsCredentials": "Credencials d'AWS",
 	"awsProfile": "Perfil d'AWS",
 	"awsProfileName": "Nom del perfil d'AWS",
diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json
index 59d986be18..dda9befbce 100644
--- a/webview-ui/src/i18n/locales/de/settings.json
+++ b/webview-ui/src/i18n/locales/de/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Mistral / Codestral API-Schlüssel erhalten",
 	"codestralBaseUrl": "Codestral Basis-URL (Optional)",
 	"codestralBaseUrlDesc": "Legen Sie eine alternative URL für das Codestral-Modell fest.",
+	"xaiApiKey": "xAI API-Schlüssel",
+	"getXaiApiKey": "xAI API-Schlüssel erhalten",
 	"awsCredentials": "AWS Anmeldedaten",
 	"awsProfile": "AWS Profil",
 	"awsProfileName": "AWS Profilname",
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index e277085424..ffd68f7e6e 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Get Mistral / Codestral API Key",
 	"codestralBaseUrl": "Codestral Base URL (Optional)",
 	"codestralBaseUrlDesc": "Set an alternative URL for the Codestral model.",
+	"xaiApiKey": "xAI API Key",
+	"getXaiApiKey": "Get xAI API Key",
 	"awsCredentials": "AWS Credentials",
 	"awsProfile": "AWS Profile",
 	"awsProfileName": "AWS Profile Name",
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json
index af6e2b218e..8e68d7be47 100644
--- a/webview-ui/src/i18n/locales/es/settings.json
+++ b/webview-ui/src/i18n/locales/es/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Obtener clave API de Mistral / Codestral",
 	"codestralBaseUrl": "URL base de Codestral (Opcional)",
 	"codestralBaseUrlDesc": "Establecer una URL alternativa para el modelo Codestral.",
+	"xaiApiKey": "Clave API de xAI",
+	"getXaiApiKey": "Obtener clave API de xAI",
 	"awsCredentials": "Credenciales de AWS",
 	"awsProfile": "Perfil de AWS",
 	"awsProfileName": "Nombre del perfil de AWS",
diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json
index 948dfb127b..5c2904c1d2 100644
--- a/webview-ui/src/i18n/locales/fr/settings.json
+++ b/webview-ui/src/i18n/locales/fr/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Obtenir la clé API Mistral / Codestral",
 	"codestralBaseUrl": "URL de base Codestral (Optionnel)",
 	"codestralBaseUrlDesc": "Définir une URL alternative pour le modèle Codestral.",
+	"xaiApiKey": "Clé API xAI",
+	"getXaiApiKey": "Obtenir la clé API xAI",
 	"awsCredentials": "Identifiants AWS",
 	"awsProfile": "Profil AWS",
 	"awsProfileName": "Nom du profil AWS",
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json
index 1aaf89e946..414c312c5c 100644
--- a/webview-ui/src/i18n/locales/hi/settings.json
+++ b/webview-ui/src/i18n/locales/hi/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Mistral / Codestral API कुंजी प्राप्त करें",
 	"codestralBaseUrl": "Codestral बेस URL (वैकल्पिक)",
 	"codestralBaseUrlDesc": "Codestral मॉडल के लिए वैकल्पिक URL सेट करें।",
+	"xaiApiKey": "xAI API कुंजी",
+	"getXaiApiKey": "xAI API कुंजी प्राप्त करें",
 	"awsCredentials": "AWS क्रेडेंशियल्स",
 	"awsProfile": "AWS प्रोफाइल",
 	"awsProfileName": "AWS प्रोफाइल नाम",
diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json
index 570bca7d2e..63a3b5810e 100644
--- a/webview-ui/src/i18n/locales/it/settings.json
+++ b/webview-ui/src/i18n/locales/it/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Ottieni chiave API Mistral / Codestral",
 	"codestralBaseUrl": "URL base Codestral (opzionale)",
 	"codestralBaseUrlDesc": "Imposta un URL opzionale per i modelli Codestral.",
+	"xaiApiKey": "Chiave API xAI",
+	"getXaiApiKey": "Ottieni chiave API xAI",
 	"awsCredentials": "Credenziali AWS",
 	"awsProfile": "Profilo AWS",
 	"awsProfileName": "Nome profilo AWS",
diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json
index 101f56cd8a..78f0b280ce 100644
--- a/webview-ui/src/i18n/locales/ja/settings.json
+++ b/webview-ui/src/i18n/locales/ja/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Mistral / Codestral APIキーを取得",
 	"codestralBaseUrl": "Codestral ベースURL(オプション)",
 	"codestralBaseUrlDesc": "Codestralモデルの代替URLを設定します。",
+	"xaiApiKey": "xAI APIキー",
+	"getXaiApiKey": "xAI APIキーを取得",
 	"awsCredentials": "AWS認証情報",
 	"awsProfile": "AWSプロファイル",
 	"awsProfileName": "AWSプロファイル名",
diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json
index c13e7e8f73..b051a4cda8 100644
--- a/webview-ui/src/i18n/locales/ko/settings.json
+++ b/webview-ui/src/i18n/locales/ko/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Mistral / Codestral API 키 받기",
 	"codestralBaseUrl": "Codestral 기본 URL (선택사항)",
 	"codestralBaseUrlDesc": "Codestral 모델의 대체 URL을 설정합니다.",
+	"xaiApiKey": "xAI API 키",
+	"getXaiApiKey": "xAI API 키 받기",
 	"awsCredentials": "AWS 자격 증명",
 	"awsProfile": "AWS 프로필",
 	"awsProfileName": "AWS 프로필 이름",
diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json
index 534ee15234..024c356a57 100644
--- a/webview-ui/src/i18n/locales/pl/settings.json
+++ b/webview-ui/src/i18n/locales/pl/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Uzyskaj klucz API Mistral / Codestral",
 	"codestralBaseUrl": "URL bazowy Codestral (opcjonalnie)",
 	"codestralBaseUrlDesc": "Ustaw opcjonalny URL dla modeli Codestral.",
+	"xaiApiKey": "Klucz API xAI",
+	"getXaiApiKey": "Uzyskaj klucz API xAI",
 	"awsCredentials": "Poświadczenia AWS",
 	"awsProfile": "Profil AWS",
 	"awsProfileName": "Nazwa profilu AWS",
diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json
index 5df5798a6d..428fc2fa03 100644
--- a/webview-ui/src/i18n/locales/pt-BR/settings.json
+++ b/webview-ui/src/i18n/locales/pt-BR/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Obter chave de API Mistral / Codestral",
 	"codestralBaseUrl": "URL Base Codestral (Opcional)",
 	"codestralBaseUrlDesc": "Defina uma URL alternativa para o modelo Codestral.",
+	"xaiApiKey": "Chave de API xAI",
+	"getXaiApiKey": "Obter chave de API xAI",
 	"awsCredentials": "Credenciais AWS",
 	"awsProfile": "Perfil AWS",
 	"awsProfileName": "Nome do Perfil AWS",
diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json
index 9723383005..5d9e5d7e57 100644
--- a/webview-ui/src/i18n/locales/tr/settings.json
+++ b/webview-ui/src/i18n/locales/tr/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "Mistral / Codestral API Anahtarı Al",
 	"codestralBaseUrl": "Codestral Temel URL (İsteğe bağlı)",
 	"codestralBaseUrlDesc": "Codestral modeli için alternatif URL ayarlayın.",
+	"xaiApiKey": "xAI API Anahtarı",
+	"getXaiApiKey": "xAI API Anahtarı Al",
 	"awsCredentials": "AWS Kimlik Bilgileri",
 	"awsProfile": "AWS Profili",
 	"awsProfileName": "AWS Profil Adı",
diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json
index 5ab7fe9b28..6a73d36857 100644
--- a/webview-ui/src/i18n/locales/vi/settings.json
+++ b/webview-ui/src/i18n/locales/vi/settings.json
@@ -127,6 +127,8 @@
 	"getMistralApiKey": "Lấy khóa API Mistral / Codestral",
 	"codestralBaseUrl": "URL cơ sở Codestral (Tùy chọn)",
 	"codestralBaseUrlDesc": "Đặt URL thay thế cho mô hình Codestral.",
+	"xaiApiKey": "Khóa API xAI",
+	"getXaiApiKey": "Lấy khóa API xAI",
 	"awsCredentials": "Thông tin xác thực AWS",
 	"awsProfile": "Hồ sơ AWS",
 	"awsProfileName": "Tên hồ sơ AWS",
diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json
index da85a296bb..1038b46f5d 100644
--- a/webview-ui/src/i18n/locales/zh-CN/settings.json
+++ b/webview-ui/src/i18n/locales/zh-CN/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "获取 Mistral / Codestral API 密钥",
 	"codestralBaseUrl": "Codestral 基础 URL(可选)",
 	"codestralBaseUrlDesc": "为 Codestral 模型设置替代 URL。",
+	"xaiApiKey": "xAI API 密钥",
+	"getXaiApiKey": "获取 xAI API 密钥",
 	"awsCredentials": "AWS 凭证",
 	"awsProfile": "AWS 配置文件",
 	"awsProfileName": "AWS 配置文件名称",
diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json
index f98c40e607..8d4efcf6b2 100644
--- a/webview-ui/src/i18n/locales/zh-TW/settings.json
+++ b/webview-ui/src/i18n/locales/zh-TW/settings.json
@@ -128,6 +128,8 @@
 	"getMistralApiKey": "取得 Mistral/Codestral API 金鑰",
 	"codestralBaseUrl": "Codestral 基礎 URL(選用)",
 	"codestralBaseUrlDesc": "設定 Codestral 模型的替代 URL。",
+	"xaiApiKey": "xAI API 金鑰",
+	"getXaiApiKey": "取得 xAI API 金鑰",
 	"awsCredentials": "AWS 認證",
 	"awsProfile": "AWS Profile",
 	"awsProfileName": "AWS Profile 名稱",