From dee8b4437335d3aef51827857b6f5198a3387ad6 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sun, 19 Oct 2025 21:04:20 +0800 Subject: [PATCH 01/17] feat: code --- cli/src/config/schema.json | 1 + cli/src/constants/providers/labels.ts | 1 + cli/src/constants/providers/models.ts | 3 + cli/src/constants/providers/settings.ts | 19 +- cli/src/types/messages.ts | 5 + packages/types/src/global-settings.ts | 1 + packages/types/src/provider-settings.ts | 17 + packages/types/src/providers/index.ts | 1 + packages/types/src/providers/minimax.ts | 22 ++ src/api/index.ts | 3 + src/api/providers/__tests__/minimax.spec.ts | 359 ++++++++++++++++++ src/api/providers/index.ts | 2 + src/api/providers/minimax.ts | 39 ++ .../__tests__/checkExistApiConfig.spec.ts | 1 + .../kilocode/hooks/useProviderModels.ts | 10 +- .../src/components/settings/ApiOptions.tsx | 18 +- .../src/components/settings/constants.ts | 3 + .../components/settings/providers/MiniMax.tsx | 73 ++++ .../components/settings/providers/index.ts | 1 + .../components/ui/hooks/useSelectedModel.ts | 7 + webview-ui/src/i18n/locales/ca/settings.json | 3 + webview-ui/src/i18n/locales/cs/settings.json | 3 + webview-ui/src/i18n/locales/de/settings.json | 3 + webview-ui/src/i18n/locales/en/settings.json | 3 + webview-ui/src/i18n/locales/es/settings.json | 3 + webview-ui/src/i18n/locales/fr/settings.json | 3 + webview-ui/src/i18n/locales/hi/settings.json | 3 + webview-ui/src/i18n/locales/id/settings.json | 3 + webview-ui/src/i18n/locales/it/settings.json | 3 + webview-ui/src/i18n/locales/ja/settings.json | 3 + webview-ui/src/i18n/locales/ko/settings.json | 3 + webview-ui/src/i18n/locales/nl/settings.json | 3 + webview-ui/src/i18n/locales/pl/settings.json | 3 + .../src/i18n/locales/pt-BR/settings.json | 3 + webview-ui/src/i18n/locales/ru/settings.json | 3 + webview-ui/src/i18n/locales/th/settings.json | 3 + webview-ui/src/i18n/locales/tr/settings.json | 3 + webview-ui/src/i18n/locales/uk/settings.json | 3 + webview-ui/src/i18n/locales/vi/settings.json | 3 + .../src/i18n/locales/zh-CN/settings.json | 3 + .../src/i18n/locales/zh-TW/settings.json | 3 + 41 files changed, 646 insertions(+), 3 deletions(-) create mode 100644 packages/types/src/providers/minimax.ts create mode 100644 src/api/providers/__tests__/minimax.spec.ts create mode 100644 src/api/providers/minimax.ts create mode 100644 webview-ui/src/components/settings/providers/MiniMax.tsx diff --git a/cli/src/config/schema.json b/cli/src/config/schema.json index c5e2e6195f3..79985dc9494 100644 --- a/cli/src/config/schema.json +++ b/cli/src/config/schema.json @@ -257,6 +257,7 @@ "qwen-code", "gemini-cli", "zai", + "minimax", "unbound", "requesty", "roo", diff --git a/cli/src/constants/providers/labels.ts b/cli/src/constants/providers/labels.ts index 24b4f03b269..7f93ad901dd 100644 --- a/cli/src/constants/providers/labels.ts +++ b/cli/src/constants/providers/labels.ts @@ -36,6 +36,7 @@ export const PROVIDER_LABELS: Record = { "qwen-code": "Qwen Code", "gemini-cli": "Gemini CLI", zai: "Zai", + minimax: "MiniMax", unbound: "Unbound", requesty: "Requesty", roo: "Roo", diff --git a/cli/src/constants/providers/models.ts b/cli/src/constants/providers/models.ts index 292004f5a43..00aa68b6d99 100644 --- a/cli/src/constants/providers/models.ts +++ b/cli/src/constants/providers/models.ts @@ -71,6 +71,7 @@ export const PROVIDER_TO_ROUTER_NAME: Record = moonshot: null, deepseek: null, doubao: null, + minimax: null, "qwen-code": null, "human-relay": null, "fake-ai": null, @@ -116,6 +117,7 @@ export const PROVIDER_MODEL_FIELD: Record = { moonshot: null, deepseek: null, doubao: null, + minimax: null, "qwen-code": null, "human-relay": null, "fake-ai": null, @@ -193,6 +195,7 @@ export const DEFAULT_MODEL_IDS: Partial> = { sambanova: "Meta-Llama-3.1-8B-Instruct", featherless: "deepseek-ai/DeepSeek-V3-0324", deepinfra: "deepseek-ai/DeepSeek-R1-0528", + minimax: "MiniMax-M1", } /** diff --git a/cli/src/constants/providers/settings.ts b/cli/src/constants/providers/settings.ts index 82071db233b..597fefdfef5 100644 --- a/cli/src/constants/providers/settings.ts +++ b/cli/src/constants/providers/settings.ts @@ -410,6 +410,18 @@ export const FIELD_REGISTRY: Record = { placeholder: "Enter API line...", }, + // Minimax fields + minimaxBaseUrl: { + label: "Base URL", + type: "text", + placeholder: "Enter MiniMax base URL...", + }, + minimaxApiKey: { + label: "API Key", + type: "password", + placeholder: "Enter MiniMax API key...", + }, + // Unbound fields unboundApiKey: { label: "API Key", @@ -767,7 +779,11 @@ export const getProviderSettings = (provider: ProviderName, config: ProviderSett type: "text", }, ] - + case "minimax": + return [ + createFieldConfig("minimaxBaseUrl", config, "https://api.minimax.io/v1"), + createFieldConfig("minimaxApiKey", config), + ] case "fake-ai": return [ { @@ -825,6 +841,7 @@ export const PROVIDER_DEFAULT_MODELS: Record = { "vercel-ai-gateway": "gpt-4o", "virtual-quota-fallback": "gpt-4o", "human-relay": "human", + minimax: "MiniMax-M1", "fake-ai": "fake-model", } diff --git a/cli/src/types/messages.ts b/cli/src/types/messages.ts index d4acbd296ac..7d4f5c35456 100644 --- a/cli/src/types/messages.ts +++ b/cli/src/types/messages.ts @@ -103,6 +103,7 @@ export type ProviderName = | "io-intelligence" | "roo" | "vercel-ai-gateway" + | "minimax" // Provider Settings Entry for profile metadata export interface ProviderSettingsEntry { @@ -320,6 +321,10 @@ export interface ProviderSettings { vercelAiGatewayApiKey?: string vercelAiGatewayModelId?: string + // MiniMax AI + minimaxBaseUrl?: "https://api.minimax.io/v1" | "https://api.minimaxi.com/v1" + minimaxApiKey?: string + // Allow additional fields for extensibility [key: string]: any } diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts index 17101acaa98..3eb442b5d97 100644 --- a/packages/types/src/global-settings.ts +++ b/packages/types/src/global-settings.ts @@ -218,6 +218,7 @@ export const SECRET_STATE_KEYS = [ "deepInfraApiKey", "codeIndexOpenAiKey", "codeIndexQdrantApiKey", + "minimaxApiKey", // kilocode_change start "kilocodeToken", "syntheticApiKey", diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 575764f6f34..6f23edae0a6 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -28,6 +28,7 @@ import { vscodeLlmModels, xaiModels, internationalZAiModels, + minimaxModels, } from "./providers/index.js" import { toolUseStylesSchema } from "./kilocode/native-function-calling.js" @@ -141,6 +142,7 @@ export const providerNames = [ "groq", "mistral", "moonshot", + "minimax", "openai-native", "qwen-code", "roo", @@ -411,6 +413,13 @@ const sambaNovaSchema = apiModelIdProviderModelSchema.extend({ sambaNovaApiKey: z.string().optional(), }) +const minimaxSchema = apiModelIdProviderModelSchema.extend({ + minimaxBaseUrl: z + .union([z.literal("https://api.minimax.io/v1"), z.literal("https://api.minimaxi.com/v1")]) + .optional(), + minimaxApiKey: z.string().optional(), +}) + // kilocode_change start const ovhcloudSchema = baseProviderSettingsSchema.extend({ ovhCloudAiEndpointsApiKey: z.string().optional(), @@ -518,6 +527,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv humanRelaySchema.merge(z.object({ apiProvider: z.literal("human-relay") })), fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })), xaiSchema.merge(z.object({ apiProvider: z.literal("xai") })), + minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })), // kilocode_change start geminiCliSchema.merge(z.object({ apiProvider: z.literal("gemini-cli") })), kilocodeSchema.merge(z.object({ apiProvider: z.literal("kilocode") })), @@ -570,6 +580,7 @@ export const providerSettingsSchema = z.object({ ...humanRelaySchema.shape, ...fakeAiSchema.shape, ...xaiSchema.shape, + ...minimaxSchema.shape, ...groqSchema.shape, ...huggingFaceSchema.shape, ...chutesSchema.shape, @@ -660,6 +671,7 @@ export const modelIdKeysByProvider: Record = { unbound: "unboundModelId", requesty: "requestyModelId", xai: "apiModelId", + minimax: "apiModelId", groq: "apiModelId", chutes: "apiModelId", litellm: "litellmModelId", @@ -799,6 +811,11 @@ export const MODELS_BY_PROVIDER: Record< }, xai: { id: "xai", label: "xAI (Grok)", models: Object.keys(xaiModels) }, zai: { id: "zai", label: "Zai", models: Object.keys(internationalZAiModels) }, + minimax: { + id: "minimax", + label: "MiniMax", + models: Object.keys(minimaxModels), + }, // Dynamic providers; models pulled from remote APIs. glama: { id: "glama", label: "Glama", models: [] }, diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 087cfb4e6ef..4cfefd8103f 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -35,3 +35,4 @@ export * from "./xai.js" export * from "./vercel-ai-gateway.js" export * from "./zai.js" export * from "./deepinfra.js" +export * from "./minimax.js" diff --git a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts new file mode 100644 index 00000000000..1337d64c3b2 --- /dev/null +++ b/packages/types/src/providers/minimax.ts @@ -0,0 +1,22 @@ +import type { ModelInfo } from "../model.js" + +// Minimax +// https://www.minimax.io/platform/document/text_api_intro +// https://www.minimax.io/platform/document/pricing +export type MinimaxModelId = keyof typeof minimaxModels +export const minimaxDefaultModelId: MinimaxModelId = "MiniMax-M1" + +export const minimaxModels = { + "MiniMax-M1": { + maxTokens: 25_600, + contextWindow: 1_000_192, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.4, + outputPrice: 2.2, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + }, +} as const satisfies Record + +export const MINIMAX_DEFAULT_TEMPERATURE = 0 diff --git a/src/api/index.ts b/src/api/index.ts index 029bb50e1c2..a49e807fae0 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -41,6 +41,7 @@ import { DoubaoHandler, ZAiHandler, FireworksHandler, + MiniMaxHandler, SyntheticHandler, // kilocode_change RooHandler, FeatherlessHandler, @@ -202,6 +203,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new FeatherlessHandler(options) case "vercel-ai-gateway": return new VercelAiGatewayHandler(options) + case "minimax": + return new MiniMaxHandler(options) // kilocode_change start case "ovhcloud": return new OVHcloudAIEndpointsHandler(options) diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts new file mode 100644 index 00000000000..69bb53c361d --- /dev/null +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -0,0 +1,359 @@ +// Mocks must come first, before imports +const mockCreate = vi.fn() +vi.mock("openai", () => { + return { + __esModule: true, + default: vi.fn().mockImplementation(() => ({ + chat: { + completions: { + create: mockCreate.mockImplementation(async (options) => { + if (!options.stream) { + return { + id: "test-completion", + choices: [ + { + message: { role: "assistant", content: "Test response", refusal: null }, + finish_reason: "stop", + index: 0, + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + cached_tokens: 2, + }, + } + } + + // Return async iterator for streaming + return { + [Symbol.asyncIterator]: async function* () { + yield { + choices: [ + { + delta: { content: "Test response" }, + index: 0, + }, + ], + usage: null, + } + yield { + choices: [ + { + delta: {}, + index: 0, + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + total_tokens: 15, + cached_tokens: 2, + }, + } + }, + } + }), + }, + }, + })), + } +}) + +import OpenAI from "openai" +import type { Anthropic } from "@anthropic-ai/sdk" + +import { minimaxDefaultModelId } from "@roo-code/types" + +import type { ApiHandlerOptions } from "../../../shared/api" + +import { MiniMaxHandler } from "../minimax" + +describe("MiniMaxHandler", () => { + let handler: MiniMaxHandler + let mockOptions: ApiHandlerOptions + + beforeEach(() => { + mockOptions = { + minimaxApiKey: "test-api-key", + apiModelId: "MiniMax-M1", + minimaxBaseUrl: "https://api.minimax.io/v1", + } + handler = new MiniMaxHandler(mockOptions) + vi.clearAllMocks() + }) + + describe("constructor", () => { + it("should initialize with provided options", () => { + expect(handler).toBeInstanceOf(MiniMaxHandler) + expect(handler.getModel().id).toBe(mockOptions.apiModelId) + }) + + it.skip("should throw error if API key is missing", () => { + expect(() => { + new MiniMaxHandler({ + ...mockOptions, + minimaxApiKey: undefined, + }) + }).toThrow("MiniMax API key is required") + }) + + it("should use default model ID if not provided", () => { + const handlerWithoutModel = new MiniMaxHandler({ + ...mockOptions, + apiModelId: undefined, + }) + expect(handlerWithoutModel.getModel().id).toBe(minimaxDefaultModelId) + }) + + it("should use default base URL if not provided", () => { + const handlerWithoutBaseUrl = new MiniMaxHandler({ + ...mockOptions, + minimaxBaseUrl: undefined, + }) + expect(handlerWithoutBaseUrl).toBeInstanceOf(MiniMaxHandler) + // The base URL is passed to OpenAI client internally + expect(OpenAI).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: "https://api.minimax.io/v1", + }), + ) + }) + + it("should use chinese base URL if provided", () => { + const customBaseUrl = "https://api.minimax.io/v1" + const handlerWithCustomUrl = new MiniMaxHandler({ + ...mockOptions, + minimaxBaseUrl: customBaseUrl, + }) + expect(handlerWithCustomUrl).toBeInstanceOf(MiniMaxHandler) + // The custom base URL is passed to OpenAI client + expect(OpenAI).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: customBaseUrl, + }), + ) + }) + + it("should set includeMaxTokens to true", () => { + // Create a new handler and verify OpenAI client was called with includeMaxTokens + const _handler = new MiniMaxHandler(mockOptions) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: mockOptions.minimaxApiKey })) + }) + }) + + describe("getModel", () => { + it("should return model info for valid model ID", () => { + const model = handler.getModel() + expect(model.id).toBe(mockOptions.apiModelId) + expect(model.info).toBeDefined() + expect(model.info.maxTokens).toBe(16384) + expect(model.info.contextWindow).toBe(262144) + expect(model.info.supportsImages).toBe(false) + expect(model.info.supportsPromptCache).toBe(true) // Should be true now + }) + + it("should return provided model ID with default model info if model does not exist", () => { + const handlerWithInvalidModel = new MiniMaxHandler({ + ...mockOptions, + apiModelId: "invalid-model", + }) + const model = handlerWithInvalidModel.getModel() + expect(model.id).toBe("invalid-model") // Returns provided ID + expect(model.info).toBeDefined() + // With the current implementation, it's the same object reference when using default model info + expect(model.info).toBe(handler.getModel().info) + // Should have the same base properties + expect(model.info.contextWindow).toBe(handler.getModel().info.contextWindow) + // And should have supportsPromptCache set to true + expect(model.info.supportsPromptCache).toBe(true) + }) + + it("should return default model if no model ID is provided", () => { + const handlerWithoutModel = new MiniMaxHandler({ + ...mockOptions, + apiModelId: undefined, + }) + const model = handlerWithoutModel.getModel() + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toBeDefined() + expect(model.info.supportsPromptCache).toBe(true) + }) + + it("should include model parameters from getModelParams", () => { + const model = handler.getModel() + expect(model).toHaveProperty("temperature") + expect(model).toHaveProperty("maxTokens") + }) + }) + + describe("createMessage", () => { + const systemPrompt = "You are a helpful assistant." + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text" as const, + text: "Hello!", + }, + ], + }, + ] + + it("should handle streaming responses", async () => { + const stream = handler.createMessage(systemPrompt, messages) + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.length).toBeGreaterThan(0) + const textChunks = chunks.filter((chunk) => chunk.type === "text") + expect(textChunks).toHaveLength(1) + expect(textChunks[0].text).toBe("Test response") + }) + + it("should include usage information", async () => { + const stream = handler.createMessage(systemPrompt, messages) + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + const usageChunks = chunks.filter((chunk) => chunk.type === "usage") + expect(usageChunks.length).toBeGreaterThan(0) + expect(usageChunks[0].inputTokens).toBe(10) + expect(usageChunks[0].outputTokens).toBe(5) + }) + + it("should include cache metrics in usage information", async () => { + const stream = handler.createMessage(systemPrompt, messages) + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + const usageChunks = chunks.filter((chunk) => chunk.type === "usage") + expect(usageChunks.length).toBeGreaterThan(0) + expect(usageChunks[0].cacheWriteTokens).toBe(0) + expect(usageChunks[0].cacheReadTokens).toBe(2) + }) + }) + + describe("processUsageMetrics", () => { + it("should correctly process usage metrics including cache information", () => { + // We need to access the protected method, so we'll create a test subclass + class TestMiniMaxHandler extends MiniMaxHandler { + public testProcessUsageMetrics(usage: any) { + return this.processUsageMetrics(usage) + } + } + + const testHandler = new TestMiniMaxHandler(mockOptions) + + const usage = { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + cached_tokens: 20, + } + + const result = testHandler.testProcessUsageMetrics(usage) + + expect(result.type).toBe("usage") + expect(result.inputTokens).toBe(100) + expect(result.outputTokens).toBe(50) + expect(result.cacheWriteTokens).toBe(0) + expect(result.cacheReadTokens).toBe(20) + }) + + it("should handle missing cache metrics gracefully", () => { + class TestMiniMaxHandler extends MiniMaxHandler { + public testProcessUsageMetrics(usage: any) { + return this.processUsageMetrics(usage) + } + } + + const testHandler = new TestMiniMaxHandler(mockOptions) + + const usage = { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + // No cached_tokens + } + + const result = testHandler.testProcessUsageMetrics(usage) + + expect(result.type).toBe("usage") + expect(result.inputTokens).toBe(100) + expect(result.outputTokens).toBe(50) + expect(result.cacheWriteTokens).toBe(0) + expect(result.cacheReadTokens).toBeUndefined() + }) + }) + + describe("addMaxTokensIfNeeded", () => { + it("should always add max_tokens regardless of includeMaxTokens option", () => { + // Create a test subclass to access the protected method + class TestMiniMaxHandler extends MiniMaxHandler { + public testAddMaxTokensIfNeeded(requestOptions: any, modelInfo: any) { + this.addMaxTokensIfNeeded(requestOptions, modelInfo) + } + } + + const testHandler = new TestMiniMaxHandler(mockOptions) + const requestOptions: any = {} + const modelInfo = { + maxTokens: 32_000, + } + + // Test with includeMaxTokens set to false - should still add max tokens + testHandler.testAddMaxTokensIfNeeded(requestOptions, modelInfo) + + expect(requestOptions.max_tokens).toBe(32_000) + }) + + it("should use modelMaxTokens when provided", () => { + class TestMiniMaxHandler extends MiniMaxHandler { + public testAddMaxTokensIfNeeded(requestOptions: any, modelInfo: any) { + this.addMaxTokensIfNeeded(requestOptions, modelInfo) + } + } + + const customMaxTokens = 5000 + const testHandler = new TestMiniMaxHandler({ + ...mockOptions, + modelMaxTokens: customMaxTokens, + }) + const requestOptions: any = {} + const modelInfo = { + maxTokens: 32_000, + } + + testHandler.testAddMaxTokensIfNeeded(requestOptions, modelInfo) + + expect(requestOptions.max_tokens).toBe(customMaxTokens) + }) + + it("should fall back to modelInfo.maxTokens when modelMaxTokens is not provided", () => { + class TestMiniMaxHandler extends MiniMaxHandler { + public testAddMaxTokensIfNeeded(requestOptions: any, modelInfo: any) { + this.addMaxTokensIfNeeded(requestOptions, modelInfo) + } + } + + const testHandler = new TestMiniMaxHandler(mockOptions) + const requestOptions: any = {} + const modelInfo = { + maxTokens: 16_000, + } + + testHandler.testAddMaxTokensIfNeeded(requestOptions, modelInfo) + + expect(requestOptions.max_tokens).toBe(16_000) + }) + }) +}) diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index 1e87b24e4ea..c778f29d30b 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -1,3 +1,4 @@ +import { MiniMaxHandler } from "./minimax" export { AnthropicVertexHandler } from "./anthropic-vertex" export { AnthropicHandler } from "./anthropic" export { AwsBedrockHandler } from "./bedrock" @@ -40,3 +41,4 @@ export { RooHandler } from "./roo" export { FeatherlessHandler } from "./featherless" export { VercelAiGatewayHandler } from "./vercel-ai-gateway" export { DeepInfraHandler } from "./deepinfra" +export { MiniMaxHandler } from "./minimax" diff --git a/src/api/providers/minimax.ts b/src/api/providers/minimax.ts new file mode 100644 index 00000000000..1dafe99184b --- /dev/null +++ b/src/api/providers/minimax.ts @@ -0,0 +1,39 @@ +import { minimaxModels, minimaxDefaultModelId } from "@roo-code/types" + +import type { ApiHandlerOptions } from "../../shared/api" + +import type { ApiStreamUsageChunk } from "../transform/stream" +import { getModelParams } from "../transform/model-params" + +import { OpenAiHandler } from "./openai" + +export class MiniMaxHandler extends OpenAiHandler { + constructor(options: ApiHandlerOptions) { + super({ + ...options, + openAiApiKey: options.minimaxApiKey ?? "not-provided", + openAiModelId: options.apiModelId ?? minimaxDefaultModelId, + openAiBaseUrl: options.minimaxBaseUrl ?? "https://api.minimax.io/v1", + openAiStreamingEnabled: true, + includeMaxTokens: true, + }) + } + + override getModel() { + const id = this.options.apiModelId ?? minimaxDefaultModelId + const info = minimaxModels[id as keyof typeof minimaxModels] || minimaxModels[minimaxDefaultModelId] + const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options }) + return { id, info, ...params } + } + + // Override to handle Minimax's usage metrics, including caching. + protected override processUsageMetrics(usage: any): ApiStreamUsageChunk { + return { + type: "usage", + inputTokens: usage?.prompt_tokens || 0, + outputTokens: usage?.completion_tokens || 0, + cacheWriteTokens: usage?.prompt_tokens_details?.cache_miss_tokens, + cacheReadTokens: usage?.prompt_tokens_details?.cached_tokens, + } + } +} diff --git a/src/shared/__tests__/checkExistApiConfig.spec.ts b/src/shared/__tests__/checkExistApiConfig.spec.ts index 7696f00cc0c..740974a955f 100644 --- a/src/shared/__tests__/checkExistApiConfig.spec.ts +++ b/src/shared/__tests__/checkExistApiConfig.spec.ts @@ -58,6 +58,7 @@ describe("checkExistKey", () => { vsCodeLmModelSelector: undefined, requestyApiKey: undefined, unboundApiKey: undefined, + minimaxApiKey: undefined, } expect(checkExistKey(config)).toBe(false) }) diff --git a/webview-ui/src/components/kilocode/hooks/useProviderModels.ts b/webview-ui/src/components/kilocode/hooks/useProviderModels.ts index 3978bcc413d..66d0c0f7938 100644 --- a/webview-ui/src/components/kilocode/hooks/useProviderModels.ts +++ b/webview-ui/src/components/kilocode/hooks/useProviderModels.ts @@ -49,7 +49,9 @@ import { deepInfraDefaultModelId, cerebrasModels, cerebrasDefaultModelId, - ovhCloudAiEndpointsDefaultModelId, // kilocode_change + ovhCloudAiEndpointsDefaultModelId, + minimaxModels, + minimaxDefaultModelId, // kilocode_change } from "@roo-code/types" import type { ModelRecord, RouterModels } from "@roo/api" import { useRouterModels } from "../../ui/hooks/useRouterModels" @@ -277,6 +279,12 @@ export const getModelsByProvider = ({ defaultModel: deepInfraDefaultModelId, } } + case "minimax": { + return { + models: minimaxModels, + defaultModel: minimaxDefaultModelId, + } + } // kilocode_change start case "ovhcloud": { return { diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 91565def48b..ae92720dfd9 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -22,6 +22,7 @@ import { geminiCliDefaultModelId, deepSeekDefaultModelId, moonshotDefaultModelId, + minimaxDefaultModelId, mistralDefaultModelId, xaiDefaultModelId, groqDefaultModelId, @@ -102,6 +103,7 @@ import { Synthetic, // kilocode_change end ZAi, + MiniMax, Fireworks, Featherless, VercelAiGateway, @@ -396,6 +398,7 @@ const ApiOptions = ({ ? mainlandZAiDefaultModelId : internationalZAiDefaultModelId, }, + minimax: { field: "apiModelId", default: minimaxDefaultModelId }, fireworks: { field: "apiModelId", default: fireworksDefaultModelId }, synthetic: { field: "apiModelId", default: syntheticDefaultModelId }, // kilocode_change featherless: { field: "apiModelId", default: featherlessDefaultModelId }, @@ -438,7 +441,16 @@ const ApiOptions = ({ // kilocode_change start // Providers that don't have documentation pages yet - const excludedProviders = ["gemini-cli", "moonshot", "chutes", "cerebras", "litellm", "zai", "qwen-code"] + const excludedProviders = [ + "gemini-cli", + "moonshot", + "chutes", + "cerebras", + "litellm", + "zai", + "qwen-code", + "minimax", + ] // Skip documentation link when the provider is excluded because documentation is not available if (excludedProviders.includes(selectedProvider)) { @@ -672,6 +684,10 @@ const ApiOptions = ({ )} + {selectedProvider === "minimax" && ( + + )} + {selectedProvider === "groq" && ( )} diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts index 961a3cd4743..9884254f828 100644 --- a/webview-ui/src/components/settings/constants.ts +++ b/webview-ui/src/components/settings/constants.ts @@ -25,6 +25,7 @@ import { syntheticModels, // kilocode_change rooModels, featherlessModels, + minimaxModels, } from "@roo-code/types" export const MODELS_BY_PROVIDER: Partial>> = { @@ -52,6 +53,7 @@ export const MODELS_BY_PROVIDER: Partial a.label.localeCompare(b.label)) PROVIDERS.unshift({ value: "kilocode", label: "Kilo Code" }) // kilocode_change diff --git a/webview-ui/src/components/settings/providers/MiniMax.tsx b/webview-ui/src/components/settings/providers/MiniMax.tsx new file mode 100644 index 00000000000..b23773dcfea --- /dev/null +++ b/webview-ui/src/components/settings/providers/MiniMax.tsx @@ -0,0 +1,73 @@ +import { useCallback } from "react" +import { VSCodeTextField, VSCodeDropdown, VSCodeOption } from "@vscode/webview-ui-toolkit/react" + +import type { ProviderSettings } from "@roo-code/types" + +import { useAppTranslation } from "@src/i18n/TranslationContext" +import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" + +import { inputEventTransform } from "../transforms" +import { cn } from "@/lib/utils" + +type MiniMaxProps = { + apiConfiguration: ProviderSettings + setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void +} + +export const MiniMax = ({ apiConfiguration, setApiConfigurationField }: MiniMaxProps) => { + const { t } = useAppTranslation() + + const handleInputChange = useCallback( + ( + field: K, + transform: (event: E) => ProviderSettings[K] = inputEventTransform, + ) => + (event: E | Event) => { + setApiConfigurationField(field, transform(event as E)) + }, + [setApiConfigurationField], + ) + + return ( + <> +
+ + + + api.minimax.io + + + api.minimaxi.com + + +
+
+ + + +
+ {t("settings:providers.apiKeyStorageNotice")} +
+ {!apiConfiguration?.minimaxApiKey && ( + + {t("settings:providers.getMoonshotApiKey")} + + )} +
+ + ) +} diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index 0f0483d9241..17ed718e8c4 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -36,3 +36,4 @@ export { Synthetic } from "./Synthetic" // kilocode_change export { Featherless } from "./Featherless" export { VercelAiGateway } from "./VercelAiGateway" export { DeepInfra } from "./DeepInfra" +export { MiniMax } from "./MiniMax" diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index 534f07caa36..65ece7ef9a1 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -63,6 +63,8 @@ import { vercelAiGatewayDefaultModelId, BEDROCK_1M_CONTEXT_MODEL_IDS, deepInfraDefaultModelId, + minimaxDefaultModelId, + minimaxModels, ovhCloudAiEndpointsDefaultModelId, // kilocode_change } from "@roo-code/types" @@ -456,6 +458,11 @@ function getSelectedModel({ const info = routerModels.ovhcloud[id] return { id, info } } + case "minimax": { + const id = apiConfiguration.apiModelId ?? minimaxDefaultModelId + const info = minimaxModels[id as keyof typeof minimaxModels] + return { id, info } + } // kilocode_change end // case "anthropic": // case "human-relay": diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 179c62668b8..019c73e864a 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Obtenir clau API de Z AI", "zaiEntrypoint": "Punt d'entrada de Z AI", "zaiEntrypointDescription": "Si us plau, seleccioneu el punt d'entrada de l'API apropiat segons la vostra ubicació. Si sou a la Xina, trieu open.bigmodel.cn. Altrament, trieu api.z.ai.", + "minimaxApiKey": "Clau API de MiniMax", + "getMiniMaxApiKey": "Obtenir clau API de MiniMax", + "minimaxBaseUrl": "Punt d'entrada de MiniMax", "geminiApiKey": "Clau API de Gemini", "getGroqApiKey": "Obtenir clau API de Groq", "groqApiKey": "Clau API de Groq", diff --git a/webview-ui/src/i18n/locales/cs/settings.json b/webview-ui/src/i18n/locales/cs/settings.json index 03621a6ac66..72cceac0088 100644 --- a/webview-ui/src/i18n/locales/cs/settings.json +++ b/webview-ui/src/i18n/locales/cs/settings.json @@ -315,6 +315,9 @@ "moonshotBaseUrl": "Vstupní bod Moonshot", "zaiEntrypoint": "Vstupní bod Z AI", "zaiEntrypointDescription": "Vyberte prosím vhodný vstupní bod API podle vaší polohy. Pokud jste v Číně, vyberte open.bigmodel.cn. Jinak vyberte api.z.ai.", + "minimaxApiKey": "Klíč API MiniMax", + "getMiniMaxApiKey": "Získat klíč API MiniMax", + "minimaxBaseUrl": "Vstupní bod MiniMax", "geminiApiKey": "Klíč API Gemini", "getGroqApiKey": "Získat klíč API Groq", "groqApiKey": "Klíč API Groq", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 37acc150c78..b1eddf24f57 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Z AI API-Schlüssel erhalten", "zaiEntrypoint": "Z AI Einstiegspunkt", "zaiEntrypointDescription": "Bitte wähle den entsprechenden API-Einstiegspunkt basierend auf deinem Standort aus. Wenn du dich in China befindest, wähle open.bigmodel.cn. Andernfalls wähle api.z.ai.", + "minimaxApiKey": "MiniMax API-Schlüssel", + "getMiniMaxApiKey": "MiniMax API-Schlüssel erhalten", + "minimaxBaseUrl": "MiniMax-Einstiegspunkt", "geminiApiKey": "Gemini API-Schlüssel", "getGroqApiKey": "Groq API-Schlüssel erhalten", "groqApiKey": "Groq API-Schlüssel", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 31dbb4b2a8f..f03cf2fc26c 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -311,6 +311,9 @@ "getZaiApiKey": "Get Z AI API Key", "zaiEntrypoint": "Z AI Entrypoint", "zaiEntrypointDescription": "Please select the appropriate API entrypoint based on your location. If you are in China, choose open.bigmodel.cn. Otherwise, choose api.z.ai.", + "minimaxApiKey": "MiniMax API Key", + "getMiniMaxApiKey": "Get MiniMax API Key", + "minimaxBaseUrl": "MiniMax Entrypoint", "geminiApiKey": "Gemini API Key", "getGroqApiKey": "Get Groq API Key", "groqApiKey": "Groq API Key", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index bce2c528a15..6c7d3d9e2d2 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Obtener clave API de Z AI", "zaiEntrypoint": "Punto de entrada de Z AI", "zaiEntrypointDescription": "Por favor, seleccione el punto de entrada de API apropiado según su ubicación. Si está en China, elija open.bigmodel.cn. De lo contrario, elija api.z.ai.", + "minimaxApiKey": "Clave API de MiniMax", + "getMiniMaxApiKey": "Obtener clave API de MiniMax", + "minimaxBaseUrl": "Punto de entrada de MiniMax", "geminiApiKey": "Clave API de Gemini", "getGroqApiKey": "Obtener clave API de Groq", "groqApiKey": "Clave API de Groq", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 8e754e7381e..cc4d16e2e75 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Obtenir la clé API Z AI", "zaiEntrypoint": "Point d'entrée Z AI", "zaiEntrypointDescription": "Veuillez sélectionner le point d'entrée API approprié en fonction de votre emplacement. Si vous êtes en Chine, choisissez open.bigmodel.cn. Sinon, choisissez api.z.ai.", + "minimaxApiKey": "Clé API MiniMax", + "getMiniMaxApiKey": "Obtenir la clé API MiniMax", + "minimaxBaseUrl": "Point d'entrée MiniMax", "geminiApiKey": "Clé API Gemini", "getGroqApiKey": "Obtenir la clé API Groq", "groqApiKey": "Clé API Groq", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 2c7f7fcbdf1..458af6f1d79 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Z AI API कुंजी प्राप्त करें", "zaiEntrypoint": "Z AI प्रवेश बिंदु", "zaiEntrypointDescription": "कृपया अपने स्थान के आधार पर उपयुक्त API प्रवेश बिंदु का चयन करें। यदि आप चीन में हैं, तो open.bigmodel.cn चुनें। अन्यथा, api.z.ai चुनें।", + "minimaxApiKey": "MiniMax API कुंजी", + "getMiniMaxApiKey": "MiniMax API कुंजी प्राप्त करें", + "minimaxBaseUrl": "MiniMax प्रवेश बिंदु", "geminiApiKey": "Gemini API कुंजी", "getGroqApiKey": "Groq API कुंजी प्राप्त करें", "groqApiKey": "Groq API कुंजी", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index d72cfd58e58..045836918e8 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Dapatkan Kunci API Z AI", "zaiEntrypoint": "Titik Masuk Z AI", "zaiEntrypointDescription": "Silakan pilih titik masuk API yang sesuai berdasarkan lokasi Anda. Jika Anda berada di China, pilih open.bigmodel.cn. Jika tidak, pilih api.z.ai.", + "minimaxApiKey": "Kunci API MiniMax", + "getMiniMaxApiKey": "Dapatkan Kunci API MiniMax", + "minimaxBaseUrl": "Titik Masuk MiniMax", "geminiApiKey": "Gemini API Key", "getGroqApiKey": "Dapatkan Groq API Key", "groqApiKey": "Groq API Key", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 737f200f6a5..8b196449df6 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -307,6 +307,9 @@ "getZaiApiKey": "Ottieni chiave API Z AI", "zaiEntrypoint": "Punto di ingresso Z AI", "zaiEntrypointDescription": "Si prega di selezionare il punto di ingresso API appropriato in base alla propria posizione. Se ti trovi in Cina, scegli open.bigmodel.cn. Altrimenti, scegli api.z.ai.", + "minimaxApiKey": "Chiave API MiniMax", + "getMiniMaxApiKey": "Ottieni chiave API MiniMax", + "minimaxBaseUrl": "Punto di ingresso MiniMax", "geminiApiKey": "Chiave API Gemini", "getGroqApiKey": "Ottieni chiave API Groq", "groqApiKey": "Chiave API Groq", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 882183a413d..73a8e93e9ad 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -307,6 +307,9 @@ "getZaiApiKey": "Z AI APIキーを取得", "zaiEntrypoint": "Z AI エントリーポイント", "zaiEntrypointDescription": "お住まいの地域に応じて適切な API エントリーポイントを選択してください。中国にお住まいの場合は open.bigmodel.cn を選択してください。それ以外の場合は api.z.ai を選択してください。", + "minimaxApiKey": "MiniMax APIキー", + "getMiniMaxApiKey": "MiniMax APIキーを取得", + "minimaxBaseUrl": "MiniMax エントリーポイント", "geminiApiKey": "Gemini APIキー", "getGroqApiKey": "Groq APIキーを取得", "groqApiKey": "Groq APIキー", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index b02bdad689b..5d33c1f98b4 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Z AI API 키 받기", "zaiEntrypoint": "Z AI 엔트리포인트", "zaiEntrypointDescription": "위치에 따라 적절한 API 엔트리포인트를 선택하세요. 중국에 있다면 open.bigmodel.cn을 선택하세요. 그렇지 않으면 api.z.ai를 선택하세요.", + "minimaxApiKey": "MiniMax API 키", + "getMiniMaxApiKey": "MiniMax API 키 받기", + "minimaxBaseUrl": "MiniMax 엔트리포인트", "geminiApiKey": "Gemini API 키", "getGroqApiKey": "Groq API 키 받기", "groqApiKey": "Groq API 키", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index a40ee3100e6..337d3173db2 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Z AI API-sleutel ophalen", "zaiEntrypoint": "Z AI-ingangspunt", "zaiEntrypointDescription": "Selecteer het juiste API-ingangspunt op basis van uw locatie. Als u zich in China bevindt, kies dan open.bigmodel.cn. Anders kiest u api.z.ai.", + "minimaxApiKey": "MiniMax API-sleutel", + "getMiniMaxApiKey": "MiniMax API-sleutel ophalen", + "minimaxBaseUrl": "MiniMax-ingangspunt", "geminiApiKey": "Gemini API-sleutel", "getGroqApiKey": "Groq API-sleutel ophalen", "groqApiKey": "Groq API-sleutel", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index c0a19078212..1810c8dc032 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Uzyskaj klucz API Z AI", "zaiEntrypoint": "Punkt wejścia Z AI", "zaiEntrypointDescription": "Wybierz odpowiedni punkt wejścia API w zależności od swojej lokalizacji. Jeśli jesteś w Chinach, wybierz open.bigmodel.cn. W przeciwnym razie wybierz api.z.ai.", + "minimaxApiKey": "Klucz API MiniMax", + "getMiniMaxApiKey": "Uzyskaj klucz API MiniMax", + "minimaxBaseUrl": "Punkt wejścia MiniMax", "geminiApiKey": "Klucz API Gemini", "getGroqApiKey": "Uzyskaj klucz API Groq", "groqApiKey": "Klucz API Groq", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 7f4624e8630..23b9b6436b9 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Obter chave de API Z AI", "zaiEntrypoint": "Ponto de entrada Z AI", "zaiEntrypointDescription": "Selecione o ponto de entrada da API apropriado com base na sua localização. Se você estiver na China, escolha open.bigmodel.cn. Caso contrário, escolha api.z.ai.", + "minimaxApiKey": "Chave de API MiniMax", + "getMiniMaxApiKey": "Obter chave de API MiniMax", + "minimaxBaseUrl": "Ponto de entrada MiniMax", "geminiApiKey": "Chave de API Gemini", "getGroqApiKey": "Obter chave de API Groq", "groqApiKey": "Chave de API Groq", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index e4bd8876987..27d298fd3f4 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Получить Z AI API-ключ", "zaiEntrypoint": "Точка входа Z AI", "zaiEntrypointDescription": "Пожалуйста, выберите подходящую точку входа API в зависимости от вашего местоположения. Если вы находитесь в Китае, выберите open.bigmodel.cn. В противном случае выберите api.z.ai.", + "minimaxApiKey": "MiniMax API-ключ", + "getMiniMaxApiKey": "Получить MiniMax API-ключ", + "minimaxBaseUrl": "Точка входа MiniMax", "geminiApiKey": "Gemini API-ключ", "getGroqApiKey": "Получить Groq API-ключ", "groqApiKey": "Groq API-ключ", diff --git a/webview-ui/src/i18n/locales/th/settings.json b/webview-ui/src/i18n/locales/th/settings.json index 606dc766c1f..d8c60ccbc4d 100644 --- a/webview-ui/src/i18n/locales/th/settings.json +++ b/webview-ui/src/i18n/locales/th/settings.json @@ -308,6 +308,9 @@ "moonshotBaseUrl": "จุดเข้าใช้งาน Moonshot", "zaiEntrypoint": "จุดเข้าใช้งาน Z AI", "zaiEntrypointDescription": "โปรดเลือกจุดเข้าใช้งาน API ที่เหมาะสมตามตำแหน่งของคุณ หากคุณอยู่ในจีน ให้เลือก open.bigmodel.cn มิฉะนั้น ให้เลือก api.z.ai", + "minimaxApiKey": "คีย์ API ของ MiniMax", + "getMiniMaxApiKey": "รับคีย์ API ของ MiniMax", + "minimaxBaseUrl": "จุดเข้าใช้งาน MiniMax", "geminiApiKey": "คีย์ API ของ Gemini", "geminiParameters": { "urlContext": { diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 8f9bb5ba70d..d520b54fd41 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -307,6 +307,9 @@ "getZaiApiKey": "Z AI API Anahtarı Al", "zaiEntrypoint": "Z AI Giriş Noktası", "zaiEntrypointDescription": "Konumunuza göre uygun API giriş noktasını seçin. Çin'de iseniz open.bigmodel.cn'yi seçin. Aksi takdirde api.z.ai'yi seçin.", + "minimaxApiKey": "MiniMax API Anahtarı", + "getMiniMaxApiKey": "MiniMax API Anahtarı Al", + "minimaxBaseUrl": "MiniMax Giriş Noktası", "geminiApiKey": "Gemini API Anahtarı", "getGroqApiKey": "Groq API Anahtarı Al", "groqApiKey": "Groq API Anahtarı", diff --git a/webview-ui/src/i18n/locales/uk/settings.json b/webview-ui/src/i18n/locales/uk/settings.json index 0f4a3ce1b7a..baeeed3a0d7 100644 --- a/webview-ui/src/i18n/locales/uk/settings.json +++ b/webview-ui/src/i18n/locales/uk/settings.json @@ -314,6 +314,9 @@ "moonshotBaseUrl": "Точка входу Moonshot", "zaiEntrypoint": "Точка входу Z AI", "zaiEntrypointDescription": "Будь ласка, виберіть відповідну точку входу API залежно від вашого місцезнаходження. Якщо ви в Китаї, виберіть open.bigmodel.cn. Інакше виберіть api.z.ai.", + "minimaxApiKey": "Ключ API MiniMax", + "getMiniMaxApiKey": "Отримати ключ API MiniMax", + "minimaxBaseUrl": "Точка входу MiniMax", "geminiApiKey": "Ключ API Gemini", "geminiUrlContext": "Включати URL у контекст", "geminiUrlContextDescription": "Коли увімкнено, URL будуть включені в контекст при надсиланні запитів до Gemini", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index de37986f5be..bd6d6ee5d8c 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -306,6 +306,9 @@ "getZaiApiKey": "Lấy khóa API Z AI", "zaiEntrypoint": "Điểm vào Z AI", "zaiEntrypointDescription": "Vui lòng chọn điểm vào API phù hợp dựa trên vị trí của bạn. Nếu bạn ở Trung Quốc, hãy chọn open.bigmodel.cn. Ngược lại, hãy chọn api.z.ai.", + "minimaxApiKey": "Khóa API MiniMax", + "getMiniMaxApiKey": "Lấy khóa API MiniMax", + "minimaxBaseUrl": "Điểm vào MiniMax", "geminiApiKey": "Khóa API Gemini", "getGroqApiKey": "Lấy khóa API Groq", "groqApiKey": "Khóa API Groq", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 0f200d24bd2..d929ad6913a 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -307,6 +307,9 @@ "getZaiApiKey": "获取 Z AI API 密钥", "zaiEntrypoint": "Z AI 服务站点", "zaiEntrypointDescription": "请根据您的位置选择适当的 API 服务站点。如果您在中国,请选择 open.bigmodel.cn。否则,请选择 api.z.ai。", + "minimaxApiKey": "MiniMax API 密钥", + "getMiniMaxApiKey": "获取 MiniMax API 密钥", + "minimaxBaseUrl": "MiniMax 服务站点", "geminiApiKey": "Gemini API 密钥", "getGroqApiKey": "获取 Groq API 密钥", "groqApiKey": "Groq API 密钥", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 8b065ee52cb..07132d9f3a1 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -307,6 +307,9 @@ "getZaiApiKey": "取得 Z AI API 金鑰", "zaiEntrypoint": "Z AI 服務端點", "zaiEntrypointDescription": "請根據您的位置選擇適當的 API 服務端點。如果您在中國,請選擇 open.bigmodel.cn。否則,請選擇 api.z.ai。", + "minimaxApiKey": "MiniMax API 金鑰", + "getMiniMaxApiKey": "取得 MiniMax API 金鑰", + "minimaxBaseUrl": "MiniMax 服務端點", "geminiApiKey": "Gemini API 金鑰", "getGroqApiKey": "取得 Groq API 金鑰", "groqApiKey": "Groq API 金鑰", From 4a9082a88851bcb21546a142a4df0f7055cc0789 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sun, 19 Oct 2025 21:25:31 +0800 Subject: [PATCH 02/17] feat: fix --- webview-ui/src/components/settings/providers/MiniMax.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/providers/MiniMax.tsx b/webview-ui/src/components/settings/providers/MiniMax.tsx index b23773dcfea..07d38b12306 100644 --- a/webview-ui/src/components/settings/providers/MiniMax.tsx +++ b/webview-ui/src/components/settings/providers/MiniMax.tsx @@ -64,7 +64,7 @@ export const MiniMax = ({ apiConfiguration, setApiConfigurationField }: MiniMaxP : "https://www.minimax.io/platform/user-center/basic-information/interface-key" } appearance="secondary"> - {t("settings:providers.getMoonshotApiKey")} + {t("settings:providers.getMinimaxApiKey")} )} From 233d1c75d457addd12e8506b29ba414f8ebcd998 Mon Sep 17 00:00:00 2001 From: xiaose Date: Wed, 22 Oct 2025 17:22:11 +0800 Subject: [PATCH 03/17] feat: code --- webview-ui/src/components/settings/providers/MiniMax.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/providers/MiniMax.tsx b/webview-ui/src/components/settings/providers/MiniMax.tsx index 07d38b12306..4055be7d179 100644 --- a/webview-ui/src/components/settings/providers/MiniMax.tsx +++ b/webview-ui/src/components/settings/providers/MiniMax.tsx @@ -64,7 +64,7 @@ export const MiniMax = ({ apiConfiguration, setApiConfigurationField }: MiniMaxP : "https://www.minimax.io/platform/user-center/basic-information/interface-key" } appearance="secondary"> - {t("settings:providers.getMinimaxApiKey")} + {t("settings:providers.getMiniMaxApiKey")} )} From 60760beabce764ab86bf8f1ba021578686d8acbb Mon Sep 17 00:00:00 2001 From: xiaose Date: Wed, 22 Oct 2025 18:21:33 +0800 Subject: [PATCH 04/17] feat: format code --- src/api/providers/__tests__/minimax.spec.ts | 497 +++++++++----------- src/api/providers/minimax.ts | 38 +- 2 files changed, 220 insertions(+), 315 deletions(-) diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts index 69bb53c361d..0a7c375db4f 100644 --- a/src/api/providers/__tests__/minimax.spec.ts +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -1,119 +1,48 @@ -// Mocks must come first, before imports -const mockCreate = vi.fn() -vi.mock("openai", () => { - return { - __esModule: true, - default: vi.fn().mockImplementation(() => ({ - chat: { - completions: { - create: mockCreate.mockImplementation(async (options) => { - if (!options.stream) { - return { - id: "test-completion", - choices: [ - { - message: { role: "assistant", content: "Test response", refusal: null }, - finish_reason: "stop", - index: 0, - }, - ], - usage: { - prompt_tokens: 10, - completion_tokens: 5, - total_tokens: 15, - cached_tokens: 2, - }, - } - } - - // Return async iterator for streaming - return { - [Symbol.asyncIterator]: async function* () { - yield { - choices: [ - { - delta: { content: "Test response" }, - index: 0, - }, - ], - usage: null, - } - yield { - choices: [ - { - delta: {}, - index: 0, - }, - ], - usage: { - prompt_tokens: 10, - completion_tokens: 5, - total_tokens: 15, - cached_tokens: 2, - }, - } - }, - } - }), - }, - }, - })), - } -}) +// npx vitest run src/api/providers/__tests__/minimax.spec.ts + +// kilocode_change start +vitest.mock("vscode", () => ({ + workspace: { + getConfiguration: vitest.fn().mockReturnValue({ + get: vitest.fn().mockReturnValue(600), // Default timeout in seconds + }), + }, +})) +// kilocode_change end import OpenAI from "openai" -import type { Anthropic } from "@anthropic-ai/sdk" +import { Anthropic } from "@anthropic-ai/sdk" -import { minimaxDefaultModelId } from "@roo-code/types" - -import type { ApiHandlerOptions } from "../../../shared/api" +import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types" import { MiniMaxHandler } from "../minimax" +vitest.mock("openai", () => { + const createMock = vitest.fn() + return { + default: vitest.fn(() => ({ chat: { completions: { create: createMock } } })), + } +}) + describe("MiniMaxHandler", () => { let handler: MiniMaxHandler - let mockOptions: ApiHandlerOptions + let mockCreate: any beforeEach(() => { - mockOptions = { - minimaxApiKey: "test-api-key", - apiModelId: "MiniMax-M1", - minimaxBaseUrl: "https://api.minimax.io/v1", - } - handler = new MiniMaxHandler(mockOptions) - vi.clearAllMocks() + vitest.clearAllMocks() + mockCreate = (OpenAI as unknown as any)().chat.completions.create }) - describe("constructor", () => { - it("should initialize with provided options", () => { - expect(handler).toBeInstanceOf(MiniMaxHandler) - expect(handler.getModel().id).toBe(mockOptions.apiModelId) - }) - - it.skip("should throw error if API key is missing", () => { - expect(() => { - new MiniMaxHandler({ - ...mockOptions, - minimaxApiKey: undefined, - }) - }).toThrow("MiniMax API key is required") - }) - - it("should use default model ID if not provided", () => { - const handlerWithoutModel = new MiniMaxHandler({ - ...mockOptions, - apiModelId: undefined, + describe("International MiniMax (default)", () => { + beforeEach(() => { + handler = new MiniMaxHandler({ + minimaxApiKey: "test-minimax-api-key", + minimaxBaseUrl: "https://api.minimax.io/v1", }) - expect(handlerWithoutModel.getModel().id).toBe(minimaxDefaultModelId) }) - it("should use default base URL if not provided", () => { - const handlerWithoutBaseUrl = new MiniMaxHandler({ - ...mockOptions, - minimaxBaseUrl: undefined, - }) - expect(handlerWithoutBaseUrl).toBeInstanceOf(MiniMaxHandler) - // The base URL is passed to OpenAI client internally + it("should use the correct international MiniMax base URL by default", () => { + new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) expect(OpenAI).toHaveBeenCalledWith( expect.objectContaining({ baseURL: "https://api.minimax.io/v1", @@ -121,239 +50,235 @@ describe("MiniMaxHandler", () => { ) }) - it("should use chinese base URL if provided", () => { - const customBaseUrl = "https://api.minimax.io/v1" - const handlerWithCustomUrl = new MiniMaxHandler({ - ...mockOptions, - minimaxBaseUrl: customBaseUrl, + it("should use the provided API key", () => { + const minimaxApiKey = "test-minimax-api-key" + new MiniMaxHandler({ minimaxApiKey }) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey })) + }) + + it("should return default model when no model is specified", () => { + const model = handler.getModel() + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + }) + + it("should return specified model when valid model is provided", () => { + const testModelId: MinimaxModelId = "MiniMax-M1" + const handlerWithModel = new MiniMaxHandler({ + apiModelId: testModelId, + minimaxApiKey: "test-minimax-api-key", }) - expect(handlerWithCustomUrl).toBeInstanceOf(MiniMaxHandler) - // The custom base URL is passed to OpenAI client - expect(OpenAI).toHaveBeenCalledWith( - expect.objectContaining({ - baseURL: customBaseUrl, - }), - ) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual(minimaxModels[testModelId]) }) - it("should set includeMaxTokens to true", () => { - // Create a new handler and verify OpenAI client was called with includeMaxTokens - const _handler = new MiniMaxHandler(mockOptions) - expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: mockOptions.minimaxApiKey })) + it("should return MiniMax-M1 model with correct configuration", () => { + const testModelId: MinimaxModelId = "MiniMax-M1" + const handlerWithModel = new MiniMaxHandler({ + apiModelId: testModelId, + minimaxApiKey: "test-minimax-api-key", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual(minimaxModels[testModelId]) + expect(model.info.contextWindow).toBe(1_000_192) + expect(model.info.maxTokens).toBe(25_600) + expect(model.info.supportsPromptCache).toBe(false) }) }) - describe("getModel", () => { - it("should return model info for valid model ID", () => { - const model = handler.getModel() - expect(model.id).toBe(mockOptions.apiModelId) - expect(model.info).toBeDefined() - expect(model.info.maxTokens).toBe(16384) - expect(model.info.contextWindow).toBe(262144) - expect(model.info.supportsImages).toBe(false) - expect(model.info.supportsPromptCache).toBe(true) // Should be true now + describe("China MiniMax", () => { + beforeEach(() => { + handler = new MiniMaxHandler({ + minimaxApiKey: "test-minimax-api-key", + minimaxBaseUrl: "https://api.minimaxi.com/v1", + }) }) - it("should return provided model ID with default model info if model does not exist", () => { - const handlerWithInvalidModel = new MiniMaxHandler({ - ...mockOptions, - apiModelId: "invalid-model", + it("should use the correct China MiniMax base URL", () => { + new MiniMaxHandler({ + minimaxApiKey: "test-minimax-api-key", + minimaxBaseUrl: "https://api.minimaxi.com/v1", }) - const model = handlerWithInvalidModel.getModel() - expect(model.id).toBe("invalid-model") // Returns provided ID - expect(model.info).toBeDefined() - // With the current implementation, it's the same object reference when using default model info - expect(model.info).toBe(handler.getModel().info) - // Should have the same base properties - expect(model.info.contextWindow).toBe(handler.getModel().info.contextWindow) - // And should have supportsPromptCache set to true - expect(model.info.supportsPromptCache).toBe(true) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.minimaxi.com/v1" })) }) - it("should return default model if no model ID is provided", () => { - const handlerWithoutModel = new MiniMaxHandler({ - ...mockOptions, - apiModelId: undefined, - }) - const model = handlerWithoutModel.getModel() - expect(model.id).toBe(minimaxDefaultModelId) - expect(model.info).toBeDefined() - expect(model.info.supportsPromptCache).toBe(true) + it("should use the provided API key for China", () => { + const minimaxApiKey = "test-minimax-api-key" + new MiniMaxHandler({ minimaxApiKey, minimaxBaseUrl: "https://api.minimaxi.com/v1" }) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey })) }) - it("should include model parameters from getModelParams", () => { + it("should return default model when no model is specified", () => { const model = handler.getModel() - expect(model).toHaveProperty("temperature") - expect(model).toHaveProperty("maxTokens") + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) }) }) - describe("createMessage", () => { - const systemPrompt = "You are a helpful assistant." - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] - - it("should handle streaming responses", async () => { - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - expect(chunks.length).toBeGreaterThan(0) - const textChunks = chunks.filter((chunk) => chunk.type === "text") - expect(textChunks).toHaveLength(1) - expect(textChunks[0].text).toBe("Test response") + describe("Default behavior", () => { + it("should default to international base URL when none is specified", () => { + const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + expect(OpenAI).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: "https://api.minimax.io/v1", + }), + ) + + const model = handlerDefault.getModel() + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) }) - it("should include usage information", async () => { - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].inputTokens).toBe(10) - expect(usageChunks[0].outputTokens).toBe(5) + it("should use undefined as default API key when none is specified", () => { + new MiniMaxHandler({}) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: undefined })) }) - it("should include cache metrics in usage information", async () => { - const stream = handler.createMessage(systemPrompt, messages) - const chunks: any[] = [] - for await (const chunk of stream) { - chunks.push(chunk) - } - - const usageChunks = chunks.filter((chunk) => chunk.type === "usage") - expect(usageChunks.length).toBeGreaterThan(0) - expect(usageChunks[0].cacheWriteTokens).toBe(0) - expect(usageChunks[0].cacheReadTokens).toBe(2) + it("should default to MiniMax-M1 model", () => { + const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + const model = handlerDefault.getModel() + expect(model.id).toBe("MiniMax-M1") }) }) - describe("processUsageMetrics", () => { - it("should correctly process usage metrics including cache information", () => { - // We need to access the protected method, so we'll create a test subclass - class TestMiniMaxHandler extends MiniMaxHandler { - public testProcessUsageMetrics(usage: any) { - return this.processUsageMetrics(usage) - } - } - - const testHandler = new TestMiniMaxHandler(mockOptions) - - const usage = { - prompt_tokens: 100, - completion_tokens: 50, - total_tokens: 150, - cached_tokens: 20, - } + describe("API Methods", () => { + beforeEach(() => { + handler = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + }) - const result = testHandler.testProcessUsageMetrics(usage) + it("completePrompt method should return text from MiniMax API", async () => { + const expectedResponse = "This is a test response from MiniMax" + mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] }) + const result = await handler.completePrompt("test prompt") + expect(result).toBe(expectedResponse) + }) - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheWriteTokens).toBe(0) - expect(result.cacheReadTokens).toBe(20) + it("should handle errors in completePrompt", async () => { + const errorMessage = "MiniMax API error" + mockCreate.mockRejectedValueOnce(new Error(errorMessage)) + await expect(handler.completePrompt("test prompt")).rejects.toThrow() }) - it("should handle missing cache metrics gracefully", () => { - class TestMiniMaxHandler extends MiniMaxHandler { - public testProcessUsageMetrics(usage: any) { - return this.processUsageMetrics(usage) + it("createMessage should yield text content from stream", async () => { + const testContent = "This is test content from MiniMax stream" + + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vitest + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: testContent } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), } - } - - const testHandler = new TestMiniMaxHandler(mockOptions) - - const usage = { - prompt_tokens: 100, - completion_tokens: 50, - total_tokens: 150, - // No cached_tokens - } + }) - const result = testHandler.testProcessUsageMetrics(usage) + const stream = handler.createMessage("system prompt", []) + const firstChunk = await stream.next() - expect(result.type).toBe("usage") - expect(result.inputTokens).toBe(100) - expect(result.outputTokens).toBe(50) - expect(result.cacheWriteTokens).toBe(0) - expect(result.cacheReadTokens).toBeUndefined() + expect(firstChunk.done).toBe(false) + expect(firstChunk.value).toEqual({ type: "text", text: testContent }) }) - }) - describe("addMaxTokensIfNeeded", () => { - it("should always add max_tokens regardless of includeMaxTokens option", () => { - // Create a test subclass to access the protected method - class TestMiniMaxHandler extends MiniMaxHandler { - public testAddMaxTokensIfNeeded(requestOptions: any, modelInfo: any) { - this.addMaxTokensIfNeeded(requestOptions, modelInfo) + it("createMessage should yield usage data from stream", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vitest + .fn() + .mockResolvedValueOnce({ + done: false, + value: { + choices: [{ delta: {} }], + usage: { prompt_tokens: 10, completion_tokens: 20 }, + }, + }) + .mockResolvedValueOnce({ done: true }), + }), } - } - - const testHandler = new TestMiniMaxHandler(mockOptions) - const requestOptions: any = {} - const modelInfo = { - maxTokens: 32_000, - } + }) - // Test with includeMaxTokens set to false - should still add max tokens - testHandler.testAddMaxTokensIfNeeded(requestOptions, modelInfo) + const stream = handler.createMessage("system prompt", []) + const firstChunk = await stream.next() - expect(requestOptions.max_tokens).toBe(32_000) + expect(firstChunk.done).toBe(false) + expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 }) }) - it("should use modelMaxTokens when provided", () => { - class TestMiniMaxHandler extends MiniMaxHandler { - public testAddMaxTokensIfNeeded(requestOptions: any, modelInfo: any) { - this.addMaxTokensIfNeeded(requestOptions, modelInfo) - } - } + it("createMessage should pass correct parameters to MiniMax client", async () => { + const modelId: MinimaxModelId = "MiniMax-M1" + const modelInfo = minimaxModels[modelId] + const handlerWithModel = new MiniMaxHandler({ + apiModelId: modelId, + minimaxApiKey: "test-minimax-api-key", + }) - const customMaxTokens = 5000 - const testHandler = new TestMiniMaxHandler({ - ...mockOptions, - modelMaxTokens: customMaxTokens, + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } }) - const requestOptions: any = {} - const modelInfo = { - maxTokens: 32_000, - } - testHandler.testAddMaxTokensIfNeeded(requestOptions, modelInfo) + const systemPrompt = "Test system prompt for MiniMax" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for MiniMax" }] + + const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) + await messageGenerator.next() - expect(requestOptions.max_tokens).toBe(customMaxTokens) + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: modelId, + max_tokens: modelInfo.maxTokens, + temperature: 0, + messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), + stream: true, + stream_options: { include_usage: true }, + }), + undefined, + ) }) - it("should fall back to modelInfo.maxTokens when modelMaxTokens is not provided", () => { - class TestMiniMaxHandler extends MiniMaxHandler { - public testAddMaxTokensIfNeeded(requestOptions: any, modelInfo: any) { - this.addMaxTokensIfNeeded(requestOptions, modelInfo) + it("should use temperature 0 by default", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), } - } + }) - const testHandler = new TestMiniMaxHandler(mockOptions) - const requestOptions: any = {} - const modelInfo = { - maxTokens: 16_000, - } + const messageGenerator = handler.createMessage("test", []) + await messageGenerator.next() - testHandler.testAddMaxTokensIfNeeded(requestOptions, modelInfo) + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + temperature: 0, + }), + undefined, + ) + }) + }) - expect(requestOptions.max_tokens).toBe(16_000) + describe("Model Configuration", () => { + it("should correctly configure MiniMax-M1 model properties", () => { + const model = minimaxModels["MiniMax-M1"] + expect(model.maxTokens).toBe(25_600) + expect(model.contextWindow).toBe(1_000_192) + expect(model.supportsImages).toBe(false) + expect(model.supportsPromptCache).toBe(false) + expect(model.inputPrice).toBe(0.4) + expect(model.outputPrice).toBe(2.2) }) }) }) diff --git a/src/api/providers/minimax.ts b/src/api/providers/minimax.ts index 1dafe99184b..c6895f16f56 100644 --- a/src/api/providers/minimax.ts +++ b/src/api/providers/minimax.ts @@ -1,39 +1,19 @@ -import { minimaxModels, minimaxDefaultModelId } from "@roo-code/types" +import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types" import type { ApiHandlerOptions } from "../../shared/api" -import type { ApiStreamUsageChunk } from "../transform/stream" -import { getModelParams } from "../transform/model-params" +import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider" -import { OpenAiHandler } from "./openai" - -export class MiniMaxHandler extends OpenAiHandler { +export class MiniMaxHandler extends BaseOpenAiCompatibleProvider { constructor(options: ApiHandlerOptions) { super({ ...options, - openAiApiKey: options.minimaxApiKey ?? "not-provided", - openAiModelId: options.apiModelId ?? minimaxDefaultModelId, - openAiBaseUrl: options.minimaxBaseUrl ?? "https://api.minimax.io/v1", - openAiStreamingEnabled: true, - includeMaxTokens: true, + providerName: "MiniMax", + baseURL: options.minimaxBaseUrl ?? "https://api.minimax.io/v1", + apiKey: options.minimaxApiKey, + defaultProviderModelId: minimaxDefaultModelId, + providerModels: minimaxModels, + defaultTemperature: 0, }) } - - override getModel() { - const id = this.options.apiModelId ?? minimaxDefaultModelId - const info = minimaxModels[id as keyof typeof minimaxModels] || minimaxModels[minimaxDefaultModelId] - const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options }) - return { id, info, ...params } - } - - // Override to handle Minimax's usage metrics, including caching. - protected override processUsageMetrics(usage: any): ApiStreamUsageChunk { - return { - type: "usage", - inputTokens: usage?.prompt_tokens || 0, - outputTokens: usage?.completion_tokens || 0, - cacheWriteTokens: usage?.prompt_tokens_details?.cache_miss_tokens, - cacheReadTokens: usage?.prompt_tokens_details?.cached_tokens, - } - } } From 849e3ba90fd872ec9e462ca273960b06e6a4260b Mon Sep 17 00:00:00 2001 From: xiaose Date: Wed, 22 Oct 2025 19:15:29 +0800 Subject: [PATCH 05/17] feat: code --- cli/src/constants/providers/validation.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/cli/src/constants/providers/validation.ts b/cli/src/constants/providers/validation.ts index 1637a081a07..9b6101cc79d 100644 --- a/cli/src/constants/providers/validation.ts +++ b/cli/src/constants/providers/validation.ts @@ -44,4 +44,5 @@ export const PROVIDER_REQUIRED_FIELDS: Record = { vertex: [], // Has special validation logic (either/or fields) "vscode-lm": [], // Has nested object validation "virtual-quota-fallback": [], // Has array validation + minimax: ["minimaxBaseUrl", "minimaxApiKey", "apiModelId"], } From cf49f2e15edf6be6774ee7a51648b4f09535682a Mon Sep 17 00:00:00 2001 From: xiaose Date: Thu, 23 Oct 2025 12:53:17 +0800 Subject: [PATCH 06/17] feat: url --- src/api/providers/minimax.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/providers/minimax.ts b/src/api/providers/minimax.ts index c6895f16f56..8a8e8c14e5b 100644 --- a/src/api/providers/minimax.ts +++ b/src/api/providers/minimax.ts @@ -13,7 +13,7 @@ export class MiniMaxHandler extends BaseOpenAiCompatibleProvider apiKey: options.minimaxApiKey, defaultProviderModelId: minimaxDefaultModelId, providerModels: minimaxModels, - defaultTemperature: 0, + defaultTemperature: 1.0, }) } } From bf9cdd7f4b2d6d1049dfb0c025ddef394fea2b49 Mon Sep 17 00:00:00 2001 From: xiaose Date: Thu, 23 Oct 2025 14:30:03 +0800 Subject: [PATCH 07/17] feat: format code --- packages/types/src/providers/minimax.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts index 1337d64c3b2..cef8c19caf3 100644 --- a/packages/types/src/providers/minimax.ts +++ b/packages/types/src/providers/minimax.ts @@ -19,4 +19,4 @@ export const minimaxModels = { }, } as const satisfies Record -export const MINIMAX_DEFAULT_TEMPERATURE = 0 +export const MINIMAX_DEFAULT_TEMPERATURE = 1.0 From 7b347069f424582f7982eeaf39a4b8d3d54ae5ab Mon Sep 17 00:00:00 2001 From: xiaose Date: Thu, 23 Oct 2025 15:56:11 +0800 Subject: [PATCH 08/17] feat: param --- packages/types/src/providers/minimax.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts index cef8c19caf3..0a977491fdb 100644 --- a/packages/types/src/providers/minimax.ts +++ b/packages/types/src/providers/minimax.ts @@ -8,8 +8,8 @@ export const minimaxDefaultModelId: MinimaxModelId = "MiniMax-M1" export const minimaxModels = { "MiniMax-M1": { - maxTokens: 25_600, - contextWindow: 1_000_192, + maxTokens: 40_000, + contextWindow: 1_000_000, supportsImages: false, supportsPromptCache: false, inputPrice: 0.4, From c9dd44a83d0038a8b8e0323c725f1c0c900b8fdf Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 25 Oct 2025 11:57:27 +0800 Subject: [PATCH 09/17] feat: code --- cli/src/constants/providers/models.ts | 2 +- cli/src/constants/providers/settings.ts | 2 +- packages/types/src/providers/minimax.ts | 12 +++++++++++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/cli/src/constants/providers/models.ts b/cli/src/constants/providers/models.ts index 546b08401e8..13a87df12fe 100644 --- a/cli/src/constants/providers/models.ts +++ b/cli/src/constants/providers/models.ts @@ -241,7 +241,7 @@ export const DEFAULT_MODEL_IDS: Partial> = { sambanova: sambaNovaDefaultModelId, featherless: featherlessDefaultModelId, deepinfra: "deepseek-ai/DeepSeek-R1-0528", - minimax: "MiniMax-M1", + minimax: "MiniMax-M2", zai: internationalZAiDefaultModelId, roo: rooDefaultModelId, "gemini-cli": geminiCliDefaultModelId, diff --git a/cli/src/constants/providers/settings.ts b/cli/src/constants/providers/settings.ts index 597fefdfef5..da69e7d42c2 100644 --- a/cli/src/constants/providers/settings.ts +++ b/cli/src/constants/providers/settings.ts @@ -841,7 +841,7 @@ export const PROVIDER_DEFAULT_MODELS: Record = { "vercel-ai-gateway": "gpt-4o", "virtual-quota-fallback": "gpt-4o", "human-relay": "human", - minimax: "MiniMax-M1", + minimax: "MiniMax-M2", "fake-ai": "fake-model", } diff --git a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts index 0a977491fdb..0a95f16119f 100644 --- a/packages/types/src/providers/minimax.ts +++ b/packages/types/src/providers/minimax.ts @@ -4,9 +4,19 @@ import type { ModelInfo } from "../model.js" // https://www.minimax.io/platform/document/text_api_intro // https://www.minimax.io/platform/document/pricing export type MinimaxModelId = keyof typeof minimaxModels -export const minimaxDefaultModelId: MinimaxModelId = "MiniMax-M1" +export const minimaxDefaultModelId: MinimaxModelId = "MiniMax-M2" export const minimaxModels = { + "MiniMax-M2": { + maxTokens: 128_000, + contextWindow: 192_000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.3, + outputPrice: 1.2, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + }, "MiniMax-M1": { maxTokens: 40_000, contextWindow: 1_000_000, From 0c8d4180d24f3423c1e949f7bc3297f55ea85540 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 25 Oct 2025 16:43:26 +0800 Subject: [PATCH 10/17] feat: m2 --- packages/types/src/providers/minimax.ts | 10 ---------- src/api/providers/__tests__/minimax.spec.ts | 16 ++++++++-------- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts index 0a95f16119f..a01f45e3bac 100644 --- a/packages/types/src/providers/minimax.ts +++ b/packages/types/src/providers/minimax.ts @@ -17,16 +17,6 @@ export const minimaxModels = { cacheWritesPrice: 0, cacheReadsPrice: 0, }, - "MiniMax-M1": { - maxTokens: 40_000, - contextWindow: 1_000_000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.4, - outputPrice: 2.2, - cacheWritesPrice: 0, - cacheReadsPrice: 0, - }, } as const satisfies Record export const MINIMAX_DEFAULT_TEMPERATURE = 1.0 diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts index 0a7c375db4f..cecd9546076 100644 --- a/src/api/providers/__tests__/minimax.spec.ts +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -63,7 +63,7 @@ describe("MiniMaxHandler", () => { }) it("should return specified model when valid model is provided", () => { - const testModelId: MinimaxModelId = "MiniMax-M1" + const testModelId: MinimaxModelId = "MiniMax-M2" const handlerWithModel = new MiniMaxHandler({ apiModelId: testModelId, minimaxApiKey: "test-minimax-api-key", @@ -73,8 +73,8 @@ describe("MiniMaxHandler", () => { expect(model.info).toEqual(minimaxModels[testModelId]) }) - it("should return MiniMax-M1 model with correct configuration", () => { - const testModelId: MinimaxModelId = "MiniMax-M1" + it("should return MiniMax-M2 model with correct configuration", () => { + const testModelId: MinimaxModelId = "MiniMax-M2" const handlerWithModel = new MiniMaxHandler({ apiModelId: testModelId, minimaxApiKey: "test-minimax-api-key", @@ -136,10 +136,10 @@ describe("MiniMaxHandler", () => { expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: undefined })) }) - it("should default to MiniMax-M1 model", () => { + it("should default to MiniMax-M2 model", () => { const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) const model = handlerDefault.getModel() - expect(model.id).toBe("MiniMax-M1") + expect(model.id).toBe("MiniMax-M2") }) }) @@ -211,7 +211,7 @@ describe("MiniMaxHandler", () => { }) it("createMessage should pass correct parameters to MiniMax client", async () => { - const modelId: MinimaxModelId = "MiniMax-M1" + const modelId: MinimaxModelId = "MiniMax-M2" const modelInfo = minimaxModels[modelId] const handlerWithModel = new MiniMaxHandler({ apiModelId: modelId, @@ -271,8 +271,8 @@ describe("MiniMaxHandler", () => { }) describe("Model Configuration", () => { - it("should correctly configure MiniMax-M1 model properties", () => { - const model = minimaxModels["MiniMax-M1"] + it("should correctly configure MiniMax-M2 model properties", () => { + const model = minimaxModels["MiniMax-M2"] expect(model.maxTokens).toBe(25_600) expect(model.contextWindow).toBe(1_000_192) expect(model.supportsImages).toBe(false) From a1967ac8bc1f630210ecdf9b177c3dd227b81990 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 25 Oct 2025 17:21:04 +0800 Subject: [PATCH 11/17] feat: m2 --- src/api/providers/index.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index c778f29d30b..46454004c83 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -1,4 +1,3 @@ -import { MiniMaxHandler } from "./minimax" export { AnthropicVertexHandler } from "./anthropic-vertex" export { AnthropicHandler } from "./anthropic" export { AwsBedrockHandler } from "./bedrock" From a1b7387fd595c2588bb60cf7dd2d438895a1ba44 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 25 Oct 2025 19:06:46 +0800 Subject: [PATCH 12/17] feat: code --- packages/types/src/index.ts | 5 ++--- src/api/providers/__tests__/minimax.spec.ts | 23 ++++++++------------- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/packages/types/src/index.ts b/packages/types/src/index.ts index dccde7afee4..60b514bc4ae 100644 --- a/packages/types/src/index.ts +++ b/packages/types/src/index.ts @@ -24,6 +24,5 @@ export * from "./type-fu.js" export * from "./vscode.js" export * from "./kilocode/kilocode.js" export * from "./kilocode/native-function-calling.js" -export * from "./usage-tracker.js" // kilocode_change - -export * from "./providers/index.js" +export * from "./usage-tracker.js" +export * from "./providers/index.js" // kilocode_change diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts index cecd9546076..c22ecd5af3a 100644 --- a/src/api/providers/__tests__/minimax.spec.ts +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -82,8 +82,8 @@ describe("MiniMaxHandler", () => { const model = handlerWithModel.getModel() expect(model.id).toBe(testModelId) expect(model.info).toEqual(minimaxModels[testModelId]) - expect(model.info.contextWindow).toBe(1_000_192) - expect(model.info.maxTokens).toBe(25_600) + expect(model.info.contextWindow).toBe(192_000) + expect(model.info.maxTokens).toBe(128_000) expect(model.info.supportsPromptCache).toBe(false) }) }) @@ -131,11 +131,6 @@ describe("MiniMaxHandler", () => { expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) }) - it("should use undefined as default API key when none is specified", () => { - new MiniMaxHandler({}) - expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: undefined })) - }) - it("should default to MiniMax-M2 model", () => { const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) const model = handlerDefault.getModel() @@ -238,7 +233,7 @@ describe("MiniMaxHandler", () => { expect.objectContaining({ model: modelId, max_tokens: modelInfo.maxTokens, - temperature: 0, + temperature: 1, messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), stream: true, stream_options: { include_usage: true }, @@ -247,7 +242,7 @@ describe("MiniMaxHandler", () => { ) }) - it("should use temperature 0 by default", async () => { + it("should use temperature 1 by default", async () => { mockCreate.mockImplementationOnce(() => { return { [Symbol.asyncIterator]: () => ({ @@ -263,7 +258,7 @@ describe("MiniMaxHandler", () => { expect(mockCreate).toHaveBeenCalledWith( expect.objectContaining({ - temperature: 0, + temperature: 1, }), undefined, ) @@ -273,12 +268,12 @@ describe("MiniMaxHandler", () => { describe("Model Configuration", () => { it("should correctly configure MiniMax-M2 model properties", () => { const model = minimaxModels["MiniMax-M2"] - expect(model.maxTokens).toBe(25_600) - expect(model.contextWindow).toBe(1_000_192) + expect(model.maxTokens).toBe(128_000) + expect(model.contextWindow).toBe(192_000) expect(model.supportsImages).toBe(false) expect(model.supportsPromptCache).toBe(false) - expect(model.inputPrice).toBe(0.4) - expect(model.outputPrice).toBe(2.2) + expect(model.inputPrice).toBe(0.3) + expect(model.outputPrice).toBe(1.2) }) }) }) From 047df331dc334eb23c08bc246cd251c867b155d0 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 25 Oct 2025 19:19:42 +0800 Subject: [PATCH 13/17] feat: code --- packages/types/src/index.ts | 5 +++-- .../src/components/kilocode/hooks/useProviderModels.ts | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/types/src/index.ts b/packages/types/src/index.ts index 60b514bc4ae..dccde7afee4 100644 --- a/packages/types/src/index.ts +++ b/packages/types/src/index.ts @@ -24,5 +24,6 @@ export * from "./type-fu.js" export * from "./vscode.js" export * from "./kilocode/kilocode.js" export * from "./kilocode/native-function-calling.js" -export * from "./usage-tracker.js" -export * from "./providers/index.js" // kilocode_change +export * from "./usage-tracker.js" // kilocode_change + +export * from "./providers/index.js" diff --git a/webview-ui/src/components/kilocode/hooks/useProviderModels.ts b/webview-ui/src/components/kilocode/hooks/useProviderModels.ts index 66d0c0f7938..26c7ae75408 100644 --- a/webview-ui/src/components/kilocode/hooks/useProviderModels.ts +++ b/webview-ui/src/components/kilocode/hooks/useProviderModels.ts @@ -49,9 +49,9 @@ import { deepInfraDefaultModelId, cerebrasModels, cerebrasDefaultModelId, - ovhCloudAiEndpointsDefaultModelId, + ovhCloudAiEndpointsDefaultModelId, // kilocode_change minimaxModels, - minimaxDefaultModelId, // kilocode_change + minimaxDefaultModelId, } from "@roo-code/types" import type { ModelRecord, RouterModels } from "@roo/api" import { useRouterModels } from "../../ui/hooks/useRouterModels" From 77139b82aae0595981cefacd4bbbdabf9799e4f7 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 25 Oct 2025 20:07:19 +0800 Subject: [PATCH 14/17] feat: code --- cli/src/config/schema.json | 63 +++++++++++++++++++ .../providers/__tests__/models.test.ts | 1 + cli/src/constants/providers/models.ts | 7 +++ src/shared/ProfileValidator.ts | 1 + 4 files changed, 72 insertions(+) diff --git a/cli/src/config/schema.json b/cli/src/config/schema.json index 78d8f3ae49e..fa3791374e8 100644 --- a/cli/src/config/schema.json +++ b/cli/src/config/schema.json @@ -1208,6 +1208,27 @@ } } }, + { + "if": { + "properties": { "provider": { "const": "minimax" } } + }, + "then": { + "properties": { + "minimaxBaseUrl": { + "type": "string", + "description": "MiniMax base URL" + }, + "minimaxApiKey": { + "type": "string", + "description": "MiniMax API key" + }, + "apiModelId": { + "type": "string", + "description": "MiniMax model ID" + } + } + } + }, { "if": { "properties": { "provider": { "const": "doubao" } } @@ -1295,6 +1316,48 @@ } } }, + { + "if": { + "properties": { + "provider": { "const": "minimax" }, + "minimaxBaseUrl": { "type": "string", "minLength": 1 } + }, + "required": ["minimaxBaseUrl"] + }, + "then": { + "properties": { + "minimaxBaseUrl": { "minLength": 1 } + } + } + }, + { + "if": { + "properties": { + "provider": { "const": "minimax" }, + "minimaxApiKey": { "type": "string", "minLength": 1 } + }, + "required": ["minimaxApiKey"] + }, + "then": { + "properties": { + "minimaxApiKey": { "minLength": 10 } + } + } + }, + { + "if": { + "properties": { + "provider": { "const": "minimax" }, + "apiModelId": { "type": "string", "minLength": 1 } + }, + "required": ["apiModelId"] + }, + "then": { + "properties": { + "apiModelId": { "minLength": 1 } + } + } + }, { "if": { "properties": { "provider": { "const": "chutes" } } diff --git a/cli/src/constants/providers/__tests__/models.test.ts b/cli/src/constants/providers/__tests__/models.test.ts index b6b5dabaedc..b126f9b3ace 100644 --- a/cli/src/constants/providers/__tests__/models.test.ts +++ b/cli/src/constants/providers/__tests__/models.test.ts @@ -42,6 +42,7 @@ describe("Static Provider Models", () => { "cerebras", "sambanova", "zai", + "minimax", "fireworks", "featherless", "roo", diff --git a/cli/src/constants/providers/models.ts b/cli/src/constants/providers/models.ts index 13a87df12fe..8d482d6ebce 100644 --- a/cli/src/constants/providers/models.ts +++ b/cli/src/constants/providers/models.ts @@ -45,6 +45,8 @@ import { claudeCodeDefaultModelId, geminiCliModels, geminiCliDefaultModelId, + minimaxModels, + minimaxDefaultModelId, } from "@roo-code/types" /** @@ -305,6 +307,11 @@ export function getModelsByProvider(params: { models: moonshotModels as ModelRecord, defaultModel: moonshotDefaultModelId, } + case "minimax": + return { + models: minimaxModels as ModelRecord, + defaultModel: minimaxDefaultModelId, + } case "deepseek": return { models: deepSeekModels as ModelRecord, diff --git a/src/shared/ProfileValidator.ts b/src/shared/ProfileValidator.ts index 9b13047d016..4c1eb6d919c 100644 --- a/src/shared/ProfileValidator.ts +++ b/src/shared/ProfileValidator.ts @@ -66,6 +66,7 @@ export class ProfileValidator { case "deepseek": case "xai": case "zai": + case "minimax": case "groq": case "sambanova": case "chutes": From d33a1316ecc67fc02f332c7abbb3aa4546b89c77 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 1 Nov 2025 20:49:56 +0800 Subject: [PATCH 15/17] feat: support anthropic --- cli/src/constants/providers/settings.ts | 2 +- cli/src/types/messages.ts | 2 +- packages/types/src/provider-settings.ts | 2 +- packages/types/src/providers/minimax.ts | 1 + src/api/providers/__tests__/minimax.spec.ts | 16 +- src/api/providers/minimax.ts | 251 +++++++++++++++++- src/api/transform/stream.ts | 13 + src/core/task/Task.ts | 20 ++ .../components/settings/providers/MiniMax.tsx | 4 +- 9 files changed, 288 insertions(+), 23 deletions(-) diff --git a/cli/src/constants/providers/settings.ts b/cli/src/constants/providers/settings.ts index da69e7d42c2..e6a2dcdf821 100644 --- a/cli/src/constants/providers/settings.ts +++ b/cli/src/constants/providers/settings.ts @@ -781,7 +781,7 @@ export const getProviderSettings = (provider: ProviderName, config: ProviderSett ] case "minimax": return [ - createFieldConfig("minimaxBaseUrl", config, "https://api.minimax.io/v1"), + createFieldConfig("minimaxBaseUrl", config, "https://api.minimax.io/anthropic"), createFieldConfig("minimaxApiKey", config), ] case "fake-ai": diff --git a/cli/src/types/messages.ts b/cli/src/types/messages.ts index 7d4f5c35456..2c0ec856d6e 100644 --- a/cli/src/types/messages.ts +++ b/cli/src/types/messages.ts @@ -322,7 +322,7 @@ export interface ProviderSettings { vercelAiGatewayModelId?: string // MiniMax AI - minimaxBaseUrl?: "https://api.minimax.io/v1" | "https://api.minimaxi.com/v1" + minimaxBaseUrl?: "https://api.minimax.io/anthropic" | "https://api.minimaxi.com/anthropic" minimaxApiKey?: string // Allow additional fields for extensibility diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 943aabcbac7..5fd14b722a7 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -415,7 +415,7 @@ const sambaNovaSchema = apiModelIdProviderModelSchema.extend({ const minimaxSchema = apiModelIdProviderModelSchema.extend({ minimaxBaseUrl: z - .union([z.literal("https://api.minimax.io/v1"), z.literal("https://api.minimaxi.com/v1")]) + .union([z.literal("https://api.minimax.io/anthropic"), z.literal("https://api.minimaxi.com/anthropic")]) .optional(), minimaxApiKey: z.string().optional(), }) diff --git a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts index a01f45e3bac..d331012a585 100644 --- a/packages/types/src/providers/minimax.ts +++ b/packages/types/src/providers/minimax.ts @@ -20,3 +20,4 @@ export const minimaxModels = { } as const satisfies Record export const MINIMAX_DEFAULT_TEMPERATURE = 1.0 +export const MINIMAX_DEFAULT_MAX_TOKENS = 16384 diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts index c22ecd5af3a..849f0d773ff 100644 --- a/src/api/providers/__tests__/minimax.spec.ts +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -37,7 +37,7 @@ describe("MiniMaxHandler", () => { beforeEach(() => { handler = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key", - minimaxBaseUrl: "https://api.minimax.io/v1", + minimaxBaseUrl: "https://api.minimax.io/anthropic", }) }) @@ -45,7 +45,7 @@ describe("MiniMaxHandler", () => { new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) expect(OpenAI).toHaveBeenCalledWith( expect.objectContaining({ - baseURL: "https://api.minimax.io/v1", + baseURL: "https://api.minimax.io/anthropic", }), ) }) @@ -92,21 +92,23 @@ describe("MiniMaxHandler", () => { beforeEach(() => { handler = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key", - minimaxBaseUrl: "https://api.minimaxi.com/v1", + minimaxBaseUrl: "https://api.minimaxi.com/anthropic", }) }) it("should use the correct China MiniMax base URL", () => { new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key", - minimaxBaseUrl: "https://api.minimaxi.com/v1", + minimaxBaseUrl: "https://api.minimaxi.com/anthropic", }) - expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.minimaxi.com/v1" })) + expect(OpenAI).toHaveBeenCalledWith( + expect.objectContaining({ baseURL: "https://api.minimaxi.com/anthropic" }), + ) }) it("should use the provided API key for China", () => { const minimaxApiKey = "test-minimax-api-key" - new MiniMaxHandler({ minimaxApiKey, minimaxBaseUrl: "https://api.minimaxi.com/v1" }) + new MiniMaxHandler({ minimaxApiKey, minimaxBaseUrl: "https://api.minimaxi.com/anthropic" }) expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey })) }) @@ -122,7 +124,7 @@ describe("MiniMaxHandler", () => { const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) expect(OpenAI).toHaveBeenCalledWith( expect.objectContaining({ - baseURL: "https://api.minimax.io/v1", + baseURL: "https://api.minimax.io/anthropic", }), ) diff --git a/src/api/providers/minimax.ts b/src/api/providers/minimax.ts index 8a8e8c14e5b..288c00ac324 100644 --- a/src/api/providers/minimax.ts +++ b/src/api/providers/minimax.ts @@ -1,19 +1,248 @@ -import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types" +import { Anthropic } from "@anthropic-ai/sdk" +import { Stream as AnthropicStream } from "@anthropic-ai/sdk/streaming" + +import { + type ModelInfo, + MINIMAX_DEFAULT_MAX_TOKENS, + MinimaxModelId, + minimaxDefaultModelId, + minimaxModels, +} from "@roo-code/types" import type { ApiHandlerOptions } from "../../shared/api" -import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider" +import { ApiStream } from "../transform/stream" +import { getModelParams } from "../transform/model-params" + +import { BaseProvider } from "./base-provider" +import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" +import { calculateApiCostAnthropic } from "../../shared/cost" + +export class MiniMaxHandler extends BaseProvider implements SingleCompletionHandler { + private options: ApiHandlerOptions + private client: Anthropic -export class MiniMaxHandler extends BaseOpenAiCompatibleProvider { constructor(options: ApiHandlerOptions) { - super({ - ...options, - providerName: "MiniMax", - baseURL: options.minimaxBaseUrl ?? "https://api.minimax.io/v1", - apiKey: options.minimaxApiKey, - defaultProviderModelId: minimaxDefaultModelId, - providerModels: minimaxModels, - defaultTemperature: 1.0, + super() + this.options = options + + this.client = new Anthropic({ + baseURL: this.options.minimaxBaseUrl || "https://api.minimax.io/anthropic", + apiKey: this.options.minimaxApiKey, + }) + } + + async *createMessage( + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[], + metadata?: ApiHandlerCreateMessageMetadata, + ): ApiStream { + let stream: AnthropicStream + let { id: modelId, maxTokens, temperature } = this.getModel() + + stream = await this.client.messages.create({ + model: modelId, + max_tokens: maxTokens ?? MINIMAX_DEFAULT_MAX_TOKENS, + temperature, + system: [{ text: systemPrompt, type: "text" }], + messages, + stream: true, + }) + + let inputTokens = 0 + let outputTokens = 0 + let cacheWriteTokens = 0 + let cacheReadTokens = 0 + let thinkingDeltaAccumulator = "" + let thinkText = "" + let thinkSignature = "" + for await (const chunk of stream) { + switch (chunk.type) { + case "message_start": { + // Tells us cache reads/writes/input/output. + const { + input_tokens = 0, + output_tokens = 0, + cache_creation_input_tokens, + cache_read_input_tokens, + } = chunk.message.usage + + yield { + type: "usage", + inputTokens: input_tokens, + outputTokens: output_tokens, + cacheWriteTokens: cache_creation_input_tokens || undefined, + cacheReadTokens: cache_read_input_tokens || undefined, + } + + inputTokens += input_tokens + outputTokens += output_tokens + cacheWriteTokens += cache_creation_input_tokens || 0 + cacheReadTokens += cache_read_input_tokens || 0 + + break + } + case "message_delta": + // Tells us stop_reason, stop_sequence, and output tokens + // along the way and at the end of the message. + yield { + type: "usage", + inputTokens: 0, + outputTokens: chunk.usage.output_tokens || 0, + } + + break + case "message_stop": + // No usage data, just an indicator that the message is done. + break + case "content_block_start": + switch (chunk.content_block.type) { + case "thinking": + // We may receive multiple text blocks, in which + // case just insert a line break between them. + if (chunk.index > 0) { + yield { type: "reasoning", text: "\n" } + } + + yield { type: "reasoning", text: chunk.content_block.thinking } + thinkText = chunk.content_block.thinking + thinkSignature = chunk.content_block.signature + if (thinkText && thinkSignature) { + yield { + type: "ant_thinking", + thinking: thinkText, + signature: thinkSignature, + } + } + break + case "redacted_thinking": + // Content is encrypted, and we don't want to pass placeholder text back to the API + yield { + type: "reasoning", + text: "[Redacted thinking block]", + } + yield { + type: "ant_redacted_thinking", + data: chunk.content_block.data, + } + break + case "text": + // We may receive multiple text blocks, in which + // case just insert a line break between them. + if (chunk.index > 0) { + yield { type: "text", text: "\n" } + } + + yield { type: "text", text: chunk.content_block.text } + break + } + break + case "content_block_delta": + switch (chunk.delta.type) { + case "thinking_delta": + yield { type: "reasoning", text: chunk.delta.thinking } + thinkingDeltaAccumulator += chunk.delta.thinking + break + case "signature_delta": + // It's used when sending the thinking block back to the API + // API expects this in completed form, not as array of deltas + if (thinkingDeltaAccumulator && chunk.delta.signature) { + yield { + type: "ant_thinking", + thinking: thinkingDeltaAccumulator, + signature: chunk.delta.signature, + } + } + break + case "text_delta": + yield { type: "text", text: chunk.delta.text } + break + } + + break + case "content_block_stop": + break + } + } + + if (inputTokens > 0 || outputTokens > 0 || cacheWriteTokens > 0 || cacheReadTokens > 0) { + yield { + type: "usage", + inputTokens: 0, + outputTokens: 0, + totalCost: calculateApiCostAnthropic( + this.getModel().info, + inputTokens, + outputTokens, + cacheWriteTokens, + cacheReadTokens, + ), + } + } + } + + getModel() { + const modelId = this.options.apiModelId + let id = modelId && modelId in minimaxModels ? (modelId as MinimaxModelId) : minimaxDefaultModelId + let info: ModelInfo = minimaxModels[id] + + const params = getModelParams({ + format: "anthropic", + modelId: id, + model: info, + settings: this.options, + }) + + // The `:thinking` suffix indicates that the model is a "Hybrid" + // reasoning model and that reasoning is required to be enabled. + // The actual model ID honored by Anthropic's API does not have this + // suffix. + return { + id, + info, + ...params, + } + } + + async completePrompt(prompt: string) { + let { id: model, temperature } = this.getModel() + + const message = await this.client.messages.create({ + model, + max_tokens: MINIMAX_DEFAULT_MAX_TOKENS, + thinking: undefined, + temperature, + messages: [{ role: "user", content: prompt }], + stream: false, }) + + const content = message.content.find(({ type }) => type === "text") + return content?.type === "text" ? content.text : "" + } + + /** + * Counts tokens for the given content using Anthropic's API + * + * @param content The content blocks to count tokens for + * @returns A promise resolving to the token count + */ + override async countTokens(content: Array): Promise { + try { + // Use the current model + const { id: model } = this.getModel() + + const response = await this.client.messages.countTokens({ + model, + messages: [{ role: "user", content: content }], + }) + + return response.input_tokens + } catch (error) { + // Log error but fallback to tiktoken estimation + console.warn("Anthropic token counting failed, using fallback", error) + + // Use the base provider's implementation as fallback + return super.countTokens(content) + } } } diff --git a/src/api/transform/stream.ts b/src/api/transform/stream.ts index 031639cd29e..0246619b5dc 100644 --- a/src/api/transform/stream.ts +++ b/src/api/transform/stream.ts @@ -9,6 +9,8 @@ export type ApiStreamChunk = | ApiStreamReasoningChunk | ApiStreamGroundingChunk | ApiStreamError + | ApiStreamAnthropicThinkingChunk + | ApiStreamAnthropicRedactedThinkingChunk export interface ApiStreamError { type: "error" @@ -26,6 +28,17 @@ export interface ApiStreamReasoningChunk { text: string } +export interface ApiStreamAnthropicThinkingChunk { + type: "ant_thinking" + thinking: string + signature: string +} + +export interface ApiStreamAnthropicRedactedThinkingChunk { + type: "ant_redacted_thinking" + data: string +} + export interface ApiStreamUsageChunk { type: "usage" inputTokens: number diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index d92e7a38ef8..1c9f597143d 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -2057,6 +2057,10 @@ export class Task extends EventEmitter implements TaskLike { let reasoningMessage = "" let pendingGroundingSources: GroundingSource[] = [] this.isStreaming = true + const antThinkingContent: ( + | Anthropic.Messages.RedactedThinkingBlock + | Anthropic.Messages.ThinkingBlock + )[] = [] try { const iterator = stream[Symbol.asyncIterator]() @@ -2143,6 +2147,19 @@ export class Task extends EventEmitter implements TaskLike { presentAssistantMessage(this) break } + case "ant_thinking": + antThinkingContent.push({ + type: "thinking", + thinking: chunk.thinking, + signature: chunk.signature, + }) + break + case "ant_redacted_thinking": + antThinkingContent.push({ + type: "redacted_thinking", + data: chunk.data, + }) + break } if (this.abort) { @@ -2474,6 +2491,9 @@ export class Task extends EventEmitter implements TaskLike { // kilocode_change start: also add tool calls to history const assistantMessageContent = new Array() + if (antThinkingContent.length > 0) { + assistantMessageContent.push(...antThinkingContent) + } if (assistantMessage) { assistantMessageContent.push({ type: "text", text: assistantMessage }) } diff --git a/webview-ui/src/components/settings/providers/MiniMax.tsx b/webview-ui/src/components/settings/providers/MiniMax.tsx index 4055be7d179..864dad1da94 100644 --- a/webview-ui/src/components/settings/providers/MiniMax.tsx +++ b/webview-ui/src/components/settings/providers/MiniMax.tsx @@ -36,10 +36,10 @@ export const MiniMax = ({ apiConfiguration, setApiConfigurationField }: MiniMaxP value={apiConfiguration.minimaxBaseUrl} onChange={handleInputChange("minimaxBaseUrl")} className={cn("w-full")}> - + api.minimax.io - + api.minimaxi.com From c4ff9fa691739cddf9baac8707c328672d928b10 Mon Sep 17 00:00:00 2001 From: xiaose Date: Sat, 1 Nov 2025 20:52:26 +0800 Subject: [PATCH 16/17] feat: code --- webview-ui/src/components/settings/providers/MiniMax.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/providers/MiniMax.tsx b/webview-ui/src/components/settings/providers/MiniMax.tsx index 864dad1da94..6b7139fbed4 100644 --- a/webview-ui/src/components/settings/providers/MiniMax.tsx +++ b/webview-ui/src/components/settings/providers/MiniMax.tsx @@ -59,7 +59,7 @@ export const MiniMax = ({ apiConfiguration, setApiConfigurationField }: MiniMaxP {!apiConfiguration?.minimaxApiKey && ( Date: Sat, 1 Nov 2025 21:41:52 +0800 Subject: [PATCH 17/17] feat: ut --- src/api/providers/__tests__/minimax.spec.ts | 475 +++++++++++--------- src/api/providers/minimax.ts | 9 +- 2 files changed, 270 insertions(+), 214 deletions(-) diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts index 849f0d773ff..e4f900293aa 100644 --- a/src/api/providers/__tests__/minimax.spec.ts +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -1,281 +1,336 @@ // npx vitest run src/api/providers/__tests__/minimax.spec.ts -// kilocode_change start -vitest.mock("vscode", () => ({ - workspace: { - getConfiguration: vitest.fn().mockReturnValue({ - get: vitest.fn().mockReturnValue(600), // Default timeout in seconds - }), - }, -})) -// kilocode_change end - -import OpenAI from "openai" -import { Anthropic } from "@anthropic-ai/sdk" - -import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types" - import { MiniMaxHandler } from "../minimax" +import { ApiHandlerOptions } from "../../../shared/api" +import { + minimaxDefaultModelId, + minimaxModels, + MINIMAX_DEFAULT_TEMPERATURE, + MINIMAX_DEFAULT_MAX_TOKENS, +} from "@roo-code/types" + +const mockCreate = vitest.fn() + +vitest.mock("@anthropic-ai/sdk", () => { + const mockAnthropicConstructor = vitest.fn().mockImplementation(() => ({ + messages: { + create: mockCreate.mockImplementation(async (options) => { + if (!options.stream) { + return { + id: "test-completion", + content: [{ type: "text", text: "Test response from MiniMax" }], + role: "assistant", + model: options.model, + usage: { + input_tokens: 10, + output_tokens: 5, + }, + } + } + return { + async *[Symbol.asyncIterator]() { + yield { + type: "message_start", + message: { + usage: { + input_tokens: 100, + output_tokens: 50, + cache_creation_input_tokens: 0, + cache_read_input_tokens: 0, + }, + }, + } + yield { + type: "content_block_start", + index: 0, + content_block: { + type: "text", + text: "Hello", + }, + } + yield { + type: "content_block_delta", + delta: { + type: "text_delta", + text: " from MiniMax", + }, + } + }, + } + }), + countTokens: vitest.fn().mockResolvedValue({ input_tokens: 42 }), + }, + })) -vitest.mock("openai", () => { - const createMock = vitest.fn() return { - default: vitest.fn(() => ({ chat: { completions: { create: createMock } } })), + Anthropic: mockAnthropicConstructor, } }) +// Import after mock +import { Anthropic } from "@anthropic-ai/sdk" + +const mockAnthropicConstructor = vitest.mocked(Anthropic) + describe("MiniMaxHandler", () => { let handler: MiniMaxHandler - let mockCreate: any + let mockOptions: ApiHandlerOptions beforeEach(() => { + mockOptions = { + minimaxApiKey: "test-minimax-api-key", + apiModelId: minimaxDefaultModelId, + } + handler = new MiniMaxHandler(mockOptions) vitest.clearAllMocks() - mockCreate = (OpenAI as unknown as any)().chat.completions.create }) - describe("International MiniMax (default)", () => { - beforeEach(() => { - handler = new MiniMaxHandler({ - minimaxApiKey: "test-minimax-api-key", - minimaxBaseUrl: "https://api.minimax.io/anthropic", - }) + describe("constructor", () => { + it("should initialize with provided options", () => { + expect(handler).toBeInstanceOf(MiniMaxHandler) + expect(handler.getModel().id).toBe(minimaxDefaultModelId) }) - it("should use the correct international MiniMax base URL by default", () => { - new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) - expect(OpenAI).toHaveBeenCalledWith( + it("should use default international base URL", () => { + new MiniMaxHandler(mockOptions) + expect(mockAnthropicConstructor).toHaveBeenCalledWith( expect.objectContaining({ baseURL: "https://api.minimax.io/anthropic", + apiKey: "test-minimax-api-key", }), ) }) - it("should use the provided API key", () => { - const minimaxApiKey = "test-minimax-api-key" - new MiniMaxHandler({ minimaxApiKey }) - expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey })) + it("should use custom base URL if provided", () => { + const customBaseUrl = "https://api.minimaxi.com/anthropic" + new MiniMaxHandler({ + ...mockOptions, + minimaxBaseUrl: customBaseUrl, + }) + expect(mockAnthropicConstructor).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: customBaseUrl, + }), + ) }) - it("should return default model when no model is specified", () => { - const model = handler.getModel() - expect(model.id).toBe(minimaxDefaultModelId) - expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + it("should use China base URL when provided", () => { + const chinaBaseUrl = "https://api.minimaxi.com/anthropic" + new MiniMaxHandler({ + ...mockOptions, + minimaxBaseUrl: chinaBaseUrl, + }) + expect(mockAnthropicConstructor).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: chinaBaseUrl, + apiKey: "test-minimax-api-key", + }), + ) }) - it("should return specified model when valid model is provided", () => { - const testModelId: MinimaxModelId = "MiniMax-M2" - const handlerWithModel = new MiniMaxHandler({ - apiModelId: testModelId, - minimaxApiKey: "test-minimax-api-key", + it("should initialize without API key", () => { + const handlerWithoutKey = new MiniMaxHandler({ + ...mockOptions, + minimaxApiKey: undefined, }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual(minimaxModels[testModelId]) + expect(handlerWithoutKey).toBeInstanceOf(MiniMaxHandler) }) + }) - it("should return MiniMax-M2 model with correct configuration", () => { - const testModelId: MinimaxModelId = "MiniMax-M2" - const handlerWithModel = new MiniMaxHandler({ - apiModelId: testModelId, - minimaxApiKey: "test-minimax-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual(minimaxModels[testModelId]) - expect(model.info.contextWindow).toBe(192_000) - expect(model.info.maxTokens).toBe(128_000) - expect(model.info.supportsPromptCache).toBe(false) + describe("createMessage", () => { + const systemPrompt = "You are a helpful assistant." + + it("should stream messages successfully", async () => { + const stream = handler.createMessage(systemPrompt, [ + { + role: "user", + content: [{ type: "text" as const, text: "Hello MiniMax" }], + }, + ]) + + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Verify usage information + const usageChunk = chunks.find((chunk) => chunk.type === "usage") + expect(usageChunk).toBeDefined() + expect(usageChunk?.inputTokens).toBe(100) + expect(usageChunk?.outputTokens).toBe(50) + + // Verify text content + const textChunks = chunks.filter((chunk) => chunk.type === "text") + expect(textChunks).toHaveLength(2) + expect(textChunks[0].text).toBe("Hello") + expect(textChunks[1].text).toBe(" from MiniMax") + + // Verify API call + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: minimaxDefaultModelId, + max_tokens: 38400, + temperature: MINIMAX_DEFAULT_TEMPERATURE, + system: [{ text: systemPrompt, type: "text" }], + stream: true, + }), + ) + }) + + it("should handle multiple messages", async () => { + const stream = handler.createMessage(systemPrompt, [ + { + role: "user", + content: [{ type: "text" as const, text: "First message" }], + }, + { + role: "assistant", + content: [{ type: "text" as const, text: "Response" }], + }, + { + role: "user", + content: [{ type: "text" as const, text: "Second message" }], + }, + ]) + + const chunks: any[] = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.length).toBeGreaterThan(0) + expect(mockCreate).toHaveBeenCalled() }) }) - describe("China MiniMax", () => { - beforeEach(() => { - handler = new MiniMaxHandler({ - minimaxApiKey: "test-minimax-api-key", - minimaxBaseUrl: "https://api.minimaxi.com/anthropic", + describe("completePrompt", () => { + it("should complete prompt successfully", async () => { + const result = await handler.completePrompt("Test prompt") + expect(result).toBe("Test response from MiniMax") + expect(mockCreate).toHaveBeenCalledWith({ + model: minimaxDefaultModelId, + messages: [{ role: "user", content: "Test prompt" }], + max_tokens: MINIMAX_DEFAULT_MAX_TOKENS, + temperature: MINIMAX_DEFAULT_TEMPERATURE, + thinking: undefined, + stream: false, }) }) - it("should use the correct China MiniMax base URL", () => { - new MiniMaxHandler({ - minimaxApiKey: "test-minimax-api-key", - minimaxBaseUrl: "https://api.minimaxi.com/anthropic", - }) - expect(OpenAI).toHaveBeenCalledWith( - expect.objectContaining({ baseURL: "https://api.minimaxi.com/anthropic" }), - ) + it("should handle API errors", async () => { + mockCreate.mockRejectedValueOnce(new Error("MiniMax API Error")) + await expect(handler.completePrompt("Test prompt")).rejects.toThrow("MiniMax API Error") }) - it("should use the provided API key for China", () => { - const minimaxApiKey = "test-minimax-api-key" - new MiniMaxHandler({ minimaxApiKey, minimaxBaseUrl: "https://api.minimaxi.com/anthropic" }) - expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey })) + it("should handle non-text content", async () => { + mockCreate.mockImplementationOnce(async () => ({ + content: [{ type: "image" }], + })) + const result = await handler.completePrompt("Test prompt") + expect(result).toBe("") }) - it("should return default model when no model is specified", () => { - const model = handler.getModel() - expect(model.id).toBe(minimaxDefaultModelId) - expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + it("should handle empty response", async () => { + mockCreate.mockImplementationOnce(async () => ({ + content: [{ type: "text", text: "" }], + })) + const result = await handler.completePrompt("Test prompt") + expect(result).toBe("") }) }) - describe("Default behavior", () => { - it("should default to international base URL when none is specified", () => { - const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) - expect(OpenAI).toHaveBeenCalledWith( - expect.objectContaining({ - baseURL: "https://api.minimax.io/anthropic", - }), - ) - - const model = handlerDefault.getModel() + describe("getModel", () => { + it("should return default model if no model ID is provided", () => { + const handlerWithoutModel = new MiniMaxHandler({ + ...mockOptions, + apiModelId: undefined, + }) + const model = handlerWithoutModel.getModel() expect(model.id).toBe(minimaxDefaultModelId) - expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + expect(model.info).toBeDefined() }) - it("should default to MiniMax-M2 model", () => { - const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) - const model = handlerDefault.getModel() + it("should return MiniMax-M2 as default model", () => { + const model = handler.getModel() expect(model.id).toBe("MiniMax-M2") + expect(model.info).toEqual(minimaxModels["MiniMax-M2"]) }) - }) - describe("API Methods", () => { - beforeEach(() => { - handler = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + it("should return correct model configuration for MiniMax-M2", () => { + const model = handler.getModel() + expect(model.id).toBe("MiniMax-M2") + expect(model.info.maxTokens).toBe(128_000) + expect(model.info.contextWindow).toBe(192_000) + expect(model.info.supportsImages).toBe(false) + expect(model.info.supportsPromptCache).toBe(false) + expect(model.info.inputPrice).toBe(0.3) + expect(model.info.outputPrice).toBe(1.2) }) - it("completePrompt method should return text from MiniMax API", async () => { - const expectedResponse = "This is a test response from MiniMax" - mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] }) - const result = await handler.completePrompt("test prompt") - expect(result).toBe(expectedResponse) + it("should use correct default temperature", () => { + const model = handler.getModel() + expect(model.temperature).toBe(0) }) - it("should handle errors in completePrompt", async () => { - const errorMessage = "MiniMax API error" - mockCreate.mockRejectedValueOnce(new Error(errorMessage)) - await expect(handler.completePrompt("test prompt")).rejects.toThrow() + it("should use correct default max tokens", () => { + const model = handler.getModel() + expect(model.maxTokens).toBe(38400) }) + }) - it("createMessage should yield text content from stream", async () => { - const testContent = "This is test content from MiniMax stream" - - mockCreate.mockImplementationOnce(() => { - return { - [Symbol.asyncIterator]: () => ({ - next: vitest - .fn() - .mockResolvedValueOnce({ - done: false, - value: { choices: [{ delta: { content: testContent } }] }, - }) - .mockResolvedValueOnce({ done: true }), - }), - } - }) - - const stream = handler.createMessage("system prompt", []) - const firstChunk = await stream.next() + describe("countTokens", () => { + it("should count tokens using Anthropic API", async () => { + // Create a fresh handler to get the Anthropic instance + const testHandler = new MiniMaxHandler(mockOptions) + const anthropicInstance = + mockAnthropicConstructor.mock.results[mockAnthropicConstructor.mock.results.length - 1]?.value - expect(firstChunk.done).toBe(false) - expect(firstChunk.value).toEqual({ type: "text", text: testContent }) - }) + const content = [{ type: "text" as const, text: "Test content for MiniMax" }] + const result = await testHandler.countTokens(content) - it("createMessage should yield usage data from stream", async () => { - mockCreate.mockImplementationOnce(() => { - return { - [Symbol.asyncIterator]: () => ({ - next: vitest - .fn() - .mockResolvedValueOnce({ - done: false, - value: { - choices: [{ delta: {} }], - usage: { prompt_tokens: 10, completion_tokens: 20 }, - }, - }) - .mockResolvedValueOnce({ done: true }), - }), - } + expect(result).toBe(42) + expect(anthropicInstance?.messages.countTokens).toHaveBeenCalledWith({ + model: minimaxDefaultModelId, + messages: [{ role: "user", content }], }) - - const stream = handler.createMessage("system prompt", []) - const firstChunk = await stream.next() - - expect(firstChunk.done).toBe(false) - expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 }) }) - it("createMessage should pass correct parameters to MiniMax client", async () => { - const modelId: MinimaxModelId = "MiniMax-M2" - const modelInfo = minimaxModels[modelId] - const handlerWithModel = new MiniMaxHandler({ - apiModelId: modelId, - minimaxApiKey: "test-minimax-api-key", - }) - - mockCreate.mockImplementationOnce(() => { - return { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - } - }) + it("should fallback to base implementation on error", async () => { + // Create a fresh handler to get the Anthropic instance + const testHandler = new MiniMaxHandler(mockOptions) + const anthropicInstance = + mockAnthropicConstructor.mock.results[mockAnthropicConstructor.mock.results.length - 1]?.value - const systemPrompt = "Test system prompt for MiniMax" - const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for MiniMax" }] + if (anthropicInstance) { + anthropicInstance.messages.countTokens.mockRejectedValueOnce(new Error("API error")) + } - const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) - await messageGenerator.next() + const content = [{ type: "text" as const, text: "Test content" }] + const result = await testHandler.countTokens(content) - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: modelId, - max_tokens: modelInfo.maxTokens, - temperature: 1, - messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), - stream: true, - stream_options: { include_usage: true }, - }), - undefined, - ) + // Should not throw and return some number from fallback + expect(typeof result).toBe("number") }) + }) - it("should use temperature 1 by default", async () => { - mockCreate.mockImplementationOnce(() => { - return { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - } + describe("Model Configuration", () => { + it("should have correct model configuration", () => { + expect(minimaxDefaultModelId).toBe("MiniMax-M2") + expect(minimaxModels["MiniMax-M2"]).toEqual({ + maxTokens: 128_000, + contextWindow: 192_000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.3, + outputPrice: 1.2, + cacheWritesPrice: 0, + cacheReadsPrice: 0, }) - - const messageGenerator = handler.createMessage("test", []) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - temperature: 1, - }), - undefined, - ) }) - }) - describe("Model Configuration", () => { - it("should correctly configure MiniMax-M2 model properties", () => { - const model = minimaxModels["MiniMax-M2"] - expect(model.maxTokens).toBe(128_000) - expect(model.contextWindow).toBe(192_000) - expect(model.supportsImages).toBe(false) - expect(model.supportsPromptCache).toBe(false) - expect(model.inputPrice).toBe(0.3) - expect(model.outputPrice).toBe(1.2) + it("should have correct default constants", () => { + expect(MINIMAX_DEFAULT_TEMPERATURE).toBe(1.0) + expect(MINIMAX_DEFAULT_MAX_TOKENS).toBe(16384) }) }) }) diff --git a/src/api/providers/minimax.ts b/src/api/providers/minimax.ts index 288c00ac324..2f6398bf0a3 100644 --- a/src/api/providers/minimax.ts +++ b/src/api/providers/minimax.ts @@ -4,6 +4,7 @@ import { Stream as AnthropicStream } from "@anthropic-ai/sdk/streaming" import { type ModelInfo, MINIMAX_DEFAULT_MAX_TOKENS, + MINIMAX_DEFAULT_TEMPERATURE, MinimaxModelId, minimaxDefaultModelId, minimaxModels, @@ -38,12 +39,12 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand metadata?: ApiHandlerCreateMessageMetadata, ): ApiStream { let stream: AnthropicStream - let { id: modelId, maxTokens, temperature } = this.getModel() + let { id: modelId, maxTokens } = this.getModel() stream = await this.client.messages.create({ model: modelId, max_tokens: maxTokens ?? MINIMAX_DEFAULT_MAX_TOKENS, - temperature, + temperature: MINIMAX_DEFAULT_TEMPERATURE, system: [{ text: systemPrompt, type: "text" }], messages, stream: true, @@ -205,13 +206,13 @@ export class MiniMaxHandler extends BaseProvider implements SingleCompletionHand } async completePrompt(prompt: string) { - let { id: model, temperature } = this.getModel() + let { id: model } = this.getModel() const message = await this.client.messages.create({ model, max_tokens: MINIMAX_DEFAULT_MAX_TOKENS, thinking: undefined, - temperature, + temperature: MINIMAX_DEFAULT_TEMPERATURE, messages: [{ role: "user", content: prompt }], stream: false, })