diff --git a/cli/docs/PROVIDER_CONFIGURATION.md b/cli/docs/PROVIDER_CONFIGURATION.md
index 20dc6a53ff9..f7869055a86 100644
--- a/cli/docs/PROVIDER_CONFIGURATION.md
+++ b/cli/docs/PROVIDER_CONFIGURATION.md
@@ -45,6 +45,7 @@ This guide provides detailed information on how to configure each provider in Ki
 - [Virtual Quota Fallback](#virtual-quota-fallback)
 - [Human Relay](#human-relay)
 - [Fake AI](#fake-ai)
+- [OVHcloud AI Endpoints](#ovhcloud-ai-endpoints)
 
 ## Introduction
 
@@ -1322,6 +1323,41 @@ Fake AI provider for testing and development.
 
 ---
 
+### OVHcloud AI Endpoints
+
+OVHcloud AI Endpoints inference provider.
+
+**Description**: Use OVHcloud's cloud infrastructure to access various open-source models, with GDPR compliance and data sovereignty.
+
+**Required Field**:
+
+- `ovhCloudAiEndpointsModelId` (text): Model identifier (default: `gpt-oss-120b`)
+
+**Optional Field**:
+
+- `ovhCloudAiEndpointsApiKey` (password): Your OVHcloud AI Endpoints API key
+  If you do not provide an API key, the service can be used for free, subject to a rate limit.
+
+**Example Configuration**:
+
+```json
+{
+  "id": "default",
+  "provider": "ovhcloud",
+  "ovhCloudAiEndpointsApiKey": "your-api-key",
+  "ovhCloudAiEndpointsModelId": "gpt-oss-120b"
+}
+```
+
+**Default Model**: `gpt-oss-120b`
+
+**Notes**:
+
+- Get your API key from https://ovh.com/manager in the `Public Cloud > AI & Machine Learning` section, under `AI Endpoints`.
+- Browse the OVHcloud [catalog](https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/) to discover all available models.
+
+---
+
 ## Additional Resources
 
 - [Kilo Code Documentation](https://docs.kilocode.com/)
diff --git a/cli/src/commands/__tests__/model.test.ts b/cli/src/commands/__tests__/model.test.ts
index f25dc8d3250..b26f4ec5cdd 100644
--- a/cli/src/commands/__tests__/model.test.ts
+++ b/cli/src/commands/__tests__/model.test.ts
@@ -41,6 +41,7 @@ describe("/model command", () => {
 		"io-intelligence": {},
 		deepinfra: {},
 		"vercel-ai-gateway": {},
+		ovhcloud: {},
 	}
 
 	const mockProvider: ProviderConfig = {
@@ -404,6 +405,7 @@ describe("/model command", () => {
 		"io-intelligence": {},
 		deepinfra: {},
 		"vercel-ai-gateway": {},
+		ovhcloud: {},
 	}
 
 	mockContext.args = ["list"]
diff --git a/cli/src/commands/model.ts b/cli/src/commands/model.ts
index ce5c7b5ca72..0d008634682 100644
--- a/cli/src/commands/model.ts
+++ b/cli/src/commands/model.ts
@@ -37,6 +37,7 @@ async function ensureRouterModels(context: any): Promise {
 		"deepinfra",
 		"io-intelligence",
 		"vercel-ai-gateway",
+		"ovhcloud",
 	].includes(routerName)
 
 	if (!needsRouterModels) {
diff --git a/cli/src/config/mapper.ts b/cli/src/config/mapper.ts
index ea3134e8867..29fd612f164 100644
--- a/cli/src/config/mapper.ts
+++ b/cli/src/config/mapper.ts
@@ -88,6 +88,8 @@ function getModelIdForProvider(provider: ProviderConfig): string {
 			return provider.vercelAiGatewayModelId || ""
 		case "io-intelligence":
 			return provider.ioIntelligenceModelId || ""
+		case "ovhcloud":
+			return provider.ovhCloudAiEndpointsModelId || ""
 		default:
 			return provider.apiModelId || provider.modelId || ""
 	}
diff --git a/cli/src/config/schema.json b/cli/src/config/schema.json
index 55892de7f9b..157e586e786 100644
--- a/cli/src/config/schema.json
+++ b/cli/src/config/schema.json
@@ -263,7 +263,8 @@
 				"vercel-ai-gateway",
 				"virtual-quota-fallback",
 				"human-relay",
-				"fake-ai"
+				"fake-ai",
+				"ovhcloud"
 			]
 		}
 	},
@@ -2065,6 +2066,48 @@
 					}
 				}
 			}
+		},
+		{
+			"if": {
+				"properties": { "provider": { "const": "ovhcloud" } }
+			},
+			"then": {
+				"properties": {
+					"ovhCloudAiEndpointsApiKey": {
+ "type": "string", + "description": "OVHcloud AI Endpoints API key" + }, + "ovhCloudAiEndpointsModelId": { + "type": "string", + "description": "OVHcloud AI Endpoints model ID" + } + } + } + }, + { + "if": { + "properties": { "provider": { "const": "ovhcloud" } } + }, + "then": { + "properties": { + "ovhCloudAiEndpointsApiKey": { "type": "string" } + }, + "required": [] + } + }, + { + "if": { + "properties": { + "provider": { "const": "ovhcloud" }, + "ovhCloudAiEndpointsModelId": { "type": "string", "minLength": 1 } + }, + "required": ["ovhCloudAiEndpointsModelId"] + }, + "then": { + "properties": { + "ovhCloudAiEndpointsModelId": { "minLength": 1 } + } + } } ] } diff --git a/cli/src/constants/providers/__tests__/models.test.ts b/cli/src/constants/providers/__tests__/models.test.ts index b6b5dabaedc..d915cb7c3cd 100644 --- a/cli/src/constants/providers/__tests__/models.test.ts +++ b/cli/src/constants/providers/__tests__/models.test.ts @@ -217,6 +217,7 @@ describe("Static Provider Models", () => { "io-intelligence": {}, deepinfra: {}, "vercel-ai-gateway": {}, + ovhcloud: {}, } it("should return router models for openrouter provider", () => { diff --git a/cli/src/constants/providers/labels.ts b/cli/src/constants/providers/labels.ts index 24b4f03b269..10f3a3ec919 100644 --- a/cli/src/constants/providers/labels.ts +++ b/cli/src/constants/providers/labels.ts @@ -43,6 +43,7 @@ export const PROVIDER_LABELS: Record = { "virtual-quota-fallback": "Virtual Quota Fallback", "human-relay": "Human Relay", "fake-ai": "Fake AI", + ovhcloud: "OVHcloud AI Endpoints", } /** diff --git a/cli/src/constants/providers/models.ts b/cli/src/constants/providers/models.ts index baa5e157366..5f7e57d604a 100644 --- a/cli/src/constants/providers/models.ts +++ b/cli/src/constants/providers/models.ts @@ -45,6 +45,7 @@ import { claudeCodeDefaultModelId, geminiCliModels, geminiCliDefaultModelId, + ovhCloudAiEndpointsDefaultModelId, } from "@roo-code/types" /** @@ -62,6 +63,7 @@ export type RouterName = | "io-intelligence" | "deepinfra" | "vercel-ai-gateway" + | "ovhcloud" /** * ModelInfo interface - mirrors the one from packages/types/src/model.ts @@ -105,6 +107,7 @@ export const PROVIDER_TO_ROUTER_NAME: Record = deepinfra: "deepinfra", "io-intelligence": "io-intelligence", "vercel-ai-gateway": "vercel-ai-gateway", + ovhcloud: "ovhcloud", // Providers without dynamic model support anthropic: null, bedrock: null, @@ -150,6 +153,7 @@ export const PROVIDER_MODEL_FIELD: Record = { deepinfra: "deepInfraModelId", "io-intelligence": "ioIntelligenceModelId", "vercel-ai-gateway": "vercelAiGatewayModelId", + ovhcloud: "ovhCloudAiEndpointsModelId", // Providers without dynamic model support anthropic: null, bedrock: null, @@ -242,6 +246,7 @@ export const DEFAULT_MODEL_IDS: Partial> = { zai: internationalZAiDefaultModelId, roo: rooDefaultModelId, "gemini-cli": geminiCliDefaultModelId, + ovhcloud: ovhCloudAiEndpointsDefaultModelId, } /** @@ -413,6 +418,8 @@ export function getModelIdKey(provider: ProviderName): string { return "ioIntelligenceModelId" case "vercel-ai-gateway": return "vercelAiGatewayModelId" + case "ovhcloud": + return "ovhCloudAiEndpointsModelId" default: return "apiModelId" } diff --git a/cli/src/constants/providers/settings.ts b/cli/src/constants/providers/settings.ts index 82071db233b..4d571412bec 100644 --- a/cli/src/constants/providers/settings.ts +++ b/cli/src/constants/providers/settings.ts @@ -452,6 +452,18 @@ export const FIELD_REGISTRY: Record = { placeholder: "Enter model ID...", }, + // OVHcloud AI 
Endpoints fields + ovhCloudAiEndpointsApiKey: { + label: "API Key", + type: "password", + placeholder: "Enter OVHcloud AI Endpoints API key...", + }, + ovhCloudAiEndpointsModelId: { + label: "Model ID", + type: "text", + placeholder: "Enter model ID...", + }, + // Virtual Quota Fallback fields profiles: { label: "Profiles Configuration", @@ -779,6 +791,12 @@ export const getProviderSettings = (provider: ProviderName, config: ProviderSett }, ] + case "ovhcloud": + return [ + createFieldConfig("ovhCloudAiEndpointsApiKey", config), + createFieldConfig("ovhCloudAiEndpointsModelId", config, "gpt-oss-120b"), + ] + default: return [] } @@ -826,6 +844,7 @@ export const PROVIDER_DEFAULT_MODELS: Record = { "virtual-quota-fallback": "gpt-4o", "human-relay": "human", "fake-ai": "fake-model", + ovhcloud: "gpt-oss-120b", } /** diff --git a/cli/src/constants/providers/validation.ts b/cli/src/constants/providers/validation.ts index 1637a081a07..f234271f1ad 100644 --- a/cli/src/constants/providers/validation.ts +++ b/cli/src/constants/providers/validation.ts @@ -40,6 +40,7 @@ export const PROVIDER_REQUIRED_FIELDS: Record = { "vercel-ai-gateway": ["vercelAiGatewayApiKey", "vercelAiGatewayModelId"], "human-relay": ["apiModelId"], "fake-ai": ["apiModelId"], + ovhcloud: ["ovhCloudAiEndpointsModelId"], // Special cases handled separately in handleSpecialValidations vertex: [], // Has special validation logic (either/or fields) "vscode-lm": [], // Has nested object validation diff --git a/cli/src/types/messages.ts b/cli/src/types/messages.ts index d4acbd296ac..e62304dbb81 100644 --- a/cli/src/types/messages.ts +++ b/cli/src/types/messages.ts @@ -103,6 +103,7 @@ export type ProviderName = | "io-intelligence" | "roo" | "vercel-ai-gateway" + | "ovhcloud" // Provider Settings Entry for profile metadata export interface ProviderSettingsEntry { @@ -320,6 +321,10 @@ export interface ProviderSettings { vercelAiGatewayApiKey?: string vercelAiGatewayModelId?: string + // OVHcloud AI Endpoints + ovhCloudAiEndpointsApiKey?: string + ovhCloudAiEndpointsModelId?: string + // Allow additional fields for extensibility [key: string]: any } diff --git a/cli/src/utils/__tests__/providers.test.ts b/cli/src/utils/__tests__/providers.test.ts index 12bcac97faa..dd44c68050d 100644 --- a/cli/src/utils/__tests__/providers.test.ts +++ b/cli/src/utils/__tests__/providers.test.ts @@ -65,6 +65,12 @@ describe("getSelectedModelId", () => { expect(result).toBe("litellm-model-1") }) + it("should return correct model for OVHcloud AI Endpoints provider", () => { + const apiConfig = { ovhCloudAiEndpointsModelId: "ovhcloud-model" } + const result = getSelectedModelId("ovhcloud", apiConfig) + expect(result).toBe("ovhcloud-model") + }) + it("should return 'unknown' when model field is not set", () => { const apiConfig = { someOtherField: "value" } const result = getSelectedModelId("kilocode", apiConfig)