diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 15833e00c4..0630263ac8 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -21,6 +21,7 @@ import { qwenCodeModels, rooModels, sambaNovaModels, + sapAiCoreModels, vertexModels, vscodeLlmModels, xaiModels, @@ -66,6 +67,7 @@ export const providerNames = [ "featherless", "io-intelligence", "roo", + "sapaicore", "vercel-ai-gateway", ] as const @@ -330,6 +332,15 @@ const rooSchema = apiModelIdProviderModelSchema.extend({ // No additional fields needed - uses cloud authentication }) +const sapAiCoreSchema = apiModelIdProviderModelSchema.extend({ + sapAiCoreClientId: z.string().optional(), + sapAiCoreClientSecret: z.string().optional(), + sapAiCoreTokenUrl: z.string().optional(), + sapAiResourceGroup: z.string().optional(), + sapAiCoreBaseUrl: z.string().optional(), + reasoningEffort: reasoningEffortWithMinimalSchema.optional(), + thinkingBudgetTokens: z.number().optional(), +}) const vercelAiGatewaySchema = baseProviderSettingsSchema.extend({ vercelAiGatewayApiKey: z.string().optional(), vercelAiGatewayModelId: z.string().optional(), @@ -374,6 +385,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })), qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })), rooSchema.merge(z.object({ apiProvider: z.literal("roo") })), + sapAiCoreSchema.merge(z.object({ apiProvider: z.literal("sapaicore") })), vercelAiGatewaySchema.merge(z.object({ apiProvider: z.literal("vercel-ai-gateway") })), defaultSchema, ]) @@ -414,6 +426,7 @@ export const providerSettingsSchema = z.object({ ...ioIntelligenceSchema.shape, ...qwenCodeSchema.shape, ...rooSchema.shape, + ...sapAiCoreSchema.shape, ...vercelAiGatewaySchema.shape, ...codebaseIndexProviderSchema.shape, }) @@ -543,6 +556,11 @@ export const MODELS_BY_PROVIDER: 
Record< label: "SambaNova", models: Object.keys(sambaNovaModels), }, + sapaicore: { + id: "sapaicore", + label: "SAP AI Core", + models: Object.keys(sapAiCoreModels), + }, vertex: { id: "vertex", label: "GCP Vertex AI", diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 97fa10ca82..b41cbbcf5e 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -23,6 +23,7 @@ export * from "./qwen-code.js" export * from "./requesty.js" export * from "./roo.js" export * from "./sambanova.js" +export * from "./sapaicore.js" export * from "./unbound.js" export * from "./vertex.js" export * from "./vscode-llm.js" diff --git a/packages/types/src/providers/sapaicore.ts b/packages/types/src/providers/sapaicore.ts new file mode 100644 index 0000000000..1a157ccef3 --- /dev/null +++ b/packages/types/src/providers/sapaicore.ts @@ -0,0 +1,156 @@ +import type { ModelInfo } from "../model.js" + +export type SapAiCoreModelId = keyof typeof sapAiCoreModels +export const sapAiCoreDefaultModelId: SapAiCoreModelId = "anthropic--claude-3.5-sonnet" +const sapAiCoreModelDescription = "Pricing is calculated using SAP's Capacity Units rather than direct USD pricing." 
+export const sapAiCoreModels = { + "anthropic--claude-4-sonnet": { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "anthropic--claude-4-opus": { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "anthropic--claude-3.7-sonnet": { + maxTokens: 64_000, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "anthropic--claude-3.5-sonnet": { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "anthropic--claude-3-sonnet": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "anthropic--claude-3-haiku": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "anthropic--claude-3-opus": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "gemini-2.5-pro": { + maxTokens: 65536, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: true, + maxThinkingTokens: 32_768, + description: sapAiCoreModelDescription, + }, + "gemini-2.5-flash": { + maxTokens: 65536, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: true, + maxThinkingTokens: 24_576, + description: sapAiCoreModelDescription, + }, + "gpt-4": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "gpt-4o": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: 
sapAiCoreModelDescription, + }, + "gpt-4o-mini": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "gpt-4.1": { + maxTokens: 32_768, + contextWindow: 1_047_576, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "gpt-4.1-nano": { + maxTokens: 32_768, + contextWindow: 1_047_576, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "gpt-5": { + maxTokens: 128_000, + contextWindow: 272_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "gpt-5-nano": { + maxTokens: 128_000, + contextWindow: 272_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "gpt-5-mini": { + maxTokens: 128_000, + contextWindow: 272_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + o1: { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + o3: { + maxTokens: 100_000, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, + "o3-mini": { + maxTokens: 4096, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: false, + description: sapAiCoreModelDescription, + }, + "o4-mini": { + maxTokens: 100_000, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + description: sapAiCoreModelDescription, + }, +} as const satisfies Record diff --git a/src/api/index.ts b/src/api/index.ts index b50afbb023..05657d7eb0 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -38,6 +38,7 @@ import { FireworksHandler, RooHandler, FeatherlessHandler, + SapAiCoreHandler, VercelAiGatewayHandler, } from "./providers" import { NativeOllamaHandler } from 
"./providers/native-ollama" @@ -160,6 +161,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new RooHandler(options) case "featherless": return new FeatherlessHandler(options) + case "sapaicore": + return new SapAiCoreHandler(options) case "vercel-ai-gateway": return new VercelAiGatewayHandler(options) default: diff --git a/src/api/providers/__tests__/sapaicore-streaming.spec.ts b/src/api/providers/__tests__/sapaicore-streaming.spec.ts new file mode 100644 index 0000000000..63091d26f3 --- /dev/null +++ b/src/api/providers/__tests__/sapaicore-streaming.spec.ts @@ -0,0 +1,423 @@ +// npx vitest run src/api/providers/__tests__/sapaicore-streaming.spec.ts + +import { SapAiCoreHandler } from "../sapaicore.js" +import type { ApiHandlerOptions } from "../../../shared/api.js" +import axios from "axios" + +vitest.mock("axios", () => ({ + default: { + post: vitest.fn(), + get: vitest.fn(), + }, +})) + +// Create mock stream that implements async iteration +function createMockStream(chunks: string[]) { + let index = 0 + return { + async *[Symbol.asyncIterator]() { + for (const chunk of chunks) { + yield Buffer.from(chunk) + } + }, + toString() { + return chunks.join("") + }, + } +} + +describe("SAP AI Core Streaming", () => { + let handler: SapAiCoreHandler + let mockOptions: ApiHandlerOptions & { + sapAiCoreClientId?: string + sapAiCoreClientSecret?: string + sapAiCoreTokenUrl?: string + sapAiResourceGroup?: string + sapAiCoreBaseUrl?: string + } + + beforeEach(() => { + mockOptions = { + apiKey: "test-api-key", + apiModelId: "anthropic--claude-3.5-sonnet", + sapAiCoreClientId: "test-client-id", + sapAiCoreClientSecret: "test-client-secret", + sapAiCoreTokenUrl: "https://test.auth.com", + sapAiResourceGroup: "test-group", + sapAiCoreBaseUrl: "https://test.ai-core.com", + } + handler = new SapAiCoreHandler(mockOptions) + + // Set up authentication and deployments + const mockToken = { + access_token: "test-token", + expires_at: 
Date.now() + 3600000, + } + ;(handler as any).token = mockToken + ;(handler as any).deployments = [{ id: "deployment-1", name: "anthropic--claude-3.5-sonnet:1.0" }] + + // Reset mocks + }) + + describe("Anthropic streaming", () => { + it("should stream Anthropic responses correctly", async () => { + const streamChunks = [ + 'data: {"type":"message_start","message":{"usage":{"input_tokens":15,"output_tokens":0}}}\n', + 'data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":"Hello"}}\n', + 'data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":" world"}}\n', + 'data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"!"}}\n', + 'data: {"type":"message_delta","delta":{"stop_reason":"end_turn","usage":{"output_tokens":3}}}\n', + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 15, + outputTokens: 0, + }) + expect(responses).toContainEqual({ + type: "text", + text: "Hello", + }) + expect(responses).toContainEqual({ + type: "text", + text: " world", + }) + expect(responses).toContainEqual({ + type: "text", + text: "!", + }) + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 15, + outputTokens: 0, + }) + }) + }) + + describe("OpenAI streaming", () => { + beforeEach(() => { + const openAiHandler = new SapAiCoreHandler({ + ...mockOptions, + apiModelId: "gpt-4o", + }) + const mockToken = { + access_token: "test-token", + expires_at: Date.now() + 3600000, + } + ;(openAiHandler as any).token = mockToken + ;(openAiHandler as any).deployments = [{ id: "deployment-2", name: "gpt-4o:1.0" }] + handler = openAiHandler + }) + + it("should 
stream OpenAI responses correctly", async () => { + const streamChunks = [ + 'data: {"choices":[{"delta":{"content":"Hello"}}]}\n', + 'data: {"choices":[{"delta":{"content":" world"}}]}\n', + 'data: {"choices":[{"delta":{"content":"!"}}]}\n', + 'data: {"usage":{"prompt_tokens":10,"completion_tokens":3}}\n', + "data: [DONE]\n", + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(responses).toContainEqual({ + type: "text", + text: "Hello", + }) + expect(responses).toContainEqual({ + type: "text", + text: " world", + }) + expect(responses).toContainEqual({ + type: "text", + text: "!", + }) + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 10, + outputTokens: 3, + }) + }) + + it("should handle O3-mini non-streaming response", async () => { + const o3Handler = new SapAiCoreHandler({ + ...mockOptions, + apiModelId: "o3-mini", + }) + const mockToken = { + access_token: "test-token", + expires_at: Date.now() + 3600000, + } + ;(o3Handler as any).token = mockToken + ;(o3Handler as any).deployments = [{ id: "deployment-3", name: "o3-mini:1.0" }] + + const mockResponse = { + data: { + choices: [ + { + message: { + content: "Hello world!", + }, + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 3, + }, + }, + } + + // Mock both the stream and non-stream calls since o3-mini uses non-stream + vi.mocked(axios.post) + .mockResolvedValueOnce({ data: createMockStream([]) }) // First call (stream) + .mockResolvedValueOnce(mockResponse) // Second call (non-stream) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of o3Handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + 
+ expect(responses).toContainEqual({ + type: "usage", + inputTokens: 10, + outputTokens: 3, + }) + expect(responses).toContainEqual({ + type: "text", + text: "Hello world!", + }) + }) + }) + + describe("Gemini streaming", () => { + beforeEach(() => { + const geminiHandler = new SapAiCoreHandler({ + ...mockOptions, + apiModelId: "gemini-2.5-flash", + }) + const mockToken = { + access_token: "test-token", + expires_at: Date.now() + 3600000, + } + ;(geminiHandler as any).token = mockToken + ;(geminiHandler as any).deployments = [{ id: "deployment-3", name: "gemini-2.5-flash:1.0" }] + handler = geminiHandler + }) + + it("should stream Gemini responses correctly", async () => { + const streamChunks = [ + 'data: {"candidates":[{"content":{"parts":[{"text":"Hello"}]}}],"usageMetadata":{"promptTokenCount":10}}\n', + 'data: {"candidates":[{"content":{"parts":[{"text":" world"}]}}]}\n', + 'data: {"candidates":[{"content":{"parts":[{"text":"!"}]}}],"usageMetadata":{"candidatesTokenCount":3}}\n', + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(responses).toContainEqual({ + type: "text", + text: "Hello", + }) + expect(responses).toContainEqual({ + type: "text", + text: " world", + }) + expect(responses).toContainEqual({ + type: "text", + text: "!", + }) + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 10, + outputTokens: 3, + }) + }) + + it("should handle Gemini thinking responses", async () => { + const streamChunks = [ + 'data: {"candidates":[{"content":{"parts":[{"thought":true,"text":"Let me think..."}]}}]}\n', + 'data: {"candidates":[{"content":{"parts":[{"text":"Hello 
world!"}]}}],"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":3,"thoughtsTokenCount":5}}\n', + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(responses).toContainEqual({ + type: "reasoning", + text: "Let me think...", + }) + expect(responses).toContainEqual({ + type: "text", + text: "Hello world!", + }) + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 10, + outputTokens: 3, + }) + }) + }) + + describe("Claude 3.7/4 Sonnet streaming", () => { + beforeEach(() => { + const sonnetHandler = new SapAiCoreHandler({ + ...mockOptions, + apiModelId: "anthropic--claude-3.7-sonnet", + }) + const mockToken = { + access_token: "test-token", + expires_at: Date.now() + 3600000, + } + ;(sonnetHandler as any).token = mockToken + ;(sonnetHandler as any).deployments = [{ id: "deployment-4", name: "anthropic--claude-3.7-sonnet:1.0" }] + handler = sonnetHandler + }) + + it("should stream Claude 3.7 Sonnet responses with proper JSON parsing", async () => { + const streamChunks = [ + "data: {metadata:{usage:{inputTokens:15,outputTokens:0}}}\n", + 'data: {contentBlockDelta:{delta:{text:"Hello"}}}\n', + 'data: {contentBlockDelta:{delta:{text:" world"}}}\n', + "data: {metadata:{usage:{inputTokens:15,outputTokens:3,totalTokens:18}}}\n", + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 0, + outputTokens: 0, + }) + 
expect(responses).toContainEqual({ + type: "text", + text: "Hello", + }) + expect(responses).toContainEqual({ + type: "text", + text: " world", + }) + expect(responses).toContainEqual({ + type: "usage", + inputTokens: 15, + outputTokens: 3, + }) + }) + + it("should handle reasoning content in Claude 3.7 Sonnet", async () => { + const streamChunks = [ + 'data: {contentBlockDelta:{delta:{reasoningContent:{text:"Thinking about this..."}}}}\n', + 'data: {contentBlockDelta:{delta:{text:"Hello world!"}}}\n', + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(responses).toContainEqual({ + type: "reasoning", + text: "Thinking about this...", + }) + expect(responses).toContainEqual({ + type: "text", + text: "Hello world!", + }) + }) + }) + + describe("Error handling in streaming", () => { + it("should handle malformed JSON in stream", async () => { + const streamChunks = [ + 'data: {"type":"message_start","message":{"usage":{"input_tokens":10}}}\n', + "data: invalid json\n", + 'data: {"type":"content_block_delta","delta":{"type":"text_delta","text":"Hello"}}\n', + ] + + const mockStream = createMockStream(streamChunks) + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const consoleSpy = vitest.spyOn(console, "error").mockImplementation(() => {}) + + const messages = [{ role: "user" as const, content: "Say hello" }] + const responses = [] + + for await (const chunk of handler.createMessage("You are helpful", messages)) { + responses.push(chunk) + } + + expect(consoleSpy).toHaveBeenCalledWith("Failed to parse JSON data:", expect.any(Error)) + expect(responses).toContainEqual({ + type: "text", + text: "Hello", + }) + + consoleSpy.mockRestore() + }) + + it("should 
handle stream errors", async () => { + const mockStream = { + async *[Symbol.asyncIterator]() { + yield Buffer.from('data: {"type":"message_start"}\n') + throw new Error("Stream error") + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ data: mockStream }) + + const messages = [{ role: "user" as const, content: "Say hello" }] + + await expect(async () => { + for await (const chunk of handler.createMessage("You are helpful", messages)) { + // Should throw before yielding chunks + } + }).rejects.toThrow("Stream error") + }) + }) +}) diff --git a/src/api/providers/__tests__/sapaicore-test.spec.ts b/src/api/providers/__tests__/sapaicore-test.spec.ts new file mode 100644 index 0000000000..b64468d73f --- /dev/null +++ b/src/api/providers/__tests__/sapaicore-test.spec.ts @@ -0,0 +1,55 @@ +// Test file for SAP AI Core handler + +import { SapAiCoreHandler } from "../sapaicore" +import { ApiHandlerOptions } from "../../../shared/api" +import { sapAiCoreDefaultModelId } from "@roo-code/types" +import axios from "axios" + +vitest.mock("axios", () => ({ + default: { + post: vitest.fn(), + get: vitest.fn(), + }, +})) + +const mockAxios = axios as any + +describe("SapAiCoreHandler", () => { + let handler: SapAiCoreHandler + let mockOptions: ApiHandlerOptions & { + sapAiCoreClientId?: string + sapAiCoreClientSecret?: string + sapAiCoreTokenUrl?: string + sapAiResourceGroup?: string + sapAiCoreBaseUrl?: string + } + + beforeEach(() => { + mockOptions = { + apiKey: "test-api-key", + apiModelId: "anthropic--claude-3.5-sonnet", + sapAiCoreClientId: "test-client-id", + sapAiCoreClientSecret: "test-client-secret", + sapAiCoreTokenUrl: "https://test.auth.com/oauth/token", + sapAiResourceGroup: "test-resource-group", + sapAiCoreBaseUrl: "https://test.ai-core.com", + } + handler = new SapAiCoreHandler(mockOptions) + + mockAxios.post.mockClear() + mockAxios.get.mockClear() + }) + + describe("constructor", () => { + it("should initialize with provided options", () => { + 
expect(handler).toBeInstanceOf(SapAiCoreHandler) + }) + + it("should use default model if no model specified", () => { + const handlerWithoutModel = new SapAiCoreHandler({ + apiKey: "test-key", + }) + expect(handlerWithoutModel.getModel().id).toBe(sapAiCoreDefaultModelId) + }) + }) +}) diff --git a/src/api/providers/__tests__/sapaicore.spec.ts b/src/api/providers/__tests__/sapaicore.spec.ts new file mode 100644 index 0000000000..2eed8a8868 --- /dev/null +++ b/src/api/providers/__tests__/sapaicore.spec.ts @@ -0,0 +1,990 @@ +// npx vitest run src/api/providers/__tests__/sapaicore.spec.ts + +import { SapAiCoreHandler } from "../sapaicore" +import type { ApiHandlerOptions } from "../../../shared/api.js" +import axios from "axios" + +vitest.mock("axios", () => ({ + default: { + post: vitest.fn(), + get: vitest.fn(), + }, +})) + +// Mock Gemini utility functions +vitest.mock("../sapaicore.js", async (importOriginal) => { + const original = (await importOriginal()) as any + return { + ...original, + processGeminiStreamChunk: vitest.fn().mockImplementation((data: any) => { + // Simple mock implementation + const result: any = {} + if (data.candidates?.[0]?.content?.parts?.[0]?.text) { + result.text = data.candidates[0].content.parts[0].text + } + if (data.usageMetadata) { + result.usageMetadata = data.usageMetadata + } + return result + }), + prepareGeminiRequestPayload: vitest.fn().mockReturnValue({ + messages: [], + generationConfig: { maxOutputTokens: 8192 }, + }), + } +}) + +describe("SapAiCoreHandler", () => { + const mockOptions: ApiHandlerOptions = { + apiModelId: "anthropic--claude-3.5-sonnet", + sapAiCoreClientId: "test-client-id", + sapAiCoreClientSecret: "test-client-secret", + sapAiCoreTokenUrl: "https://test.sapaicore.ai/oauth/token", + sapAiCoreBaseUrl: "https://test.sapaicore.ai", + sapAiResourceGroup: "test-group", + } + + let handler: SapAiCoreHandler + + beforeEach(() => { + handler = new SapAiCoreHandler(mockOptions) + vitest.clearAllMocks() + }) + + 
describe("constructor", () => { + it("should create handler with valid options", () => { + expect(handler).toBeInstanceOf(SapAiCoreHandler) + }) + + it("should get model info correctly", () => { + const model = handler.getModel() + expect(model.id).toBe("anthropic--claude-3.5-sonnet") + expect(model.info).toBeDefined() + }) + }) + + describe("createMessage", () => { + it("should handle successful streaming response", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-access-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response with correct deployment structure + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock streaming response - should be an async iterable + const mockStreamData = [ + 'data: {"type": "message_start", "message": {"usage": {"input_tokens": 10, "output_tokens": 5}}}\n\n', + 'data: {"type": "content_block_start", "content_block": {"type": "text", "text": "Hello"}}\n\n', + 'data: {"type": "content_block_delta", "delta": {"type": "text_delta", "text": " world"}}\n\n', + 'data: {"type": "message_delta", "delta": {"stop_reason": "end_turn", "usage": {"output_tokens": 15}}}\n\n', + "data: [DONE]\n\n", + ] + + const mockStream = { + async *[Symbol.asyncIterator]() { + for (const chunk of mockStreamData) { + yield chunk + } + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockStream, + }) + + const stream = handler.createMessage("You are a helpful assistant", [{ role: "user", content: "Hello" }]) + + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.length).toBeGreaterThan(0) + expect(chunks.some((chunk) => chunk.type === "text")).toBe(true) + }) + }) + 
+ describe("completePrompt", () => { + it("should complete a simple prompt", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-access-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response with correct deployment structure + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock streaming response - should be an async iterable + const mockStreamData = [ + 'data: {"type": "message_start", "message": {"usage": {"input_tokens": 10, "output_tokens": 5}}}\n\n', + 'data: {"type": "content_block_start", "content_block": {"type": "text", "text": "Test response"}}\n\n', + 'data: {"type": "message_delta", "delta": {"stop_reason": "end_turn", "usage": {"output_tokens": 15}}}\n\n', + "data: [DONE]\n\n", + ] + + const mockStream = { + async *[Symbol.asyncIterator]() { + for (const chunk of mockStreamData) { + yield chunk + } + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockStream, + }) + + const result = await handler.completePrompt("Hello") + expect(typeof result).toBe("string") + expect(result).toContain("Test response") + }) + }) + + describe("Authentication and Token Management", () => { + it("should handle authentication failures", async () => { + // Mock auth failure + vi.mocked(axios.post).mockRejectedValueOnce({ + response: { + status: 401, + data: { error: "unauthorized", error_description: "Invalid client credentials" }, + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow() + }) + + it("should handle token refresh when token expires", async () => { + // Mock first auth call + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: 
"expired-token", expires_in: -1 }, // Already expired + status: 200, + }) + + // Mock second auth call (refresh) + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "new-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + const mockStream = { + async *[Symbol.asyncIterator]() { + yield 'data: {"type": "content_block_start", "content_block": {"type": "text", "text": "response"}}\n\n' + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockStream, + }) + + const stream = handler.createMessage("system", [{ role: "user", content: "test" }]) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + break // Just test one chunk + } + + // Verify we made 2 auth calls (initial + refresh) + expect(vi.mocked(axios.post)).toHaveBeenCalledTimes(3) // 2 auth + 1 inference + }) + + it("should handle expired token mid-stream scenario", async () => { + // Mock successful initial auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "valid-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock inference call that fails with 401 (expired token) + vi.mocked(axios.post).mockRejectedValueOnce({ + response: { + status: 401, + data: { error: "unauthorized", error_description: "Access token expired" }, + }, + }) + + const stream = handler.createMessage("system", [{ role: 
"user", content: "test" }]) + + await expect(stream.next()).rejects.toThrow() + }) + + it("should handle invalid token format in response", async () => { + // Mock auth with invalid response format + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { invalid_response: "missing access_token" }, + status: 200, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow() + }) + }) + + describe("Network Errors and Timeouts", () => { + it("should handle network timeout during authentication", async () => { + // Mock network timeout + vi.mocked(axios.post).mockRejectedValueOnce({ + code: "ECONNABORTED", + message: "timeout of 5000ms exceeded", + request: {}, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "timeout of 5000ms exceeded", + ) + }) + + it("should handle network timeout during inference", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock timeout during inference + vi.mocked(axios.post).mockRejectedValueOnce({ + code: "ECONNABORTED", + message: "timeout of 30000ms exceeded", + request: {}, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "No response received from server", + ) + }) + + it("should handle connection refused errors", async () => { + // Mock connection refused + vi.mocked(axios.post).mockRejectedValueOnce({ + code: "ECONNREFUSED", + message: "connect ECONNREFUSED 127.0.0.1:443", + request: {}, + }) + + await 
expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "connect ECONNREFUSED 127.0.0.1:443", + ) + }) + + it("should handle DNS resolution failures", async () => { + // Mock DNS failure + vi.mocked(axios.post).mockRejectedValueOnce({ + code: "ENOTFOUND", + message: "getaddrinfo ENOTFOUND invalid-domain.com", + request: {}, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "getaddrinfo ENOTFOUND invalid-domain.com", + ) + }) + + it("should handle SSL/TLS certificate errors", async () => { + // Mock SSL error + vi.mocked(axios.post).mockRejectedValueOnce({ + code: "CERT_UNTRUSTED", + message: "certificate not trusted", + request: {}, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "certificate not trusted", + ) + }) + }) + + describe("Invalid Deployment Responses", () => { + it("should handle empty deployments list", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock empty deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { resources: [] }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "No running deployment found for model", + ) + }) + + it("should handle deployments with no RUNNING status", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments with stopped/error status + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "STOPPED", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0", + }, + }, + }, 
+ }, + }, + ], + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "No running deployment found for model", + ) + }) + + it("should handle malformed deployment response structure", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock malformed deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "test-deployment-123", + targetStatus: "RUNNING", + // Missing details.resources.backend_details.model + details: { + invalid_structure: true, + }, + }, + ], + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "No running deployment found for model", + ) + }) + + it("should handle deployments API returning 404", async () => { + const handler = new SapAiCoreHandler(mockOptions) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock 404 error for deployments API + vi.mocked(axios.get).mockRejectedValueOnce({ + response: { + status: 404, + data: "Deployments endpoint not found", + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "Failed to fetch deployments", + ) + }) + + it("should handle deployments API returning 500", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock 500 error for deployments API + vi.mocked(axios.get).mockRejectedValueOnce({ + response: { + status: 500, + data: "Internal server error", + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "Failed to fetch deployments", + ) 
+ }) + }) + + describe("Different Model Types", () => { + describe("OpenAI GPT Models", () => { + const gptOptions: ApiHandlerOptions = { + ...mockOptions, + apiModelId: "gpt-4o", + } + + it("should handle GPT-4o streaming correctly", async () => { + const gptHandler = new SapAiCoreHandler(gptOptions) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "gpt-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "gpt-4o", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock GPT streaming response + const mockGptStream = { + async *[Symbol.asyncIterator]() { + yield 'data: {"choices":[{"delta":{"content":"Hello"}}]}\n\n' + yield 'data: {"choices":[{"delta":{"content":" world"}}]}\n\n' + yield 'data: {"usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n' + yield "data: [DONE]\n\n" + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockGptStream, + }) + + const stream = gptHandler.createMessage("system", [{ role: "user", content: "test" }]) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.some((chunk) => chunk.type === "text" && chunk.text === "Hello")).toBe(true) + expect(chunks.some((chunk) => chunk.type === "text" && chunk.text === " world")).toBe(true) + expect(chunks.some((chunk) => chunk.type === "usage")).toBe(true) + }) + + it("should handle O1 reasoning models without temperature/max_tokens", async () => { + const o1Handler = new SapAiCoreHandler({ ...mockOptions, apiModelId: "o1" }) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + 
vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "o1-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "o1", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + const mockO1Stream = { + async *[Symbol.asyncIterator]() { + yield 'data: {"choices":[{"delta":{"content":"Reasoning response"}}]}\n\n' + yield "data: [DONE]\n\n" + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockO1Stream, + }) + + const stream = o1Handler.createMessage("system", [{ role: "user", content: "test" }]) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.some((chunk) => chunk.type === "text")).toBe(true) + }) + + it("should handle O3-mini non-streaming response", async () => { + const o3Handler = new SapAiCoreHandler({ ...mockOptions, apiModelId: "o3-mini" }) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "o3-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "o3-mini", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock non-streaming response for O3-mini (single call, not stream) + vi.mocked(axios.post) + .mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) // auth call + .mockResolvedValueOnce({ + data: { + choices: [ + { + message: { + content: "O3-mini response", + }, + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + }, + }, + }) // inference call + + const stream = o3Handler.createMessage("system", [{ role: "user", content: "test" }]) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.some((chunk) => 
chunk.type === "text" && chunk.text === "O3-mini response")).toBe(true) + expect(chunks.some((chunk) => chunk.type === "usage")).toBe(true) + }) + }) + + describe("Gemini Models", () => { + const geminiOptions: ApiHandlerOptions = { + ...mockOptions, + apiModelId: "gemini-2.5-flash", + } + + it("should handle Gemini streaming with reasoning", async () => { + const geminiHandler = new SapAiCoreHandler(geminiOptions) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "gemini-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "gemini-2.5-flash", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock Gemini streaming response + const mockGeminiStream = { + async *[Symbol.asyncIterator]() { + yield 'data: {"candidates":[{"content":{"parts":[{"text":"Response text"}]}}]}\n\n' + yield 'data: {"usageMetadata":{"promptTokenCount":10,"candidatesTokenCount":5}}\n\n' + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockGeminiStream, + }) + + const stream = geminiHandler.createMessage("system", [{ role: "user", content: "test" }]) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.length).toBeGreaterThan(0) + }) + }) + + describe("Claude Models with Caching", () => { + const claudeOptions: ApiHandlerOptions = { + ...mockOptions, + apiModelId: "anthropic--claude-4-sonnet", + } + + it("should handle Claude 4 with caching support", async () => { + const claudeHandler = new SapAiCoreHandler(claudeOptions) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response + 
vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "claude-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "anthropic--claude-4-sonnet", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + // Mock Claude 4 streaming response with caching metadata + const mockClaude4Stream = { + async *[Symbol.asyncIterator]() { + yield 'data: {"metadata":{"usage":{"inputTokens":10,"outputTokens":5,"cacheReadInputTokens":2}}}\n\n' + yield 'data: {"contentBlockDelta":{"delta":{"text":"Cached response"}}}\n\n' + }, + } + + vi.mocked(axios.post).mockResolvedValueOnce({ + data: mockClaude4Stream, + }) + + const stream = claudeHandler.createMessage("system", [{ role: "user", content: "test" }]) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.some((chunk) => chunk.type === "text")).toBe(true) + expect(chunks.some((chunk) => chunk.type === "usage")).toBe(true) + }) + }) + + it("should handle unsupported model types", async () => { + const unsupportedHandler = new SapAiCoreHandler({ + ...mockOptions, + apiModelId: "completely-unsupported-model-xyz" as any, + }) + + // Clear all mocks first + vitest.clearAllMocks() + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response with the matching unsupported model + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "unsupported-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "completely-unsupported-model-xyz", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + await expect( + unsupportedHandler.createMessage("system", [{ role: "user", content: "test" }]).next(), + ).rejects.toThrow("Unsupported model") + }) + + it("should handle no matching deployment for model", 
async () => { + const noDeploymentHandler = new SapAiCoreHandler({ ...mockOptions, apiModelId: "gpt-4" }) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock deployments response with a different model (so it doesn't find a deployment) + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { + resources: [ + { + id: "different-deployment-123", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "gpt-4o", + version: "1.0", + }, + }, + }, + }, + }, + ], + }, + }) + + await expect( + noDeploymentHandler.createMessage("system", [{ role: "user", content: "test" }]).next(), + ).rejects.toThrow("No running deployment found for model") + }) + }) + + describe("Error Handling and Resilience", () => { + it("should handle authentication failures", async () => { + // Mock auth failure + vi.mocked(axios.post).mockRejectedValueOnce({ + response: { + status: 401, + data: { error: "unauthorized", error_description: "Invalid client credentials" }, + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow() + }) + + it("should handle network timeout during authentication", async () => { + // Mock network timeout + vi.mocked(axios.post).mockRejectedValueOnce({ + code: "ECONNABORTED", + message: "timeout of 5000ms exceeded", + request: {}, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "timeout of 5000ms exceeded", + ) + }) + + it("should handle empty deployments list", async () => { + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock empty deployments response + vi.mocked(axios.get).mockResolvedValueOnce({ + data: { resources: [] }, + }) + + await expect(handler.createMessage("system", [{ 
role: "user", content: "test" }]).next()).rejects.toThrow( + "No running deployment found for model", + ) + }) + + it("should handle deployments API returning 404", async () => { + const handler = new SapAiCoreHandler(mockOptions) + + // Mock successful auth + vi.mocked(axios.post).mockResolvedValueOnce({ + data: { access_token: "test-token", expires_in: 3600 }, + status: 200, + }) + + // Mock 404 error for deployments API + vi.mocked(axios.get).mockRejectedValueOnce({ + response: { + status: 404, + data: "Deployments endpoint not found", + }, + }) + + await expect(handler.createMessage("system", [{ role: "user", content: "test" }]).next()).rejects.toThrow( + "No running deployment found for model", + ) + }) + }) + + describe("HTTPS Security Validation", () => { + it("should reject non-HTTPS token URLs", async () => { + const insecureHandler = new SapAiCoreHandler({ + ...mockOptions, + sapAiCoreTokenUrl: "http://insecure.example.com/oauth/token", + }) + + await expect( + insecureHandler.createMessage("system", [{ role: "user", content: "test" }]).next(), + ).rejects.toThrow("SAP AI Core Token URL must use HTTPS for security") + }) + + it("should reject non-HTTPS base URLs", async () => { + const insecureHandler = new SapAiCoreHandler({ + ...mockOptions, + sapAiCoreBaseUrl: "http://insecure.example.com", + }) + + await expect( + insecureHandler.createMessage("system", [{ role: "user", content: "test" }]).next(), + ).rejects.toThrow("SAP AI Core Base URL must use HTTPS for security") + }) + }) +}) diff --git a/src/api/providers/fetchers/__tests__/sapaicore-integration.spec.ts b/src/api/providers/fetchers/__tests__/sapaicore-integration.spec.ts new file mode 100644 index 0000000000..dcc2d0b15d --- /dev/null +++ b/src/api/providers/fetchers/__tests__/sapaicore-integration.spec.ts @@ -0,0 +1,244 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest" + +import { getSapAiCoreModels, clearSapAiCoreCache, type SapAiCoreFetcherOptions } from 
"../sapaicore" +import { getModels, flushModels } from "../modelCache" + +describe("SAP AI Core Integration Tests", () => { + const mockOptions: SapAiCoreFetcherOptions = { + sapAiCoreClientId: "test-client-id", + sapAiCoreClientSecret: "test-client-secret", + sapAiCoreTokenUrl: "https://test.authentication.sap.hana.ondemand.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + sapAiResourceGroup: "default", + } + + beforeEach(() => { + vi.clearAllMocks() + clearSapAiCoreCache() + }) + + afterEach(async () => { + clearSapAiCoreCache() + await flushModels("sapaicore") + }) + + describe("Integration with modelCache", () => { + it("should integrate with the central model cache system", async () => { + // This test verifies that the SAP AI Core fetcher integrates properly + // with the central caching system. In a real environment, this would + // make actual API calls, but for testing we expect it to handle + // missing credentials gracefully. + + const models = await getModels({ + provider: "sapaicore", + sapAiCoreClientId: "", + sapAiCoreClientSecret: "", + sapAiCoreTokenUrl: "https://test.authentication.sap.hana.ondemand.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + }) + + // Should return empty object when credentials are missing + expect(models).toEqual({}) + }) + + it("should handle missing required parameters gracefully", async () => { + const models = await getModels({ + provider: "sapaicore", + sapAiCoreClientId: "test-id", + sapAiCoreClientSecret: "test-secret", + sapAiCoreTokenUrl: "", + sapAiCoreBaseUrl: "", + }) + + // Should return empty object when URLs are missing + expect(models).toEqual({}) + }) + + it("should respect cache invalidation", async () => { + // First call + const models1 = await getModels({ + provider: "sapaicore", + sapAiCoreClientId: "", + sapAiCoreClientSecret: "", + sapAiCoreTokenUrl: "https://test.authentication.sap.hana.ondemand.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + }) 
+ + // Flush cache + await flushModels("sapaicore") + + // Second call should not use cache + const models2 = await getModels({ + provider: "sapaicore", + sapAiCoreClientId: "", + sapAiCoreClientSecret: "", + sapAiCoreTokenUrl: "https://test.authentication.sap.hana.ondemand.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + }) + + expect(models1).toEqual(models2) + expect(models1).toEqual({}) + }) + }) + + describe("Error handling in integration context", () => { + it("should handle network errors gracefully in integration context", async () => { + // Test with invalid URLs that would cause network errors + const models = await getSapAiCoreModels({ + sapAiCoreClientId: "test-id", + sapAiCoreClientSecret: "test-secret", + sapAiCoreTokenUrl: "https://invalid-domain-that-does-not-exist.com", + sapAiCoreBaseUrl: "https://invalid-domain-that-does-not-exist.com", + }) + + // Should return empty object instead of throwing + expect(models).toEqual({}) + }) + + it("should validate HTTPS requirements", async () => { + const models = await getSapAiCoreModels({ + sapAiCoreClientId: "test-id", + sapAiCoreClientSecret: "test-secret", + sapAiCoreTokenUrl: "http://insecure.example.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + }) + + // Should return empty object due to HTTPS validation failure + expect(models).toEqual({}) + }) + + it("should handle authentication failures gracefully", async () => { + // This would typically result in a 401 error in real scenarios + const models = await getSapAiCoreModels({ + sapAiCoreClientId: "invalid-client-id", + sapAiCoreClientSecret: "invalid-client-secret", + sapAiCoreTokenUrl: "https://test.authentication.sap.hana.ondemand.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + }) + + // Should return empty object instead of throwing + expect(models).toEqual({}) + }) + }) + + describe("Model information consistency", () => { + it("should return consistent model information structure", async () 
=> { + const models = await getSapAiCoreModels(mockOptions) + + // Even with empty results, the structure should be consistent + expect(models).toBeDefined() + expect(typeof models).toBe("object") + + // If models are returned, they should have the correct structure + for (const [modelId, modelInfo] of Object.entries(models)) { + expect(typeof modelId).toBe("string") + expect(modelInfo).toHaveProperty("maxTokens") + expect(modelInfo).toHaveProperty("contextWindow") + expect(modelInfo).toHaveProperty("supportsImages") + expect(modelInfo).toHaveProperty("supportsPromptCache") + expect(modelInfo).toHaveProperty("supportsComputerUse") + expect(typeof modelInfo.maxTokens).toBe("number") + expect(typeof modelInfo.contextWindow).toBe("number") + expect(typeof modelInfo.supportsImages).toBe("boolean") + expect(typeof modelInfo.supportsPromptCache).toBe("boolean") + expect(typeof modelInfo.supportsComputerUse).toBe("boolean") + } + }) + + it("should include deployment information in descriptions", async () => { + const models = await getSapAiCoreModels(mockOptions) + + // If models are returned, descriptions should include deployment info + for (const [, modelInfo] of Object.entries(models)) { + if (modelInfo.description) { + expect(modelInfo.description).toContain("SAP AI Core") + } + } + }) + }) + + describe("Performance and caching behavior", () => { + it("should cache results appropriately", async () => { + const startTime = Date.now() + + // First call + const models1 = await getSapAiCoreModels(mockOptions) + const firstCallTime = Date.now() - startTime + + const secondStartTime = Date.now() + + // Second call should be faster due to caching + const models2 = await getSapAiCoreModels(mockOptions) + const secondCallTime = Date.now() - secondStartTime + + expect(models1).toEqual(models2) + // Second call should be significantly faster (cached) + expect(secondCallTime).toBeLessThan(firstCallTime) + }) + + it("should handle concurrent requests properly", async () => { + // 
Make multiple concurrent requests + const promises = Array(5) + .fill(null) + .map(() => getSapAiCoreModels(mockOptions)) + + const results = await Promise.all(promises) + + // All results should be identical + for (let i = 1; i < results.length; i++) { + expect(results[i]).toEqual(results[0]) + } + }) + }) + + describe("Configuration validation", () => { + it("should validate required configuration parameters", async () => { + // Test missing client ID + const models1 = await getSapAiCoreModels({ + ...mockOptions, + sapAiCoreClientId: "", + }) + expect(models1).toEqual({}) + + // Test missing client secret + const models2 = await getSapAiCoreModels({ + ...mockOptions, + sapAiCoreClientSecret: "", + }) + expect(models2).toEqual({}) + + // Test missing token URL + const models3 = await getSapAiCoreModels({ + ...mockOptions, + sapAiCoreTokenUrl: "", + }) + expect(models3).toEqual({}) + + // Test missing base URL + const models4 = await getSapAiCoreModels({ + ...mockOptions, + sapAiCoreBaseUrl: "", + }) + expect(models4).toEqual({}) + }) + + it("should handle optional resource group parameter", async () => { + // Test with resource group + const models1 = await getSapAiCoreModels({ + ...mockOptions, + sapAiResourceGroup: "custom-group", + }) + + // Test without resource group (should default to "default") + const models2 = await getSapAiCoreModels({ + ...mockOptions, + sapAiResourceGroup: undefined, + }) + + // Both should return empty objects due to invalid credentials + // but should not fail due to missing resource group + expect(models1).toEqual({}) + expect(models2).toEqual({}) + }) + }) +}) diff --git a/src/api/providers/fetchers/__tests__/sapaicore.spec.ts b/src/api/providers/fetchers/__tests__/sapaicore.spec.ts new file mode 100644 index 0000000000..7577ffc829 --- /dev/null +++ b/src/api/providers/fetchers/__tests__/sapaicore.spec.ts @@ -0,0 +1,455 @@ +// Mocks must come first, before imports +vi.mock("axios") + +import type { Mock } from "vitest" +import 
axios from "axios" +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest" + +import { + getSapAiCoreModels, + getCachedSapAiCoreModels, + clearSapAiCoreCache, + getSapAiCoreDeployedModelNames, + type SapAiCoreFetcherOptions, +} from "../sapaicore" +import { DEFAULT_HEADERS } from "../../constants" + +const mockedAxios = axios as typeof axios & { + get: Mock + post: Mock + isAxiosError: Mock +} + +describe("SAP AI Core Fetcher", () => { + const mockOptions: SapAiCoreFetcherOptions = { + sapAiCoreClientId: "test-client-id", + sapAiCoreClientSecret: "test-client-secret", + sapAiCoreTokenUrl: "https://test.authentication.sap.hana.ondemand.com", + sapAiCoreBaseUrl: "https://api.ai.ml.hana.ondemand.com", + sapAiResourceGroup: "default", + } + + const mockTokenResponse = { + access_token: "mock-access-token", + expires_in: 3600, + scope: "test-scope", + jti: "test-jti", + token_type: "Bearer", + } + + const mockDeploymentsResponse = { + resources: [ + { + id: "deployment-1", + targetStatus: "RUNNING", + details: { + resources: { + backendDetails: { + model: { + name: "anthropic--claude-3.5-sonnet", + version: "1.0.0", + }, + }, + }, + }, + }, + { + id: "deployment-2", + targetStatus: "RUNNING", + details: { + resources: { + backend_details: { + model: { + name: "gpt-4o", + version: "2.0.0", + }, + }, + }, + }, + }, + { + id: "deployment-3", + targetStatus: "STOPPED", + details: { + resources: { + backendDetails: { + model: { + name: "gemini-2.5-pro", + version: "1.0.0", + }, + }, + }, + }, + }, + ], + } + + beforeEach(() => { + vi.clearAllMocks() + clearSapAiCoreCache() + }) + + afterEach(() => { + clearSapAiCoreCache() + }) + + describe("getSapAiCoreModels", () => { + it("should successfully fetch and parse SAP AI Core models", async () => { + // Mock authentication request + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + // Mock deployments request + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + 
+ const models = await getSapAiCoreModels(mockOptions) + + expect(models).toBeDefined() + expect(Object.keys(models)).toHaveLength(2) // Only RUNNING deployments + expect(models["anthropic--claude-3.5-sonnet"]).toBeDefined() + expect(models["gpt-4o"]).toBeDefined() + expect(models["gemini-2.5-pro"]).toBeUndefined() // STOPPED deployment + + // Verify authentication call + expect(mockedAxios.post).toHaveBeenCalledWith( + "https://test.authentication.sap.hana.ondemand.com/oauth/token", + expect.any(URLSearchParams), + expect.objectContaining({ + headers: expect.objectContaining({ + "Content-Type": "application/x-www-form-urlencoded", + }), + }), + ) + + // Verify deployments call + expect(mockedAxios.get).toHaveBeenCalledWith( + "https://api.ai.ml.hana.ondemand.com/v2/lm/deployments?$top=10000&$skip=0", + expect.objectContaining({ + headers: expect.objectContaining({ + Authorization: "Bearer mock-access-token", + "AI-Resource-Group": "default", + "Content-Type": "application/json", + "AI-Client-Type": "Roo-Code", + }), + }), + ) + }) + + it("should handle authentication failure", async () => { + mockedAxios.post.mockRejectedValue(new Error("Authentication failed")) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + expect(mockedAxios.get).not.toHaveBeenCalled() + }) + + it("should handle deployments fetch failure", async () => { + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockRejectedValue(new Error("Deployments fetch failed")) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + + it("should validate HTTPS URLs", async () => { + const invalidOptions = { + ...mockOptions, + sapAiCoreTokenUrl: "http://insecure.example.com", + } + + const models = await getSapAiCoreModels(invalidOptions) + + expect(models).toEqual({}) + expect(mockedAxios.post).not.toHaveBeenCalled() + }) + + it("should handle missing credentials gracefully", async () => { + 
const incompleteOptions = { + ...mockOptions, + sapAiCoreClientId: "", + sapAiCoreClientSecret: "", + } + + const models = await getSapAiCoreModels(incompleteOptions) + + expect(models).toEqual({}) + expect(mockedAxios.post).not.toHaveBeenCalled() + }) + + it("should use cached models when available", async () => { + // First call - should make API requests + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + const models1 = await getSapAiCoreModels(mockOptions) + + // Second call - should use cache + const models2 = await getSapAiCoreModels(mockOptions) + + expect(models1).toEqual(models2) + expect(mockedAxios.post).toHaveBeenCalledTimes(1) + expect(mockedAxios.get).toHaveBeenCalledTimes(1) + }) + + it("should handle invalid token response format", async () => { + mockedAxios.post.mockResolvedValue({ + data: { invalid: "response" }, + }) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + + it("should handle invalid deployments response format", async () => { + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: { invalid: "response" }, + }) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + + it("should handle deployments with missing model information", async () => { + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + const incompleteDeploymentsResponse = { + resources: [ + { + id: "deployment-1", + targetStatus: "RUNNING", + details: { + resources: { + backendDetails: { + // Missing model information + }, + }, + }, + }, + { + id: "deployment-2", + targetStatus: "RUNNING", + details: { + resources: { + backendDetails: { + model: { + // Missing name + version: "1.0.0", + }, + }, + }, + }, + }, + ], + } + + mockedAxios.get.mockResolvedValue({ + data: incompleteDeploymentsResponse, + }) + + const 
models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + + it("should handle token expiration and refresh", async () => { + // First authentication + mockedAxios.post.mockResolvedValue({ + data: { ...mockTokenResponse, expires_in: -1 }, // Expired token + }) + + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + await getSapAiCoreModels(mockOptions) + + // Second call should trigger re-authentication + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + await getSapAiCoreModels(mockOptions) + + expect(mockedAxios.post).toHaveBeenCalledTimes(2) + }) + }) + + describe("getSapAiCoreDeployedModelNames", () => { + it("should return sorted model names", async () => { + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + const modelNames = await getSapAiCoreDeployedModelNames(mockOptions) + + expect(modelNames).toEqual(["anthropic--claude-3.5-sonnet", "gpt-4o"]) + }) + + it("should handle errors gracefully", async () => { + mockedAxios.post.mockRejectedValue(new Error("Network error")) + + const modelNames = await getSapAiCoreDeployedModelNames(mockOptions) + + expect(modelNames).toEqual([]) + }) + }) + + describe("Cache functions", () => { + it("should return null when no cache exists", () => { + const cached = getCachedSapAiCoreModels() + expect(cached).toBeNull() + }) + + it("should return cached models after successful fetch", async () => { + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + await getSapAiCoreModels(mockOptions) + + const cached = getCachedSapAiCoreModels() + expect(cached).toBeDefined() + expect(Object.keys(cached!)).toHaveLength(2) + }) + + it("should clear cache when requested", async () => { + 
mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + await getSapAiCoreModels(mockOptions) + expect(getCachedSapAiCoreModels()).toBeDefined() + + clearSapAiCoreCache() + expect(getCachedSapAiCoreModels()).toBeNull() + }) + }) + + describe("Model information parsing", () => { + it("should include deployment ID in model description", async () => { + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: mockDeploymentsResponse, + }) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models["anthropic--claude-3.5-sonnet"].description).toContain("deployment-1") + expect(models["gpt-4o"].description).toContain("deployment-2") + }) + + it("should handle unknown models with fallback information", async () => { + const unknownModelResponse = { + resources: [ + { + id: "deployment-unknown", + targetStatus: "RUNNING", + details: { + resources: { + backendDetails: { + model: { + name: "unknown-model", + version: "1.0.0", + }, + }, + }, + }, + }, + ], + } + + mockedAxios.post.mockResolvedValue({ + data: mockTokenResponse, + }) + + mockedAxios.get.mockResolvedValue({ + data: unknownModelResponse, + }) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models["unknown-model"]).toBeDefined() + expect(models["unknown-model"].description).toContain("Unknown model") + expect(models["unknown-model"].maxTokens).toBe(8192) + expect(models["unknown-model"].contextWindow).toBe(200000) + }) + }) + + describe("Error handling", () => { + it("should handle network timeouts", async () => { + const timeoutError = new Error("timeout") + timeoutError.name = "ECONNABORTED" + mockedAxios.post.mockRejectedValue(timeoutError) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + + it("should handle HTTP error responses", async () => { + const httpError = { + response: 
{ + status: 401, + statusText: "Unauthorized", + data: "Invalid credentials", + }, + isAxiosError: true, + } + + mockedAxios.post.mockRejectedValue(httpError) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + + it("should handle malformed JSON responses", async () => { + mockedAxios.post.mockResolvedValue({ + data: "invalid json", + }) + + const models = await getSapAiCoreModels(mockOptions) + + expect(models).toEqual({}) + }) + }) +}) diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts index 0005e8205f..41d027858b 100644 --- a/src/api/providers/fetchers/modelCache.ts +++ b/src/api/providers/fetchers/modelCache.ts @@ -19,16 +19,21 @@ import { GetModelsOptions } from "../../../shared/api" import { getOllamaModels } from "./ollama" import { getLMStudioModels } from "./lmstudio" import { getIOIntelligenceModels } from "./io-intelligence" +import { getSapAiCoreModels } from "./sapaicore" + +// Type for all providers that support model fetching (both router and non-router providers) +type ModelProvider = RouterName | "sapaicore" + const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 }) -async function writeModels(router: RouterName, data: ModelRecord) { - const filename = `${router}_models.json` +async function writeModels(provider: ModelProvider, data: ModelRecord) { + const filename = `${provider}_models.json` const cacheDir = await getCacheDirectoryPath(ContextProxy.instance.globalStorageUri.fsPath) await safeWriteJson(path.join(cacheDir, filename), data) } -async function readModels(router: RouterName): Promise { - const filename = `${router}_models.json` +async function readModels(provider: ModelProvider): Promise { + const filename = `${provider}_models.json` const cacheDir = await getCacheDirectoryPath(ContextProxy.instance.globalStorageUri.fsPath) const filePath = path.join(cacheDir, filename) const exists = await fileExistsAtPath(filePath) @@ -85,6 
+90,16 @@ export const getModels = async (options: GetModelsOptions): Promise case "vercel-ai-gateway": models = await getVercelAiGatewayModels() break + case "sapaicore": + // SAP AI Core models endpoint requires authentication credentials + models = await getSapAiCoreModels({ + sapAiCoreClientId: options.sapAiCoreClientId, + sapAiCoreClientSecret: options.sapAiCoreClientSecret, + sapAiCoreTokenUrl: options.sapAiCoreTokenUrl, + sapAiCoreBaseUrl: options.sapAiCoreBaseUrl, + sapAiResourceGroup: options.sapAiResourceGroup, + }) + break default: { // Ensures router is exhaustively checked if RouterName is a strict union const exhaustiveCheck: never = provider @@ -117,8 +132,8 @@ export const getModels = async (options: GetModelsOptions): Promise * Flush models memory cache for a specific router * @param router - The router to flush models for. */ -export const flushModels = async (router: RouterName) => { - memoryCache.del(router) +export const flushModels = async (provider: ModelProvider) => { + memoryCache.del(provider) } export function getModelsFromCache(provider: string) { diff --git a/src/api/providers/fetchers/sapaicore.ts b/src/api/providers/fetchers/sapaicore.ts new file mode 100644 index 0000000000..d2de28260b --- /dev/null +++ b/src/api/providers/fetchers/sapaicore.ts @@ -0,0 +1,430 @@ +import axios from "axios" +import { z } from "zod" + +import type { ModelInfo } from "@roo-code/types" +import { sapAiCoreModels } from "@roo-code/types" + +import type { ModelRecord } from "../../../shared/api" +import { DEFAULT_HEADERS } from "../constants" + +/** + * SAP AI Core Authentication Response Schema + */ +const sapAiCoreTokenResponseSchema = z.object({ + access_token: z.string(), + expires_in: z.number(), + scope: z.string(), + jti: z.string(), + token_type: z.string(), +}) + +export type SapAiCoreTokenResponse = z.infer + +/** + * SAP AI Core Model Details Schema + */ +const sapAiCoreModelDetailsSchema = z.object({ + name: z.string(), + version: 
z.string().optional(), +}) + +/** + * SAP AI Core Backend Details Schema + */ +const sapAiCoreBackendDetailsSchema = z + .object({ + model: sapAiCoreModelDetailsSchema.optional(), + }) + .passthrough() + +/** + * SAP AI Core Deployment Resource Schema + */ +const sapAiCoreDeploymentResourceSchema = z.object({ + backendDetails: sapAiCoreBackendDetailsSchema.optional(), + backend_details: sapAiCoreBackendDetailsSchema.optional(), // Alternative naming + scaling: z + .object({ + backendDetails: z.object({}).passthrough().optional(), + backend_details: z.object({}).passthrough().optional(), + }) + .optional(), +}) + +/** + * SAP AI Core Deployment Details Schema + */ +const sapAiCoreDeploymentDetailsSchema = z.object({ + resources: sapAiCoreDeploymentResourceSchema.optional(), +}) + +/** + * SAP AI Core Deployment Schema + */ +const sapAiCoreDeploymentSchema = z.object({ + id: z.string(), + createdAt: z.string().optional(), + modifiedAt: z.string().optional(), + status: z.string().optional(), + targetStatus: z.string(), + details: sapAiCoreDeploymentDetailsSchema.optional(), + scenarioId: z.string().optional(), + configurationId: z.string().optional(), + latestRunningConfigurationId: z.string().optional(), + lastOperation: z.string().optional(), + submissionTime: z.string().optional(), + startTime: z.string().optional(), + configurationName: z.string().optional(), + deploymentUrl: z.string().optional(), +}) + +/** + * SAP AI Core Deployments Response Schema + */ +const sapAiCoreDeploymentsResponseSchema = z.object({ + count: z.number().optional(), + resources: z.array(sapAiCoreDeploymentSchema), +}) + +export type SapAiCoreDeployment = z.infer +export type SapAiCoreDeploymentsResponse = z.infer + +/** + * SAP AI Core Fetcher Options + */ +export interface SapAiCoreFetcherOptions { + sapAiCoreClientId: string + sapAiCoreClientSecret: string + sapAiCoreTokenUrl: string + sapAiCoreBaseUrl: string + sapAiResourceGroup?: string +} + +/** + * Cache entry for storing fetched 
models and authentication tokens + */ +interface SapAiCoreCacheEntry { + models: ModelRecord + token?: { + access_token: string + expires_at: number + } + timestamp: number +} + +// Cache duration: 5 minutes for models, token expiry handled separately +const CACHE_DURATION = 5 * 60 * 1000 +let cache: SapAiCoreCacheEntry | null = null + +/** + * Authenticates with SAP AI Core and returns an access token + */ +async function authenticateWithSapAiCore(options: SapAiCoreFetcherOptions): Promise { + // Validate HTTPS requirement for security + if (!options.sapAiCoreTokenUrl.startsWith("https://")) { + throw new Error("SAP AI Core Token URL must use HTTPS for security") + } + + const payload = new URLSearchParams({ + grant_type: "client_credentials", + client_id: options.sapAiCoreClientId, + client_secret: options.sapAiCoreClientSecret, + }) + + const tokenUrl = options.sapAiCoreTokenUrl.replace(/\/+$/, "") + "/oauth/token" + + try { + const response = await axios.post(tokenUrl, payload, { + headers: { + "Content-Type": "application/x-www-form-urlencoded", + ...DEFAULT_HEADERS, + }, + timeout: 10000, // 10 second timeout + }) + + const result = sapAiCoreTokenResponseSchema.safeParse(response.data) + + if (!result.success) { + console.error("SAP AI Core token response validation failed:", result.error.format()) + throw new Error("Invalid token response format from SAP AI Core") + } + + return result.data + } catch (error) { + console.error("SAP AI Core authentication failed:", error) + + if (axios.isAxiosError(error)) { + if (error.response) { + throw new Error( + `SAP AI Core authentication failed: ${error.response.status} ${error.response.statusText}`, + ) + } else if (error.request) { + throw new Error( + "SAP AI Core authentication failed: No response from server. Check your internet connection and token URL.", + ) + } + } + + throw new Error( + `SAP AI Core authentication failed: ${error instanceof Error ? 
error.message : "Unknown error"}`, + ) + } +} + +/** + * Gets a valid access token, using cache if available and not expired + */ +async function getValidToken(options: SapAiCoreFetcherOptions): Promise { + const now = Date.now() + + // Check if we have a cached token that's still valid + if (cache?.token && cache.token.expires_at > now) { + return cache.token.access_token + } + + // Authenticate and get new token + const tokenResponse = await authenticateWithSapAiCore(options) + const expiresAt = now + tokenResponse.expires_in * 1000 + + // Update cache with new token + if (cache) { + cache.token = { + access_token: tokenResponse.access_token, + expires_at: expiresAt, + } + } else { + cache = { + models: {}, + token: { + access_token: tokenResponse.access_token, + expires_at: expiresAt, + }, + timestamp: now, + } + } + + return tokenResponse.access_token +} + +/** + * Fetches deployments from SAP AI Core + */ +async function fetchSapAiCoreDeployments(options: SapAiCoreFetcherOptions): Promise { + // Validate HTTPS requirement for security + if (!options.sapAiCoreBaseUrl.startsWith("https://")) { + throw new Error("SAP AI Core Base URL must use HTTPS for security") + } + + const token = await getValidToken(options) + + const headers = { + Authorization: `Bearer ${token}`, + "AI-Resource-Group": options.sapAiResourceGroup || "default", + "Content-Type": "application/json", + "AI-Client-Type": "Roo-Code", + ...DEFAULT_HEADERS, + } + + const url = `${options.sapAiCoreBaseUrl}/v2/lm/deployments?$top=10000&$skip=0` + + try { + const response = await axios.get(url, { + headers, + timeout: 15000, // 15 second timeout for deployment fetching + }) + + const result = sapAiCoreDeploymentsResponseSchema.safeParse(response.data) + + if (!result.success) { + console.error("Validation errors:", JSON.stringify(result.error.format(), null, 2)) + throw new Error("Invalid deployments response format from SAP AI Core") + } + + return result.data.resources + } catch (error) { + 
console.error("Error fetching SAP AI Core deployments:", error) + + if (axios.isAxiosError(error)) { + if (error.response) { + throw new Error( + `Failed to fetch SAP AI Core deployments: ${error.response.status} ${error.response.statusText}`, + ) + } else if (error.request) { + throw new Error( + "Failed to fetch SAP AI Core deployments: No response from server. Check your internet connection and base URL.", + ) + } + } + + throw new Error( + `Failed to fetch SAP AI Core deployments: ${error instanceof Error ? error.message : "Unknown error"}`, + ) + } +} + +/** + * Parses SAP AI Core deployment into model information + */ +function parseSapAiCoreDeployment(deployment: SapAiCoreDeployment): { modelName: string; modelInfo: ModelInfo } | null { + // Skip non-running deployments + if (deployment.targetStatus !== "RUNNING") { + return null + } + + // Try both camelCase and snake_case property names for backend details + const backendDetails = + deployment.details?.resources?.backendDetails || deployment.details?.resources?.backend_details + + // Skip deployments without model information (e.g., orchestration deployments) + if (!backendDetails?.model?.name) { + return null + } + + const modelName = backendDetails.model.name.toLowerCase() + + // Skip invalid model names + if (!modelName || modelName === "") { + return null + } + + // Get model info from predefined SAP AI Core models + const predefinedModelInfo = sapAiCoreModels[modelName as keyof typeof sapAiCoreModels] + + if (predefinedModelInfo) { + return { + modelName, + modelInfo: { + ...predefinedModelInfo, + description: `${modelName} via SAP AI Core (Deployment: ${deployment.id})`, + }, + } + } + + // Fallback for unknown models - create basic model info + const fallbackModelInfo: ModelInfo = { + maxTokens: 8192, // Conservative default + contextWindow: 200000, // Conservative default + supportsImages: false, // Conservative default + supportsPromptCache: false, // Conservative default + supportsComputerUse: 
false, // Conservative default + description: `${modelName} via SAP AI Core (Deployment: ${deployment.id}) - Unknown model`, + } + + return { + modelName, + modelInfo: fallbackModelInfo, + } +} + +/** + * Fetches available models from SAP AI Core + * + * This function authenticates with SAP AI Core, fetches running deployments, + * and returns model information for deployed models. + * + * @param options SAP AI Core configuration options + * @returns A promise that resolves to a record of model IDs to model info + * @throws Will throw an error if authentication fails or deployments cannot be fetched + */ +export async function getSapAiCoreModels(options: SapAiCoreFetcherOptions): Promise { + const now = Date.now() + + // Check cache first, but also validate token expiration + if (cache && now - cache.timestamp < CACHE_DURATION) { + // If we have a token, check if it's still valid + if (!cache.token || cache.token.expires_at > now) { + return cache.models + } + // Token is expired, proceed with fresh fetch which will re-authenticate + } + + const models: ModelRecord = {} + + try { + // Validate required options + if (!options.sapAiCoreClientId || !options.sapAiCoreClientSecret) { + console.warn("SAP AI Core credentials not provided, returning empty model list") + return models + } + + if (!options.sapAiCoreTokenUrl || !options.sapAiCoreBaseUrl) { + console.warn("SAP AI Core URLs not provided, returning empty model list") + return models + } + + // Fetch deployments + const deployments = await fetchSapAiCoreDeployments(options) + + // Process deployments and extract model information + const processedModels = new Map() + + for (const deployment of deployments) { + const parsed = parseSapAiCoreDeployment(deployment) + + if (parsed) { + const { modelName, modelInfo } = parsed + + // Avoid duplicates - keep the first occurrence + if (!processedModels.has(modelName)) { + processedModels.set(modelName, modelInfo) + } + } + } + + // Convert Map to Record + for (const 
[modelName, modelInfo] of processedModels) { + models[modelName] = modelInfo + } + + // Update cache + cache = { + models, + token: cache?.token, // Preserve existing token if available + timestamp: now, + } + + console.log(`Successfully fetched ${Object.keys(models).length} SAP AI Core models`) + return models + } catch (error) { + console.error("Error fetching SAP AI Core models:", error) + + // Return cached data if available + if (cache) { + console.warn("Returning cached SAP AI Core models due to fetch error") + return cache.models + } + + // For testing purposes when no SAP credentials are available, + // return empty object instead of throwing (allows graceful degradation) + console.warn("No cached SAP AI Core models available, returning empty model list") + return models + } +} + +/** + * Get cached SAP AI Core models without making an API request + */ +export function getCachedSapAiCoreModels(): ModelRecord | null { + return cache?.models || null +} + +/** + * Clear the SAP AI Core models cache + */ +export function clearSapAiCoreCache(): void { + cache = null +} + +/** + * Get deployed model names for the model picker + * This is a convenience function that extracts just the model names + */ +export async function getSapAiCoreDeployedModelNames(options: SapAiCoreFetcherOptions): Promise { + try { + const models = await getSapAiCoreModels(options) + return Object.keys(models).sort() + } catch (error) { + console.error("Error fetching SAP AI Core deployed model names:", error) + return [] + } +} diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index c3786c5f56..0dba3c47e0 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -32,4 +32,5 @@ export { ZAiHandler } from "./zai" export { FireworksHandler } from "./fireworks" export { RooHandler } from "./roo" export { FeatherlessHandler } from "./featherless" +export { SapAiCoreHandler } from "./sapaicore" export { VercelAiGatewayHandler } from "./vercel-ai-gateway" diff 
--git a/src/api/providers/sapaicore.ts b/src/api/providers/sapaicore.ts new file mode 100644 index 0000000000..b6d2a222af --- /dev/null +++ b/src/api/providers/sapaicore.ts @@ -0,0 +1,779 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import axios from "axios" +import OpenAI from "openai" + +import type { ModelInfo } from "@roo-code/types" +import { SapAiCoreModelId, sapAiCoreDefaultModelId, sapAiCoreModels } from "@roo-code/types" + +import type { ApiHandlerOptions } from "../../shared/api" +import type { ApiHandlerCreateMessageMetadata, SingleCompletionHandler } from "../index" +import { convertToOpenAiMessages } from "../transform/openai-format" +import { convertToBedrockConverseMessages } from "../transform/bedrock-converse-format" +import { convertAnthropicContentToGemini } from "../transform/gemini-format" +import { ApiStream } from "../transform/stream" +import { addCacheBreakpoints } from "../transform/caching/anthropic" +import { BaseProvider } from "./base-provider" +import { getSapAiCoreDeployedModelNames } from "./fetchers/sapaicore" + +/** + * Fetches deployed model names from SAP AI Core + * @param options SAP AI Core configuration options + * @returns Promise Array of deployed model base names + * @deprecated Use getSapAiCoreDeployedModelNames from fetchers/sapaicore instead + */ +export async function getSapAiCoreDeployedModels(options: { + sapAiCoreClientId: string + sapAiCoreClientSecret: string + sapAiCoreTokenUrl: string + sapAiCoreBaseUrl: string + sapAiResourceGroup?: string +}): Promise { + // Delegate to the standardized fetcher implementation + return getSapAiCoreDeployedModelNames(options) +} + +interface SapAiCoreHandlerOptions extends ApiHandlerOptions { + sapAiCoreClientId?: string + sapAiCoreClientSecret?: string + sapAiCoreTokenUrl?: string + sapAiResourceGroup?: string + sapAiCoreBaseUrl?: string + reasoningEffort?: "low" | "medium" | "high" | "minimal" + thinkingBudgetTokens?: number +} + +interface Deployment { + id: string + 
name: string +} + +interface Token { + access_token: string + expires_in: number + scope: string + jti: string + token_type: string + expires_at: number +} + +/** + * Process Gemini streaming response with enhanced thinking content support + */ +export function processGeminiStreamChunk(data: any): { + text?: string + reasoning?: string + usageMetadata?: { + promptTokenCount?: number + candidatesTokenCount?: number + thoughtsTokenCount?: number + cachedContentTokenCount?: number + } +} { + const result: ReturnType = {} + + // Early return for null, undefined, or falsy data + if (!data) { + return result + } + + // Handle thinking content from Gemini's response + const candidateForThoughts = data?.candidates?.[0] + const partsForThoughts = candidateForThoughts?.content?.parts + let thoughts = "" + + if (partsForThoughts) { + for (const part of partsForThoughts) { + const { thought, text } = part + if (thought && text) { + thoughts += text + "\n" + } + } + } + + if (thoughts.trim() !== "") { + result.reasoning = thoughts.trim() + } + + // Handle regular text content + if (data.text) { + result.text = data.text + } + + // Handle content parts for non-thought text + if (data.candidates && data.candidates[0]?.content?.parts) { + let nonThoughtText = "" + for (const part of data.candidates[0].content.parts) { + if (part.text && !part.thought) { + nonThoughtText += part.text + } + } + if (nonThoughtText && !result.text) { + result.text = nonThoughtText + } + } + + // Handle usage metadata + if (data.usageMetadata) { + result.usageMetadata = { + promptTokenCount: data.usageMetadata.promptTokenCount, + candidatesTokenCount: data.usageMetadata.candidatesTokenCount, + thoughtsTokenCount: data.usageMetadata.thoughtsTokenCount, + cachedContentTokenCount: data.usageMetadata.cachedContentTokenCount, + } + } + + return result +} + +/** + * Safely parse JSON with fallback handling for common malformed JSON issues + * Used specifically for SAP AI Core streaming responses + */ +export 
function parseJsonSafely(str: string): any { + // Wrap it in parentheses so JS will treat it as an expression + const obj = new Function("return " + str)() + return JSON.stringify(obj) +} + +/** + * Prepare Gemini request payload with thinking configuration using standardized transforms + */ +function prepareGeminiRequestPayload( + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[], + model: { id: SapAiCoreModelId; info: ModelInfo }, + thinkingBudgetTokens?: number, +): any { + // Use standardized Gemini content conversion + const contents = messages.map((message) => ({ + role: message.role === "assistant" ? "model" : "user", + parts: convertAnthropicContentToGemini(message.content), + })) + + const payload = { + contents, + systemInstruction: { + parts: [ + { + text: systemPrompt, + }, + ], + }, + generationConfig: { + maxOutputTokens: model.info.maxTokens, + temperature: 0.0, + }, + } + + // Add thinking config if the model supports it and budget is provided + const thinkingBudget = thinkingBudgetTokens ?? 
0 + + if (thinkingBudget > 0 && model.info.maxThinkingTokens) { + // Add thinking configuration to the payload + ;(payload as any).thinkingConfig = { + thinkingBudget: thinkingBudget, + includeThoughts: true, + } + } + + return payload +} + +export class SapAiCoreHandler extends BaseProvider implements SingleCompletionHandler { + private options: SapAiCoreHandlerOptions + private token?: Token + private deployments?: Deployment[] + + constructor(options: SapAiCoreHandlerOptions) { + super() + this.options = options + } + + private async authenticate(): Promise { + const payload = { + grant_type: "client_credentials", + client_id: this.options.sapAiCoreClientId || "", + client_secret: this.options.sapAiCoreClientSecret || "", + } + + const baseTokenUrl = this.options.sapAiCoreTokenUrl || "" + if (!baseTokenUrl.startsWith("https://")) { + throw new Error("SAP AI Core Token URL must use HTTPS for security") + } + + const tokenUrl = baseTokenUrl.replace(/\/+$/, "") + "/oauth/token" + const response = await axios.post(tokenUrl, payload, { + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + }) + const token = response.data as Token + token.expires_at = Date.now() + token.expires_in * 1000 + return token + } + + private async getToken(): Promise { + if (!this.token || this.token.expires_at < Date.now()) { + this.token = await this.authenticate() + } + return this.token.access_token + } + + private async getAiCoreDeployments(): Promise { + if (this.options.sapAiCoreClientSecret === "") { + return [{ id: "notconfigured", name: "ai-core-not-configured" }] + } + + if (!this.options.sapAiCoreBaseUrl?.startsWith("https://")) { + throw new Error("SAP AI Core Base URL must use HTTPS for security") + } + + const token = await this.getToken() + const headers = { + Authorization: `Bearer ${token}`, + "AI-Resource-Group": this.options.sapAiResourceGroup || "default", + "Content-Type": "application/json", + "AI-Client-Type": "Roo-Code", + } + + const url = 
`${this.options.sapAiCoreBaseUrl}/v2/lm/deployments?$top=10000&$skip=0` + + try { + const response = await axios.get(url, { headers }) + const deployments = response.data.resources + + return deployments + .filter((deployment: any) => deployment.targetStatus === "RUNNING") + .map((deployment: any) => { + const model = deployment.details?.resources?.backend_details?.model + if (!model?.name || !model?.version) { + return null // Skip this row + } + return { + id: deployment.id, + name: `${model.name}:${model.version}`, + } + }) + .filter((deployment: any) => deployment !== null) + } catch (error) { + console.error("Error fetching deployments:", error) + throw new Error("Failed to fetch deployments") + } + } + + private async getDeploymentForModel(modelId: string): Promise { + // If deployments are not fetched yet or the model is not found in the fetched deployments, fetch deployments + if (!this.deployments || !this.hasDeploymentForModel(modelId)) { + this.deployments = await this.getAiCoreDeployments() + } + + const deployment = this.deployments.find((d) => { + const deploymentBaseName = d.name.split(":")[0].toLowerCase() + const modelBaseName = modelId.split(":")[0].toLowerCase() + return deploymentBaseName === modelBaseName + }) + + if (!deployment) { + throw new Error(`No running deployment found for model ${modelId}`) + } + + return deployment.id + } + + private hasDeploymentForModel(modelId: string): boolean { + if (!this.deployments || !Array.isArray(this.deployments)) { + return false + } + return this.deployments.some((d) => { + if (!d.name || typeof d.name !== "string") { + return false + } + const deploymentBaseName = d.name.split(":")[0].toLowerCase() + const modelBaseName = modelId.split(":")[0].toLowerCase() + return deploymentBaseName === modelBaseName + }) + } + + /** + * Get deployed model names for the model picker + * Uses the standardized fetcher implementation + * @returns Promise Array of deployed model base names + */ + async 
getDeployedModelNames(): Promise { + return getSapAiCoreDeployedModelNames({ + sapAiCoreClientId: this.options.sapAiCoreClientId || "", + sapAiCoreClientSecret: this.options.sapAiCoreClientSecret || "", + sapAiCoreTokenUrl: this.options.sapAiCoreTokenUrl || "", + sapAiCoreBaseUrl: this.options.sapAiCoreBaseUrl || "", + sapAiResourceGroup: this.options.sapAiResourceGroup, + }) + } + + async *createMessage( + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[], + metadata?: ApiHandlerCreateMessageMetadata, + ): ApiStream { + if (!this.options.sapAiCoreBaseUrl?.startsWith("https://")) { + throw new Error("SAP AI Core Base URL must use HTTPS for security") + } + + const token = await this.getToken() + const headers = { + Authorization: `Bearer ${token}`, + "AI-Resource-Group": this.options.sapAiResourceGroup || "default", + "Content-Type": "application/json", + "AI-Client-Type": "Roo-Code", + } + + const model = this.getModel() + + const anthropicModels = [ + "anthropic--claude-4-sonnet", + "anthropic--claude-4-opus", + "anthropic--claude-3.7-sonnet", + "anthropic--claude-3.5-sonnet", + "anthropic--claude-3-sonnet", + "anthropic--claude-3-haiku", + "anthropic--claude-3-opus", + ] + + const openAIModels = [ + "gpt-4o", + "gpt-4", + "gpt-4o-mini", + "o1", + "gpt-4.1", + "gpt-4.1-nano", + "gpt-5", + "gpt-5-nano", + "gpt-5-mini", + "o3-mini", + "o3", + "o4-mini", + ] + + const geminiModels = ["gemini-2.5-flash", "gemini-2.5-pro"] + // Check if model is supported before getting deployment + if ( + !anthropicModels.includes(model.id) && + !openAIModels.includes(model.id) && + !geminiModels.includes(model.id) + ) { + throw new Error(`Unsupported model: ${model.id}`) + } + + const deploymentId = await this.getDeploymentForModel(model.id) + + let url: string + let payload: any + + if (anthropicModels.includes(model.id)) { + url = `${this.options.sapAiCoreBaseUrl}/v2/inference/deployments/${deploymentId}/invoke-with-response-stream` + + // Use standardized 
Bedrock Converse format transformer + const formattedMessages = convertToBedrockConverseMessages(messages) + + // Get message indices for caching + const userMsgIndices = messages.reduce( + (acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc), + [] as number[], + ) + const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1 + const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1 + + if ( + model.id === "anthropic--claude-4-sonnet" || + model.id === "anthropic--claude-4-opus" || + model.id === "anthropic--claude-3.7-sonnet" + ) { + // Use converse-stream endpoint with caching support + url = `${this.options.sapAiCoreBaseUrl}/v2/inference/deployments/${deploymentId}/converse-stream` + + const messagesWithCache = convertToBedrockConverseMessages(messages) + + const lastMessageIndex = messagesWithCache.length - 1 + if (lastMessageIndex >= 0 && messagesWithCache[lastMessageIndex].role === "user") { + const content = messagesWithCache[lastMessageIndex].content + if (Array.isArray(content) && content.length > 0) { + const lastContent = content[content.length - 1] as any + lastContent.cache_control = { type: "ephemeral" } + } + } + + payload = { + inferenceConfig: { + maxTokens: model.info.maxTokens, + temperature: 0.0, + }, + system: [{ text: systemPrompt, cache_control: { type: "ephemeral" } }], + messages: messagesWithCache, + } + } else { + // Use invoke-with-response-stream endpoint + payload = { + max_tokens: model.info.maxTokens, + system: systemPrompt, + messages, + anthropic_version: "bedrock-2023-05-31", + } + } + } else if (openAIModels.includes(model.id)) { + // Use standardized OpenAI message conversion + const openAiMessages = convertToOpenAiMessages(messages) + + url = `${this.options.sapAiCoreBaseUrl}/v2/inference/deployments/${deploymentId}/chat/completions?api-version=2024-12-01-preview` + payload = { + stream: true, + messages: [{ role: "system", content: systemPrompt }, ...openAiMessages], + 
max_tokens: model.info.maxTokens, + temperature: 0.0, + frequency_penalty: 0, + presence_penalty: 0, + stop: null, + stream_options: { include_usage: true }, + } + + // Handle reasoning models + if (["o1", "o3-mini", "o3", "o4-mini", "gpt-5", "gpt-5-nano", "gpt-5-mini"].includes(model.id)) { + delete payload.max_tokens + delete payload.temperature + + // Add reasoning effort for reasoning models + if (this.options.reasoningEffort) { + payload.reasoning_effort = this.options.reasoningEffort + } + } + + if (model.id === "o3-mini") { + delete payload.stream + delete payload.stream_options + } + } else if (geminiModels.includes(model.id)) { + url = `${this.options.sapAiCoreBaseUrl}/v2/inference/deployments/${deploymentId}/models/${model.id}:streamGenerateContent` + payload = prepareGeminiRequestPayload(systemPrompt, messages, model, this.options.thinkingBudgetTokens) + } else { + // This should never be reached due to the earlier model support check + throw new Error(`Unsupported model: ${model.id}`) + } + try { + const response = await axios.post(url, JSON.stringify(payload, null, 2), { + headers, + responseType: "stream", + }) + + if (model.id === "o3-mini") { + const response = await axios.post(url, JSON.stringify(payload, null, 2), { headers }) + + // Yield the usage information + if (response.data.usage) { + yield { + type: "usage", + inputTokens: response.data.usage.prompt_tokens, + outputTokens: response.data.usage.completion_tokens, + } + } + + // Yield the content + if (response.data.choices && response.data.choices.length > 0) { + yield { + type: "text", + text: response.data.choices[0].message.content, + } + } + } else if (openAIModels.includes(model.id)) { + yield* this.streamCompletionGPT(response.data, model) + } else if ( + model.id === "anthropic--claude-4-sonnet" || + model.id === "anthropic--claude-4-opus" || + model.id === "anthropic--claude-3.7-sonnet" + ) { + yield* this.streamCompletionSonnet37(response.data, model) + } else if 
(geminiModels.includes(model.id)) { + yield* this.streamCompletionGemini(response.data, model) + } else { + yield* this.streamCompletion(response.data, model) + } + } catch (error: any) { + if (error.response) { + console.error("Error status:", error.response.status) + console.error("Error data:", error.response.data) + + if (error.response.status === 404) { + throw new Error(`404 Not Found: ${error.response.data}`) + } + + // Handle other HTTP errors + throw new Error("Failed to create message") + } else if (error.request) { + throw new Error("No response received from server") + } else { + throw new Error(`Error setting up request: ${error.message}`) + } + } + } + + private async *streamCompletion(stream: any, _model: { id: SapAiCoreModelId; info: ModelInfo }): ApiStream { + const usage = { input_tokens: 0, output_tokens: 0 } + + try { + for await (const chunk of stream) { + const lines = chunk.toString().split("\n").filter(Boolean) + for (const line of lines) { + if (line.startsWith("data: ")) { + const jsonData = line.slice(6) + try { + const data = JSON.parse(jsonData) + if (data.type === "message_start") { + usage.input_tokens = data.message.usage.input_tokens + yield { + type: "usage", + inputTokens: usage.input_tokens, + outputTokens: usage.output_tokens, + } + } else if (data.type === "content_block_start" || data.type === "content_block_delta") { + const contentBlock = + data.type === "content_block_start" ? 
data.content_block : data.delta + + if (contentBlock.type === "text" || contentBlock.type === "text_delta") { + yield { + type: "text", + text: contentBlock.text || "", + } + } + } else if (data.type === "message_delta") { + if (data.usage) { + usage.output_tokens = data.usage.output_tokens + yield { + type: "usage", + inputTokens: 0, + outputTokens: data.usage.output_tokens, + } + } + } + } catch (error) { + console.error("Failed to parse JSON data:", error) + } + } + } + } + } catch (error) { + console.error("Error streaming completion:", error) + throw error + } + } + + private async *streamCompletionSonnet37(stream: any, _model: { id: SapAiCoreModelId; info: ModelInfo }): ApiStream { + try { + for await (const chunk of stream) { + const lines = chunk.toString().split("\n").filter(Boolean) + + for (const line of lines) { + if (line.startsWith("data: ")) { + const jsonData = line.slice(6) + + try { + const data = JSON.parse(parseJsonSafely(jsonData)) + + // Handle metadata (token usage) + if (data.metadata?.usage) { + let inputTokens = data.metadata.usage.inputTokens || 0 + const outputTokens = data.metadata.usage.outputTokens || 0 + + // calibrate input token + const totalTokens = data.metadata.usage.totalTokens || 0 + const cacheReadInputTokens = data.metadata.usage.cacheReadInputTokens || 0 + const cacheWriteOutputTokens = data.metadata.usage.cacheWriteOutputTokens || 0 + if ( + inputTokens + outputTokens + cacheReadInputTokens + cacheWriteOutputTokens !== + totalTokens + ) { + inputTokens = + totalTokens - outputTokens - cacheReadInputTokens - cacheWriteOutputTokens + } + + yield { + type: "usage", + inputTokens, + outputTokens, + } + } + + // Handle content block delta (text generation) + if (data.contentBlockDelta) { + if (data.contentBlockDelta?.delta?.text) { + yield { + type: "text", + text: data.contentBlockDelta.delta.text, + } + } + + // Handle reasoning content if present + if (data.contentBlockDelta?.delta?.reasoningContent?.text) { + yield { + 
type: "reasoning", + text: data.contentBlockDelta.delta.reasoningContent.text, + } + } + } + } catch (error) { + console.error("Failed to parse JSON data:", error) + } + } + } + } + } catch (error) { + console.error("Error streaming completion:", error) + throw error + } + } + + private async *streamCompletionGPT(stream: any, _model: { id: SapAiCoreModelId; info: ModelInfo }): ApiStream { + let inputTokens = 0 + let outputTokens = 0 + + try { + for await (const chunk of stream) { + const lines = chunk.toString().split("\n").filter(Boolean) + for (const line of lines) { + if (line.trim() === "data: [DONE]") { + // End of stream, yield final usage + yield { + type: "usage", + inputTokens, + outputTokens, + } + return + } + + if (line.startsWith("data: ")) { + const jsonData = line.slice(6) + try { + const data = JSON.parse(jsonData) + + if (data.choices && data.choices.length > 0) { + const choice = data.choices[0] + if (choice.delta && choice.delta.content) { + yield { + type: "text", + text: choice.delta.content, + } + } + } + + // Handle usage information + if (data.usage) { + inputTokens = data.usage.prompt_tokens || inputTokens + outputTokens = data.usage.completion_tokens || outputTokens + yield { + type: "usage", + inputTokens, + outputTokens, + } + } + } catch (error) { + console.error("Failed to parse GPT JSON data:", error) + } + } + } + } + } catch (error) { + console.error("Error streaming GPT completion:", error) + throw error + } + } + + private async *streamCompletionGemini(stream: any, _model: { id: SapAiCoreModelId; info: ModelInfo }): ApiStream { + let promptTokens = 0 + let outputTokens = 0 + let cacheReadTokens = 0 + let thoughtsTokenCount = 0 + + try { + for await (const chunk of stream) { + const lines = chunk.toString().split("\n").filter(Boolean) + for (const line of lines) { + if (line.startsWith("data: ")) { + const jsonData = line.slice(6) + try { + const data = JSON.parse(jsonData) + + // Use Gemini module function to process the chunk + 
const processed = processGeminiStreamChunk(data) + + // Yield reasoning if present + if (processed.reasoning) { + yield { + type: "reasoning", + text: processed.reasoning, + } + } + + // Yield text if present + if (processed.text) { + yield { + type: "text", + text: processed.text, + } + } + + if (processed.usageMetadata) { + promptTokens = processed.usageMetadata.promptTokenCount ?? promptTokens + outputTokens = processed.usageMetadata.candidatesTokenCount ?? outputTokens + thoughtsTokenCount = processed.usageMetadata.thoughtsTokenCount ?? thoughtsTokenCount + cacheReadTokens = processed.usageMetadata.cachedContentTokenCount ?? cacheReadTokens + + yield { + type: "usage", + inputTokens: promptTokens - cacheReadTokens, + outputTokens, + } + } + } catch (error) { + console.error("Failed to parse Gemini JSON data:", error) + } + } + } + } + } catch (error) { + console.error("Error streaming Gemini completion:", error) + throw error + } + } + async completePrompt(prompt: string): Promise { + // For SAP AI Core, we'll use the standard createMessage flow + // and extract the text response + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: prompt, + }, + ] + + let result = "" + for await (const chunk of this.createMessage("", messages)) { + if (chunk.type === "text") { + result += chunk.text + } + } + + return result + } + + override getModel(): { id: SapAiCoreModelId; info: ModelInfo } { + const modelId = this.options.apiModelId + const models = sapAiCoreModels as Record + + if (modelId && modelId in models) { + const id = modelId as SapAiCoreModelId + return { id, info: models[id] } + } + + // If modelId is provided but not found in models, return it as-is for proper error handling + if (modelId) { + const id = modelId as SapAiCoreModelId + // Use default model info as fallback to prevent crashes, but preserve the unsupported ID + return { id, info: models[sapAiCoreDefaultModelId] } + } + + return { id: sapAiCoreDefaultModelId, info: 
models[sapAiCoreDefaultModelId] } + } +} diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 9e4434745f..f71695e03d 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -2213,7 +2213,7 @@ export class ClineProvider await ExtensionBridgeService.handleRemoteControlState( userInfo, enabled, - { ...bridgeConfig, provider: this, sessionId: vscode.env.sessionId }, + { ...bridgeConfig, provider: this as any, sessionId: vscode.env.sessionId }, (message: string) => this.log(message), ) diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index a6a7e7022a..b7be5ca12b 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -647,6 +647,58 @@ export const webviewMessageHandler = async ( routerModels: fetchedRouterModels as Record, }) + break + case "requestSapAiCoreModels": + const sapAiCoreConfig = await provider.getState().then((state) => state.apiConfiguration) + + try { + // Import the function dynamically to avoid circular dependencies + const { getSapAiCoreDeployedModels } = await import("../../api/providers/sapaicore") + + // Check if required configuration is provided + if ( + !sapAiCoreConfig.sapAiCoreClientId || + !sapAiCoreConfig.sapAiCoreClientSecret || + !sapAiCoreConfig.sapAiCoreBaseUrl || + !sapAiCoreConfig.sapAiCoreTokenUrl + ) { + provider.postMessageToWebview({ + type: "sapAiCoreModels", + sapAiCoreModels: { + success: false, + error: "SAP AI Core configuration is incomplete", + models: [], + }, + }) + break + } + + const deployedModels = await getSapAiCoreDeployedModels({ + sapAiCoreClientId: sapAiCoreConfig.sapAiCoreClientId, + sapAiCoreClientSecret: sapAiCoreConfig.sapAiCoreClientSecret, + sapAiCoreTokenUrl: sapAiCoreConfig.sapAiCoreTokenUrl, + sapAiCoreBaseUrl: sapAiCoreConfig.sapAiCoreBaseUrl, + sapAiResourceGroup: sapAiCoreConfig.sapAiResourceGroup, + }) + + 
provider.postMessageToWebview({ + type: "sapAiCoreModels", + sapAiCoreModels: { + success: true, + models: deployedModels, + }, + }) + } catch (error) { + console.error("Error fetching SAP AI Core models:", error) + provider.postMessageToWebview({ + type: "sapAiCoreModels", + sapAiCoreModels: { + success: false, + error: error instanceof Error ? error.message : "Failed to fetch SAP AI Core models", + models: [], + }, + }) + } break case "requestOllamaModels": { // Specific handler for Ollama models only diff --git a/src/extension.ts b/src/extension.ts index f1f413ec36..7addfd8784 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -179,6 +179,7 @@ export async function activate(context: vscode.ExtensionContext) { ) } + // Add to subscriptions for proper cleanup on deactivate. context.subscriptions.push(cloudService) diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index 8812187635..8bdce9d50a 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -73,6 +73,7 @@ export interface ExtensionMessage { | "listApiConfig" | "routerModels" | "openAiModels" + | "sapAiCoreModels" | "ollamaModels" | "lmStudioModels" | "vsCodeLmModels" @@ -145,6 +146,7 @@ export interface ExtensionMessage { clineMessage?: ClineMessage routerModels?: RouterModels openAiModels?: string[] + sapAiCoreModels?: { success: boolean; models: string[]; error?: string } ollamaModels?: string[] lmStudioModels?: ModelRecord vsCodeLmModels?: { vendor?: string; family?: string; version?: string; id?: string }[] diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 57bad0e402..f7e13b278f 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -63,6 +63,7 @@ export interface WebviewMessage { | "resetState" | "flushRouterModels" | "requestRouterModels" + | "requestSapAiCoreModels" | "requestOpenAiModels" | "requestOllamaModels" | "requestLmStudioModels" diff --git a/src/shared/api.ts b/src/shared/api.ts 
index 30dfd7393b..2333ed2845 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -153,3 +153,11 @@ export type GetModelsOptions = | { provider: "lmstudio"; baseUrl?: string } | { provider: "io-intelligence"; apiKey: string } | { provider: "vercel-ai-gateway" } + | { + provider: "sapaicore" + sapAiCoreClientId: string + sapAiCoreClientSecret: string + sapAiCoreTokenUrl: string + sapAiCoreBaseUrl: string + sapAiResourceGroup?: string + } diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 80ecd75ae4..4dfafc5fc7 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -35,6 +35,7 @@ import { featherlessDefaultModelId, ioIntelligenceDefaultModelId, rooDefaultModelId, + sapAiCoreDefaultModelId, vercelAiGatewayDefaultModelId, } from "@roo-code/types" @@ -92,6 +93,7 @@ import { ZAi, Fireworks, Featherless, + SapAiCore, VercelAiGateway, } from "./providers" @@ -337,6 +339,7 @@ const ApiOptions = ({ featherless: { field: "apiModelId", default: featherlessDefaultModelId }, "io-intelligence": { field: "ioIntelligenceModelId", default: ioIntelligenceDefaultModelId }, roo: { field: "apiModelId", default: rooDefaultModelId }, + sapaicore: { field: "apiModelId", default: sapAiCoreDefaultModelId }, "vercel-ai-gateway": { field: "vercelAiGatewayModelId", default: vercelAiGatewayDefaultModelId }, openai: { field: "openAiModelId" }, ollama: { field: "ollamaModelId" }, @@ -658,7 +661,11 @@ const ApiOptions = ({ )} - {selectedProviderModels.length > 0 && ( + {selectedProvider === "sapaicore" && ( + + )} + + {selectedProviderModels.length > 0 && selectedProvider !== "sapaicore" && ( <>
diff --git a/webview-ui/src/components/settings/SapAiCoreModelPicker.tsx b/webview-ui/src/components/settings/SapAiCoreModelPicker.tsx new file mode 100644 index 0000000000..14c67c2fe3 --- /dev/null +++ b/webview-ui/src/components/settings/SapAiCoreModelPicker.tsx @@ -0,0 +1,119 @@ +import React, { useMemo } from "react" + +import { sapAiCoreModels } from "@roo-code/types" +import { useAppTranslation } from "@src/i18n/TranslationContext" + +import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@src/components/ui/select" + +export interface SapAiCoreModelPickerProps { + selectedModelId: string + onModelChange: (modelId: string) => void + placeholder?: string + deployedModels?: string[] // Optional: for future deployment status support +} + +interface CategorizedModel { + id: string + isDeployed: boolean + section: "deployed" | "supported" +} + +const SapAiCoreModelPicker: React.FC = ({ + selectedModelId, + onModelChange, + placeholder, + deployedModels = [], // Default to empty array when no deployment info is available +}) => { + const { t } = useAppTranslation() + const placeholderText = placeholder ?? 
t("settings:common.select") + const categorizedModels = useMemo(() => { + const allSupportedModels = Object.keys(sapAiCoreModels) + + // Models that are both deployed AND supported in Roo-Code + const deployedAndSupported = deployedModels.filter((deployedModel: string) => + allSupportedModels.includes(deployedModel), + ) + + // Models that are supported in Roo-Code but NOT deployed (or no deployment status available) + const supportedButNotDeployed = allSupportedModels.filter( + (supportedModel: string) => !deployedModels.includes(supportedModel), + ) + + const deployed: CategorizedModel[] = deployedAndSupported.map((id: string) => ({ + id, + isDeployed: true, + section: "deployed" as const, + })) + + const supported: CategorizedModel[] = supportedButNotDeployed.map((id: string) => ({ + id, + isDeployed: false, + section: "supported" as const, + })) + + return { deployed, supported } + }, [deployedModels]) + + return ( +
+ + +
+ ) +} + +export default SapAiCoreModelPicker diff --git a/webview-ui/src/components/settings/__tests__/SapAiCoreModelPicker.spec.tsx b/webview-ui/src/components/settings/__tests__/SapAiCoreModelPicker.spec.tsx new file mode 100644 index 0000000000..fcba299183 --- /dev/null +++ b/webview-ui/src/components/settings/__tests__/SapAiCoreModelPicker.spec.tsx @@ -0,0 +1,91 @@ +import React from "react" +import { render, screen } from "@testing-library/react" +import { describe, it, expect, vi } from "vitest" + +import SapAiCoreModelPicker from "../SapAiCoreModelPicker" + +// Mock sapAiCoreModels (partial mock preserving other exports like DEFAULT_MODES) +vi.mock("@roo-code/types", async () => { + const actual = await vi.importActual("@roo-code/types") + return { + ...actual, + sapAiCoreModels: { + "anthropic--claude-3.5-sonnet": { + id: "anthropic--claude-3.5-sonnet", + name: "Claude 3.5 Sonnet", + }, + "gpt-4o": { + id: "gpt-4o", + name: "GPT-4o", + }, + "gemini-1.5-pro": { + id: "gemini-1.5-pro", + name: "Gemini 1.5 Pro", + }, + }, + } +}) + +describe("SapAiCoreModelPicker", () => { + const mockOnModelChange = vi.fn() + + beforeEach(() => { + mockOnModelChange.mockClear() + }) + + it("renders with default placeholder", () => { + render() + + expect(screen.getByText("settings:modelPicker.label")).toBeInTheDocument() + expect(screen.getByRole("combobox")).toBeInTheDocument() + }) + + it("shows only supported models when no deployed models", () => { + render() + + // Should not show section headers when no deployed models + expect(screen.queryByText("🟢 Deployed Models (Ready to Use)")).not.toBeInTheDocument() + expect(screen.queryByText("🔴 Not Deployed Models (Require Deployment)")).not.toBeInTheDocument() + }) + + it("shows deployed models first when deployment info is available", () => { + render( + , + ) + + // Click to open dropdown and check that options are properly categorized + // Note: Since we're using a custom Select component, we'll need to adapt this test + 
// based on how the Select component works in the actual implementation + expect(screen.getByText("settings:modelPicker.label")).toBeInTheDocument() + }) + + it("renders with custom placeholder", () => { + render( + , + ) + + expect(screen.getByText("settings:modelPicker.label")).toBeInTheDocument() + }) + + it("handles model selection", () => { + render( + , + ) + + // Check that the component renders with the selected model + expect(screen.getByText("settings:modelPicker.label")).toBeInTheDocument() + }) +}) diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts index 9aa02bbf53..303db4fc9e 100644 --- a/webview-ui/src/components/settings/constants.ts +++ b/webview-ui/src/components/settings/constants.ts @@ -21,6 +21,7 @@ import { fireworksModels, rooModels, featherlessModels, + sapAiCoreModels, } from "@roo-code/types" export const MODELS_BY_PROVIDER: Partial>> = { @@ -44,6 +45,7 @@ export const MODELS_BY_PROVIDER: Partial a.label.localeCompare(b.label)) diff --git a/webview-ui/src/components/settings/providers/SapAiCore.tsx b/webview-ui/src/components/settings/providers/SapAiCore.tsx new file mode 100644 index 0000000000..bf7f52b21a --- /dev/null +++ b/webview-ui/src/components/settings/providers/SapAiCore.tsx @@ -0,0 +1,135 @@ +import { useCallback, useMemo } from "react" +import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react" + +import type { ProviderSettings } from "@roo-code/types" + +import { useAppTranslation } from "@src/i18n/TranslationContext" +import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" +import { useSapAiCoreModels } from "@src/components/ui/hooks/useSapAiCoreModels" +import SapAiCoreModelPicker from "../SapAiCoreModelPicker" + +import { inputEventTransform } from "../transforms" + +type SapAiCoreProps = { + apiConfiguration: ProviderSettings + setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => 
void +} + +export const SapAiCore = ({ apiConfiguration, setApiConfigurationField }: SapAiCoreProps) => { + const { t } = useAppTranslation() + + // Check if all required fields are provided to enable model fetching + const hasRequiredConfig = useMemo(() => { + return !!( + apiConfiguration?.sapAiCoreClientId && + apiConfiguration?.sapAiCoreClientSecret && + apiConfiguration?.sapAiCoreBaseUrl && + apiConfiguration?.sapAiCoreTokenUrl + ) + }, [ + apiConfiguration?.sapAiCoreClientId, + apiConfiguration?.sapAiCoreClientSecret, + apiConfiguration?.sapAiCoreBaseUrl, + apiConfiguration?.sapAiCoreTokenUrl, + ]) + + // Fetch deployed models when configuration is complete + const { data: deployedModelsResponse, isLoading, error } = useSapAiCoreModels(hasRequiredConfig) + + // Extract deployed models array + const deployedModels = useMemo(() => { + return deployedModelsResponse?.success ? deployedModelsResponse.models : [] + }, [deployedModelsResponse]) + + const handleInputChange = useCallback( + ( + field: K, + transform: (event: E) => ProviderSettings[K] = inputEventTransform, + ) => + (event: E | Event) => { + setApiConfigurationField(field, transform(event as E)) + }, + [setApiConfigurationField], + ) + + return ( + <> + + + + + + + + + + + + + + + + + + + + + { + setApiConfigurationField("apiModelId", value) + }} + placeholder={t("settings:common.select")} + deployedModels={deployedModels} + /> + + {/* Show loading/error states for model fetching */} + {hasRequiredConfig && isLoading && ( +
+ {t("settings:providers.sapAiCoreFetchingModels")} +
+ )} + {hasRequiredConfig && error && ( +
+ {t("settings:providers.sapAiCoreFetchModelsFailed")} {error.message} +
+ )} + {hasRequiredConfig && deployedModelsResponse && !deployedModelsResponse.success && ( +
+ {deployedModelsResponse.error || t("settings:providers.sapAiCoreFetchModelsFailedGeneric")} +
+ )} + +
+ {t("settings:providers.apiKeyStorageNotice")} +
+ + + {t("settings:providers.sapAiCoreLearnMore")} + + + ) +} diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index eedbba0c29..c91bacc327 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -28,4 +28,5 @@ export { ZAi } from "./ZAi" export { LiteLLM } from "./LiteLLM" export { Fireworks } from "./Fireworks" export { Featherless } from "./Featherless" +export { SapAiCore } from "./SapAiCore" export { VercelAiGateway } from "./VercelAiGateway" diff --git a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts index 9d8c627b7f..c820a1ceaf 100644 --- a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts +++ b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts @@ -514,3 +514,82 @@ describe("useSelectedModel", () => { }) }) }) + +describe("sapaicore provider", () => { + it("returns default SAP AI Core model id and info when apiModelId is not set", () => { + // Provide minimal router/openrouter data so the hook computes selection + mockUseRouterModels.mockReturnValue({ + data: { + openrouter: {}, + requesty: {}, + glama: {}, + unbound: {}, + litellm: {}, + "io-intelligence": {}, + }, + isLoading: false, + isError: false, + } as any) + + mockUseOpenRouterModelProviders.mockReturnValue({ + data: {}, + isLoading: false, + isError: false, + } as any) + + const apiConfiguration: ProviderSettings = { + apiProvider: "sapaicore", + // no apiModelId provided + } + + const wrapper = createWrapper() + const { result } = renderHook(() => useSelectedModel(apiConfiguration), { wrapper }) + + // Default currently defined by sapAiCoreDefaultModelId = "anthropic--claude-3.5-sonnet" + expect(result.current.provider).toBe("sapaicore") + expect(result.current.id).toBe("anthropic--claude-3.5-sonnet") + // Ensure info 
is populated from catalog + expect(result.current.info).toBeDefined() + expect(result.current.info?.contextWindow).toBe(200000) + expect(result.current.info?.supportsImages).toBe(true) + expect(result.current.info?.supportsPromptCache).toBe(false) + }) + + it("returns explicit SAP AI Core model id and correct info when apiModelId is set", () => { + // Provide minimal router/openrouter data so the hook computes selection + mockUseRouterModels.mockReturnValue({ + data: { + openrouter: {}, + requesty: {}, + glama: {}, + unbound: {}, + litellm: {}, + "io-intelligence": {}, + }, + isLoading: false, + isError: false, + } as any) + + mockUseOpenRouterModelProviders.mockReturnValue({ + data: {}, + isLoading: false, + isError: false, + } as any) + + const apiConfiguration: ProviderSettings = { + apiProvider: "sapaicore", + apiModelId: "anthropic--claude-3.7-sonnet", + } + + const wrapper = createWrapper() + const { result } = renderHook(() => useSelectedModel(apiConfiguration), { wrapper }) + + expect(result.current.provider).toBe("sapaicore") + expect(result.current.id).toBe("anthropic--claude-3.7-sonnet") + // Ensure info is populated from catalog for 3.7 Sonnet (1M ctx, prompt cache true) + expect(result.current.info).toBeDefined() + expect(result.current.info?.contextWindow).toBe(200000) + expect(result.current.info?.supportsImages).toBe(true) + expect(result.current.info?.supportsPromptCache).toBe(true) + }) +}) diff --git a/webview-ui/src/components/ui/hooks/useSapAiCoreModels.ts b/webview-ui/src/components/ui/hooks/useSapAiCoreModels.ts new file mode 100644 index 0000000000..fb71031ed9 --- /dev/null +++ b/webview-ui/src/components/ui/hooks/useSapAiCoreModels.ts @@ -0,0 +1,44 @@ +import { useQuery } from "@tanstack/react-query" + +import { ExtensionMessage } from "@roo/ExtensionMessage" + +import { vscode } from "@src/utils/vscode" + +const getSapAiCoreModels = async () => + new Promise<{ success: boolean; models: string[]; error?: string }>((resolve, reject) => { + 
const cleanup = () => { + window.removeEventListener("message", handler) + } + + const timeout = setTimeout(() => { + cleanup() + reject(new Error("SAP AI Core models request timed out")) + }, 15000) // Longer timeout for API calls + + const handler = (event: MessageEvent) => { + const message: ExtensionMessage = event.data + + if (message.type === "sapAiCoreModels") { + clearTimeout(timeout) + cleanup() + + if (message.sapAiCoreModels) { + resolve(message.sapAiCoreModels) + } else { + reject(new Error("No SAP AI Core models in response")) + } + } + } + + window.addEventListener("message", handler) + vscode.postMessage({ type: "requestSapAiCoreModels" }) + }) + +export const useSapAiCoreModels = (enabled: boolean = false) => + useQuery({ + queryKey: ["sapAiCoreModels"], + queryFn: getSapAiCoreModels, + enabled, + retry: 1, + staleTime: 5 * 60 * 1000, // 5 minutes + }) diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index e9470e0902..b697ca1d55 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -56,6 +56,8 @@ import { qwenCodeModels, vercelAiGatewayDefaultModelId, BEDROCK_CLAUDE_SONNET_4_MODEL_ID, + sapAiCoreDefaultModelId, + sapAiCoreModels, } from "@roo-code/types" import type { ModelRecord, RouterModels } from "@roo/api" @@ -325,6 +327,12 @@ function getSelectedModel({ info: rooModels[rooDefaultModelId as keyof typeof rooModels], } } + + case "sapaicore": { + const id = apiConfiguration.apiModelId ?? sapAiCoreDefaultModelId + const info = sapAiCoreModels[id as keyof typeof sapAiCoreModels] + return { id, info } + } case "qwen-code": { const id = apiConfiguration.apiModelId ?? 
qwenCodeDefaultModelId const info = qwenCodeModels[id as keyof typeof qwenCodeModels] @@ -339,7 +347,7 @@ function getSelectedModel({ // case "human-relay": // case "fake-ai": default: { - provider satisfies "anthropic" | "gemini-cli" | "qwen-code" | "human-relay" | "fake-ai" + provider satisfies "anthropic" | "gemini-cli" | "qwen-code" | "human-relay" | "fake-ai" | "sapaicore" const id = apiConfiguration.apiModelId ?? anthropicDefaultModelId const baseInfo = anthropicModels[id as keyof typeof anthropicModels] diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index d6cef7045d..6c4618144a 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Es requereix la clau API de Vercel AI Gateway", "ollamaBaseUrlRequired": "Cal una URL base d'Ollama", "baseUrlRequired": "Cal una URL base", - "modelDimensionMinValue": "La dimensió del model ha de ser superior a 0" + "modelDimensionMinValue": "La dimensió del model ha de ser superior a 0", + "sapAiCoreBaseUrl": "Cal una URL base de SAP AI Core.", + "sapAiCoreClientId": "Cal un ID de client de SAP AI Core.", + "sapAiCoreClientSecret": "Cal un secret de client de SAP AI Core.", + "sapAiCoreTokenUrl": "Cal una URL de token de SAP AI Core." 
}, "advancedConfigLabel": "Configuració avançada", "searchMinScoreLabel": "Llindar de puntuació de cerca", @@ -296,6 +300,17 @@ "groqApiKey": "Clau API de Groq", "getSambaNovaApiKey": "Obtenir clau API de SambaNova", "sambaNovaApiKey": "Clau API de SambaNova", + "sapAiCoreBaseUrl": "URL base de SAP AI Core", + "sapAiCoreClientId": "ID de client de SAP AI Core", + "sapAiCoreClientSecret": "Secret de client de SAP AI Core", + "sapAiCoreTokenUrl": "URL de token de SAP AI Core", + "sapAiResourceGroup": "Grup de recursos de SAP AI", + "sapAiCoreFetchingModels": "Obtenint models desplegats de SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Error en obtenir models desplegats:", + "sapAiCoreFetchModelsFailedGeneric": "Error en obtenir models desplegats", + "sapAiCoreLearnMore": "Més informació", + "sapAiCoreDeployedModelsHeader": "Models desplegats (llestos per usar)", + "sapAiCoreSupportedModelsHeader": "Models no desplegats (requereixen desplegament)", "getHuggingFaceApiKey": "Obtenir clau API de Hugging Face", "huggingFaceApiKey": "Clau API de Hugging Face", "huggingFaceModelId": "ID del model", @@ -805,6 +820,10 @@ "modelId": "Heu de proporcionar un ID de model vàlid.", "modelSelector": "Heu de proporcionar un selector de model vàlid.", "openAi": "Heu de proporcionar una URL base, clau API i ID de model vàlids.", + "sapAiCoreBaseUrl": "Cal una URL base de SAP AI Core", + "sapAiCoreClientId": "Cal un ID de client de SAP AI Core", + "sapAiCoreClientSecret": "Cal un secret de client de SAP AI Core", + "sapAiCoreTokenUrl": "Cal una URL de token de SAP AI Core", "arn": { "invalidFormat": "Format ARN no vàlid. Si us plau, comproveu els requisits del format.", "regionMismatch": "Avís: La regió del vostre ARN ({{arnRegion}}) no coincideix amb la regió seleccionada ({{region}}). Això pot causar problemes d'accés. El proveïdor utilitzarà la regió de l'ARN." @@ -826,6 +845,8 @@ "projectId": "Introduïu l'ID del projecte...", "customArn": "Introduïu l'ARN (p. ex. 
arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Introduïu l'URL base...", + "clientId": "Introduïu l'ID del client...", + "clientSecret": "Introduïu el secret del client...", "modelId": { "lmStudio": "p. ex. meta-llama-3.1-8b-instruct", "lmStudioDraft": "p. ex. lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index e222810031..28c35946a2 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API-Schlüssel ist erforderlich", "ollamaBaseUrlRequired": "Ollama-Basis-URL ist erforderlich", "baseUrlRequired": "Basis-URL ist erforderlich", - "modelDimensionMinValue": "Modellabmessung muss größer als 0 sein" + "modelDimensionMinValue": "Modellabmessung muss größer als 0 sein", + "sapAiCoreBaseUrl": "SAP AI Core Basis-URL ist erforderlich.", + "sapAiCoreClientId": "SAP AI Core Client-ID ist erforderlich.", + "sapAiCoreClientSecret": "SAP AI Core Client-Secret ist erforderlich.", + "sapAiCoreTokenUrl": "SAP AI Core Token-URL ist erforderlich." 
}, "advancedConfigLabel": "Erweiterte Konfiguration", "searchMinScoreLabel": "Suchergebnis-Schwellenwert", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API-Schlüssel", "getSambaNovaApiKey": "SambaNova API-Schlüssel erhalten", "sambaNovaApiKey": "SambaNova API-Schlüssel", + "sapAiCoreBaseUrl": "SAP AI Core Basis-URL", + "sapAiCoreClientId": "SAP AI Core Client-ID", + "sapAiCoreClientSecret": "SAP AI Core Client-Secret", + "sapAiCoreTokenUrl": "SAP AI Core Token-URL", + "sapAiResourceGroup": "SAP AI Ressourcengruppe", + "sapAiCoreFetchingModels": "Bereitgestellte Modelle von SAP AI Core werden abgerufen...", + "sapAiCoreFetchModelsFailed": "Fehler beim Abrufen der bereitgestellten Modelle:", + "sapAiCoreFetchModelsFailedGeneric": "Fehler beim Abrufen der bereitgestellten Modelle", + "sapAiCoreLearnMore": "Mehr erfahren", + "sapAiCoreDeployedModelsHeader": "Bereitgestellte Modelle (Einsatzbereit)", + "sapAiCoreSupportedModelsHeader": "Nicht bereitgestellte Modelle (Erfordern Bereitstellung)", "getHuggingFaceApiKey": "Hugging Face API-Schlüssel erhalten", "huggingFaceApiKey": "Hugging Face API-Schlüssel", "huggingFaceModelId": "Modell-ID", @@ -805,6 +820,10 @@ "modelId": "Du musst eine gültige Modell-ID angeben.", "modelSelector": "Du musst einen gültigen Modellselektor angeben.", "openAi": "Du musst eine gültige Basis-URL, einen API-Schlüssel und eine Modell-ID angeben.", + "sapAiCoreBaseUrl": "SAP AI Core Basis-URL ist erforderlich", + "sapAiCoreClientId": "SAP AI Core Client-ID ist erforderlich", + "sapAiCoreClientSecret": "SAP AI Core Client-Secret ist erforderlich", + "sapAiCoreTokenUrl": "SAP AI Core Token-URL ist erforderlich", "arn": { "invalidFormat": "Ungültiges ARN-Format. Bitte überprüfe die Formatanforderungen.", "regionMismatch": "Warnung: Die Region in deiner ARN ({{arnRegion}}) stimmt nicht mit deiner ausgewählten Region ({{region}}) überein. Dies kann zu Zugriffsproblemen führen. Der Anbieter wird die Region aus der ARN verwenden." 
@@ -826,6 +845,8 @@ "projectId": "Projekt-ID eingeben...", "customArn": "ARN eingeben (z.B. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Basis-URL eingeben...", + "clientId": "Client-ID eingeben...", + "clientSecret": "Client-Secret eingeben...", "modelId": { "lmStudio": "z.B. meta-llama-3.1-8b-instruct", "lmStudioDraft": "z.B. lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 33fba24b8e..4df08742b8 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -126,7 +126,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API key is required", "ollamaBaseUrlRequired": "Ollama base URL is required", "baseUrlRequired": "Base URL is required", - "modelDimensionMinValue": "Model dimension must be greater than 0" + "modelDimensionMinValue": "Model dimension must be greater than 0", + "sapAiCoreBaseUrl": "SAP AI Core Base URL is required", + "sapAiCoreClientId": "SAP AI Core Client ID is required", + "sapAiCoreClientSecret": "SAP AI Core Client Secret is required", + "sapAiCoreTokenUrl": "SAP AI Core Token URL is required"
} }, "autoApprove": { @@ -295,6 +299,17 @@ "groqApiKey": "Groq API Key", "getSambaNovaApiKey": "Get SambaNova API Key", "sambaNovaApiKey": "SambaNova API Key", + "sapAiCoreBaseUrl": "SAP AI Core Base URL", + "sapAiCoreClientId": "SAP AI Core Client ID", + "sapAiCoreClientSecret": "SAP AI Core Client Secret", + "sapAiCoreTokenUrl": "SAP AI Core Token URL", + "sapAiResourceGroup": "SAP AI Resource Group", + "sapAiCoreFetchingModels": "Fetching deployed models from SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Failed to fetch deployed models:", + "sapAiCoreFetchModelsFailedGeneric": "Failed to fetch deployed models", + "sapAiCoreLearnMore": "Learn More", + "sapAiCoreDeployedModelsHeader": "Deployed Models (Ready to Use)", + "sapAiCoreSupportedModelsHeader": "Not Deployed Models (Require Deployment)", "getHuggingFaceApiKey": "Get Hugging Face API Key", "huggingFaceApiKey": "Hugging Face API Key", "huggingFaceModelId": "Model ID", @@ -804,6 +819,10 @@ "modelId": "You must provide a valid model ID.", "modelSelector": "You must provide a valid model selector.", "openAi": "You must provide a valid base URL, API key, and model ID.", + "sapAiCoreBaseUrl": "SAP AI Core Base URL is required.", + "sapAiCoreClientId": "SAP AI Core Client ID is required.", + "sapAiCoreClientSecret": "SAP AI Core Client Secret is required.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL is required.", "arn": { "invalidFormat": "Invalid ARN format. Please check the format requirements.", "regionMismatch": "Warning: The region in your ARN ({{arnRegion}}) does not match your selected region ({{region}}). This may cause access issues. The provider will use the region from the ARN." @@ -825,6 +844,8 @@ "projectId": "Enter Project ID...", "customArn": "Enter ARN (e.g. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Enter base URL...", + "clientId": "Enter Client ID...", + "clientSecret": "Enter Client Secret...", "modelId": { "lmStudio": "e.g.
meta-llama-3.1-8b-instruct", "lmStudioDraft": "e.g. lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index b2deeda932..0fb29184e9 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Se requiere la clave API de Vercel AI Gateway", "ollamaBaseUrlRequired": "Se requiere la URL base de Ollama", "baseUrlRequired": "Se requiere la URL base", - "modelDimensionMinValue": "La dimensión del modelo debe ser mayor que 0" + "modelDimensionMinValue": "La dimensión del modelo debe ser mayor que 0", + "sapAiCoreBaseUrl": "Se requiere la URL base de SAP AI Core.", + "sapAiCoreClientId": "Se requiere el ID de cliente de SAP AI Core.", + "sapAiCoreClientSecret": "Se requiere el secreto de cliente de SAP AI Core.", + "sapAiCoreTokenUrl": "Se requiere la URL de token de SAP AI Core." }, "advancedConfigLabel": "Configuración avanzada", "searchMinScoreLabel": "Umbral de puntuación de búsqueda", @@ -296,6 +300,17 @@ "groqApiKey": "Clave API de Groq", "getSambaNovaApiKey": "Obtener clave API de SambaNova", "sambaNovaApiKey": "Clave API de SambaNova", + "sapAiCoreBaseUrl": "URL base de SAP AI Core", + "sapAiCoreClientId": "ID de cliente de SAP AI Core", + "sapAiCoreClientSecret": "Secreto de cliente de SAP AI Core", + "sapAiCoreTokenUrl": "URL de token de SAP AI Core", + "sapAiResourceGroup": "Grupo de recursos de SAP AI", + "sapAiCoreFetchingModels": "Obteniendo modelos desplegados de SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Error al obtener modelos desplegados:", + "sapAiCoreFetchModelsFailedGeneric": "Error al obtener modelos desplegados", + "sapAiCoreLearnMore": "Más información", + "sapAiCoreDeployedModelsHeader": "Modelos desplegados (listos para usar)", + "sapAiCoreSupportedModelsHeader": "Modelos no desplegados (requieren despliegue)", "getHuggingFaceApiKey": 
"Obtener clave API de Hugging Face", "huggingFaceApiKey": "Clave API de Hugging Face", "huggingFaceModelId": "ID del modelo", @@ -805,6 +820,10 @@ "modelId": "Debe proporcionar un ID de modelo válido.", "modelSelector": "Debe proporcionar un selector de modelo válido.", "openAi": "Debe proporcionar una URL base, clave API y ID de modelo válidos.", + "sapAiCoreBaseUrl": "Se requiere la URL base de SAP AI Core", + "sapAiCoreClientId": "Se requiere el ID de cliente de SAP AI Core", + "sapAiCoreClientSecret": "Se requiere el secreto de cliente de SAP AI Core", + "sapAiCoreTokenUrl": "Se requiere la URL de token de SAP AI Core", "arn": { "invalidFormat": "Formato de ARN no válido. Por favor, verifique los requisitos de formato.", "regionMismatch": "Advertencia: La región en su ARN ({{arnRegion}}) no coincide con su región seleccionada ({{region}}). Esto puede causar problemas de acceso. El proveedor usará la región del ARN." @@ -826,6 +845,8 @@ "projectId": "Ingrese ID del proyecto...", "customArn": "Ingrese ARN (ej. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Ingrese URL base...", + "clientId": "Ingrese ID de cliente...", + "clientSecret": "Ingrese secreto de cliente...", "modelId": { "lmStudio": "ej. meta-llama-3.1-8b-instruct", "lmStudioDraft": "ej. 
lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index f36a1850dd..0463913283 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "La clé API Vercel AI Gateway est requise", "ollamaBaseUrlRequired": "L'URL de base Ollama est requise", "baseUrlRequired": "L'URL de base est requise", - "modelDimensionMinValue": "La dimension du modèle doit être supérieure à 0" + "modelDimensionMinValue": "La dimension du modèle doit être supérieure à 0", + "sapAiCoreBaseUrl": "L'URL de base SAP AI Core est requise.", + "sapAiCoreClientId": "L'ID client SAP AI Core est requis.", + "sapAiCoreClientSecret": "Le secret client SAP AI Core est requis.", + "sapAiCoreTokenUrl": "L'URL de token SAP AI Core est requise." }, "advancedConfigLabel": "Configuration avancée", "searchMinScoreLabel": "Seuil de score de recherche", @@ -296,6 +300,17 @@ "groqApiKey": "Clé API Groq", "getSambaNovaApiKey": "Obtenir la clé API SambaNova", "sambaNovaApiKey": "Clé API SambaNova", + "sapAiCoreBaseUrl": "URL de base SAP AI Core", + "sapAiCoreClientId": "ID client SAP AI Core", + "sapAiCoreClientSecret": "Secret client SAP AI Core", + "sapAiCoreTokenUrl": "URL de token SAP AI Core", + "sapAiResourceGroup": "Groupe de ressources SAP AI", + "sapAiCoreFetchingModels": "Récupération des modèles déployés depuis SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Échec de la récupération des modèles déployés :", + "sapAiCoreFetchModelsFailedGeneric": "Échec de la récupération des modèles déployés", + "sapAiCoreLearnMore": "En savoir plus", + "sapAiCoreDeployedModelsHeader": "Modèles déployés (prêts à utiliser)", + "sapAiCoreSupportedModelsHeader": "Modèles non déployés (nécessitent un déploiement)", "getHuggingFaceApiKey": "Obtenir la clé API Hugging Face", "huggingFaceApiKey": "Clé API Hugging Face", 
"huggingFaceModelId": "ID du modèle", @@ -805,6 +820,10 @@ "modelId": "Vous devez fournir un ID de modèle valide.", "modelSelector": "Vous devez fournir un sélecteur de modèle valide.", "openAi": "Vous devez fournir une URL de base, une clé API et un ID de modèle valides.", + "sapAiCoreBaseUrl": "L'URL de base SAP AI Core est requise", + "sapAiCoreClientId": "L'ID client SAP AI Core est requis", + "sapAiCoreClientSecret": "Le secret client SAP AI Core est requis", + "sapAiCoreTokenUrl": "L'URL de token SAP AI Core est requise", "arn": { "invalidFormat": "Format ARN invalide. Veuillez vérifier les exigences de format.", "regionMismatch": "Attention : La région dans votre ARN ({{arnRegion}}) ne correspond pas à votre région sélectionnée ({{region}}). Cela peut causer des problèmes d'accès. Le fournisseur utilisera la région de l'ARN." @@ -826,6 +845,8 @@ "projectId": "Saisissez l'ID du projet...", "customArn": "Saisissez l'ARN (ex. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Saisissez l'URL de base...", + "clientId": "Saisissez l'ID client...", + "clientSecret": "Saisissez le secret client...", "modelId": { "lmStudio": "ex. meta-llama-3.1-8b-instruct", "lmStudioDraft": "ex. 
lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 21b390aadd..2710dabac4 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API कुंजी आवश्यक है", "ollamaBaseUrlRequired": "Ollama आधार URL आवश्यक है", "baseUrlRequired": "आधार URL आवश्यक है", - "modelDimensionMinValue": "मॉडल आयाम 0 से बड़ा होना चाहिए" + "modelDimensionMinValue": "मॉडल आयाम 0 से बड़ा होना चाहिए", + "sapAiCoreBaseUrl": "SAP AI Core बेस URL आवश्यक है", + "sapAiCoreClientId": "SAP AI Core क्लाइंट ID आवश्यक है", + "sapAiCoreClientSecret": "SAP AI Core क्लाइंट सीक्रेट आवश्यक है", + "sapAiCoreTokenUrl": "SAP AI Core टोकन URL आवश्यक है" }, "advancedConfigLabel": "उन्नत कॉन्फ़िगरेशन", "searchMinScoreLabel": "खोज स्कोर थ्रेसहोल्ड", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API कुंजी", "getSambaNovaApiKey": "SambaNova API कुंजी प्राप्त करें", "sambaNovaApiKey": "SambaNova API कुंजी", + "sapAiCoreBaseUrl": "SAP AI Core बेस URL", + "sapAiCoreClientId": "SAP AI Core क्लाइंट ID", + "sapAiCoreClientSecret": "SAP AI Core क्लाइंट सीक्रेट", + "sapAiCoreTokenUrl": "SAP AI Core टोकन URL", + "sapAiResourceGroup": "SAP AI रिसोर्स ग्रुप", + "sapAiCoreFetchingModels": "SAP AI Core से तैनात मॉडल प्राप्त कर रहे हैं...", + "sapAiCoreFetchModelsFailed": "तैनात मॉडल प्राप्त करने में विफल:", + "sapAiCoreFetchModelsFailedGeneric": "तैनात मॉडल प्राप्त करने में विफल", + "sapAiCoreLearnMore": "और जानें", + "sapAiCoreDeployedModelsHeader": "तैनात मॉडल (उपयोग के लिए तैयार)", + "sapAiCoreSupportedModelsHeader": "गैर-तैनात मॉडल (तैनाती की आवश्यकता)", "getHuggingFaceApiKey": "Hugging Face API कुंजी प्राप्त करें", "huggingFaceApiKey": "Hugging Face API कुंजी", "huggingFaceModelId": "मॉडल ID", @@ -806,6 +821,10 @@ "modelId": "आपको एक मान्य मॉडल ID प्रदान करनी होगी।", "modelSelector": "आपको एक मान्य मॉडल 
चयनकर्ता प्रदान करना होगा।", "openAi": "आपको एक मान्य बेस URL, API कुंजी और मॉडल ID प्रदान करनी होगी।", + "sapAiCoreBaseUrl": "SAP AI Core बेस URL आवश्यक है", + "sapAiCoreClientId": "SAP AI Core क्लाइंट ID आवश्यक है", + "sapAiCoreClientSecret": "SAP AI Core क्लाइंट सीक्रेट आवश्यक है", + "sapAiCoreTokenUrl": "SAP AI Core टोकन URL आवश्यक है", "arn": { "invalidFormat": "अमान्य ARN प्रारूप। कृपया प्रारूप आवश्यकताएं जांचें।", "regionMismatch": "चेतावनी: आपके ARN में क्षेत्र ({{arnRegion}}) आपके चयनित क्षेत्र ({{region}}) से मेल नहीं खाता। इससे पहुंच संबंधी समस्याएं हो सकती हैं। प्रदाता ARN से क्षेत्र का उपयोग करेगा।" @@ -827,6 +846,8 @@ "projectId": "प्रोजेक्ट ID दर्ज करें...", "customArn": "ARN दर्ज करें (उदा. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "बेस URL दर्ज करें...", + "clientId": "क्लाइंट ID दर्ज करें...", + "clientSecret": "क्लाइंट सीक्रेट दर्ज करें...", "modelId": { "lmStudio": "उदा. meta-llama-3.1-8b-instruct", "lmStudioDraft": "उदा. lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 61c7670078..151fdeadbc 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Kunci API Vercel AI Gateway diperlukan", "ollamaBaseUrlRequired": "URL dasar Ollama diperlukan", "baseUrlRequired": "URL dasar diperlukan", - "modelDimensionMinValue": "Dimensi model harus lebih besar dari 0" + "modelDimensionMinValue": "Dimensi model harus lebih besar dari 0", + "sapAiCoreBaseUrl": "SAP AI Core Base URL diperlukan", + "sapAiCoreClientId": "SAP AI Core Client ID diperlukan", + "sapAiCoreClientSecret": "SAP AI Core Client Secret diperlukan", + "sapAiCoreTokenUrl": "SAP AI Core Token URL diperlukan" }, "advancedConfigLabel": "Konfigurasi Lanjutan", "searchMinScoreLabel": "Ambang Batas Skor Pencarian", @@ -300,6 +304,17 @@ "groqApiKey": "Groq 
API Key", "getSambaNovaApiKey": "Dapatkan SambaNova API Key", "sambaNovaApiKey": "SambaNova API Key", + "sapAiCoreBaseUrl": "SAP AI Core Base URL", + "sapAiCoreClientId": "SAP AI Core Client ID", + "sapAiCoreClientSecret": "SAP AI Core Client Secret", + "sapAiCoreTokenUrl": "SAP AI Core Token URL", + "sapAiResourceGroup": "SAP AI Resource Group", + "sapAiCoreFetchingModels": "Mengambil model yang di-deploy dari SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Gagal mengambil model yang di-deploy:", + "sapAiCoreFetchModelsFailedGeneric": "Gagal mengambil model yang di-deploy", + "sapAiCoreLearnMore": "Pelajari Lebih Lanjut", + "sapAiCoreDeployedModelsHeader": "Model yang Di-deploy (Siap Digunakan)", + "sapAiCoreSupportedModelsHeader": "Model yang Tidak Di-deploy (Memerlukan Deployment)", "getHuggingFaceApiKey": "Dapatkan Kunci API Hugging Face", "huggingFaceApiKey": "Kunci API Hugging Face", "huggingFaceModelId": "ID Model", @@ -843,6 +858,10 @@ "providerNotAllowed": "Provider '{{provider}}' tidak diizinkan oleh organisasi kamu", "modelNotAllowed": "Model '{{model}}' tidak diizinkan untuk provider '{{provider}}' oleh organisasi kamu", "profileInvalid": "Profil ini berisi provider atau model yang tidak diizinkan oleh organisasi kamu", + "sapAiCoreBaseUrl": "SAP AI Core Base URL diperlukan.", + "sapAiCoreClientId": "SAP AI Core Client ID diperlukan.", + "sapAiCoreClientSecret": "SAP AI Core Client Secret diperlukan.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL diperlukan.", "qwenCodeOauthPath": "Kamu harus memberikan jalur kredensial OAuth yang valid" }, "placeholders": { @@ -856,6 +875,8 @@ "projectId": "Masukkan Project ID...", "customArn": "Masukkan ARN (misalnya arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Masukkan base URL...", + "clientId": "Masukkan Client ID...", + "clientSecret": "Masukkan Client Secret...", "modelId": { "lmStudio": "misalnya meta-llama-3.1-8b-instruct", "lmStudioDraft": "misalnya 
lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 3e383fc564..6ac2096b04 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "È richiesta la chiave API Vercel AI Gateway", "ollamaBaseUrlRequired": "È richiesto l'URL di base di Ollama", "baseUrlRequired": "È richiesto l'URL di base", - "modelDimensionMinValue": "La dimensione del modello deve essere maggiore di 0" + "modelDimensionMinValue": "La dimensione del modello deve essere maggiore di 0", + "sapAiCoreBaseUrl": "È richiesto l'URL di base di SAP AI Core", + "sapAiCoreClientId": "È richiesto l'ID client di SAP AI Core", + "sapAiCoreClientSecret": "È richiesto il segreto client di SAP AI Core", + "sapAiCoreTokenUrl": "È richiesto l'URL del token di SAP AI Core" }, "advancedConfigLabel": "Configurazione avanzata", "searchMinScoreLabel": "Soglia punteggio di ricerca", @@ -296,6 +300,17 @@ "groqApiKey": "Chiave API Groq", "getSambaNovaApiKey": "Ottieni chiave API SambaNova", "sambaNovaApiKey": "Chiave API SambaNova", + "sapAiCoreBaseUrl": "URL base SAP AI Core", + "sapAiCoreClientId": "ID client SAP AI Core", + "sapAiCoreClientSecret": "Segreto client SAP AI Core", + "sapAiCoreTokenUrl": "URL token SAP AI Core", + "sapAiResourceGroup": "Gruppo risorse SAP AI", + "sapAiCoreFetchingModels": "Recupero modelli distribuiti da SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Recupero modelli distribuiti fallito:", + "sapAiCoreFetchModelsFailedGeneric": "Recupero modelli distribuiti fallito", + "sapAiCoreLearnMore": "Scopri di più", + "sapAiCoreDeployedModelsHeader": "Modelli distribuiti (pronti all'uso)", + "sapAiCoreSupportedModelsHeader": "Modelli non distribuiti (richiedono distribuzione)", "getHuggingFaceApiKey": "Ottieni chiave API Hugging Face", "huggingFaceApiKey": "Chiave API Hugging Face", 
"huggingFaceModelId": "ID modello", @@ -806,6 +821,10 @@ "modelId": "È necessario fornire un ID modello valido.", "modelSelector": "È necessario fornire un selettore di modello valido.", "openAi": "È necessario fornire un URL base, una chiave API e un ID modello validi.", + "sapAiCoreBaseUrl": "È richiesto l'URL di base di SAP AI Core", + "sapAiCoreClientId": "È richiesto l'ID client di SAP AI Core", + "sapAiCoreClientSecret": "È richiesto il segreto client di SAP AI Core", + "sapAiCoreTokenUrl": "È richiesto l'URL del token di SAP AI Core", "arn": { "invalidFormat": "Formato ARN non valido. Verificare i requisiti del formato.", "regionMismatch": "Attenzione: La regione nel tuo ARN ({{arnRegion}}) non corrisponde alla regione selezionata ({{region}}). Questo potrebbe causare problemi di accesso. Il provider utilizzerà la regione dall'ARN." @@ -827,6 +846,8 @@ "projectId": "Inserisci ID progetto...", "customArn": "Inserisci ARN (es. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Inserisci URL base...", + "clientId": "Inserisci ID client...", + "clientSecret": "Inserisci segreto client...", "modelId": { "lmStudio": "es. meta-llama-3.1-8b-instruct", "lmStudioDraft": "es. 
lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 33dfc8be35..928d85f7d4 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway APIキーが必要です", "ollamaBaseUrlRequired": "OllamaのベースURLが必要です", "baseUrlRequired": "ベースURLが必要です", - "modelDimensionMinValue": "モデルの次元は0より大きくなければなりません" + "modelDimensionMinValue": "モデルの次元は0より大きくなければなりません", + "sapAiCoreBaseUrl": "SAP AI Core ベースURLが必要です。", + "sapAiCoreClientId": "SAP AI Core クライアントIDが必要です。", + "sapAiCoreClientSecret": "SAP AI Core クライアントシークレットが必要です。", + "sapAiCoreTokenUrl": "SAP AI Core トークンURLが必要です。" }, "advancedConfigLabel": "詳細設定", "searchMinScoreLabel": "検索スコアのしきい値", @@ -296,6 +300,17 @@ "groqApiKey": "Groq APIキー", "getSambaNovaApiKey": "SambaNova APIキーを取得", "sambaNovaApiKey": "SambaNova APIキー", + "sapAiCoreBaseUrl": "SAP AI Core ベースURL", + "sapAiCoreClientId": "SAP AI Core クライアントID", + "sapAiCoreClientSecret": "SAP AI Core クライアントシークレット", + "sapAiCoreTokenUrl": "SAP AI Core トークンURL", + "sapAiResourceGroup": "SAP AI リソースグループ", + "sapAiCoreFetchingModels": "SAP AI Coreからデプロイ済みモデルを取得中...", + "sapAiCoreFetchModelsFailed": "デプロイ済みモデルの取得に失敗しました:", + "sapAiCoreFetchModelsFailedGeneric": "デプロイ済みモデルの取得に失敗しました", + "sapAiCoreLearnMore": "詳細情報", + "sapAiCoreDeployedModelsHeader": "デプロイ済みモデル(使用可能)", + "sapAiCoreSupportedModelsHeader": "未デプロイモデル(デプロイが必要)", "getHuggingFaceApiKey": "Hugging Face APIキーを取得", "huggingFaceApiKey": "Hugging Face APIキー", "huggingFaceModelId": "モデルID", @@ -806,6 +821,10 @@ "modelId": "有効なモデルIDを入力してください。", "modelSelector": "有効なモデルセレクターを入力してください。", "openAi": "有効なベースURL、APIキー、モデルIDを入力してください。", + "sapAiCoreBaseUrl": "SAP AI Core ベースURLが必要です", + "sapAiCoreClientId": "SAP AI Core クライアントIDが必要です", + "sapAiCoreClientSecret": "SAP AI Core クライアントシークレットが必要です", + "sapAiCoreTokenUrl": "SAP AI Core 
トークンURLが必要です", "arn": { "invalidFormat": "ARNの形式が無効です。フォーマット要件を確認してください。", "regionMismatch": "警告:ARN内のリージョン({{arnRegion}})が選択したリージョン({{region}})と一致しません。これによりアクセスの問題が発生する可能性があります。プロバイダーはARNのリージョンを使用します。" @@ -827,6 +846,8 @@ "projectId": "プロジェクト ID を入力...", "customArn": "ARN を入力(例:arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "ベース URL を入力...", + "clientId": "クライアントIDを入力...", + "clientSecret": "クライアントシークレットを入力...", "modelId": { "lmStudio": "例:meta-llama-3.1-8b-instruct", "lmStudioDraft": "例:lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 969da2c9ae..25bbfc0c1b 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API 키가 필요합니다", "ollamaBaseUrlRequired": "Ollama 기본 URL이 필요합니다", "baseUrlRequired": "기본 URL이 필요합니다", - "modelDimensionMinValue": "모델 차원은 0보다 커야 합니다" + "modelDimensionMinValue": "모델 차원은 0보다 커야 합니다", + "sapAiCoreBaseUrl": "SAP AI Core Base URL이 필요합니다.", + "sapAiCoreClientId": "SAP AI Core Client ID가 필요합니다.", + "sapAiCoreClientSecret": "SAP AI Core Client Secret이 필요합니다.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL이 필요합니다." 
}, "advancedConfigLabel": "고급 구성", "searchMinScoreLabel": "검색 점수 임계값", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API 키", "getSambaNovaApiKey": "SambaNova API 키 받기", "sambaNovaApiKey": "SambaNova API 키", + "sapAiCoreBaseUrl": "SAP AI Core Base URL", + "sapAiCoreClientId": "SAP AI Core Client ID", + "sapAiCoreClientSecret": "SAP AI Core Client Secret", + "sapAiCoreTokenUrl": "SAP AI Core Token URL", + "sapAiResourceGroup": "SAP AI Resource Group", + "sapAiCoreFetchingModels": "SAP AI Core에서 배포된 모델을 가져오는 중...", + "sapAiCoreFetchModelsFailed": "배포된 모델을 가져오지 못했습니다:", + "sapAiCoreFetchModelsFailedGeneric": "배포된 모델을 가져오지 못했습니다", + "sapAiCoreLearnMore": "더 알아보기", + "sapAiCoreDeployedModelsHeader": "배포된 모델 (사용 가능)", + "sapAiCoreSupportedModelsHeader": "배포되지 않은 모델 (배포 필요)", "getGeminiApiKey": "Gemini API 키 받기", "getHuggingFaceApiKey": "Hugging Face API 키 받기", "huggingFaceApiKey": "Hugging Face API 키", @@ -814,6 +829,10 @@ "providerNotAllowed": "제공자 '{{provider}}'는 조직에서 허용되지 않습니다", "modelNotAllowed": "모델 '{{model}}'은 제공자 '{{provider}}'에 대해 조직에서 허용되지 않습니다", "profileInvalid": "이 프로필에는 조직에서 허용되지 않는 제공자 또는 모델이 포함되어 있습니다", + "sapAiCoreBaseUrl": "SAP AI Core Base URL이 필요합니다.", + "sapAiCoreClientId": "SAP AI Core Client ID가 필요합니다.", + "sapAiCoreClientSecret": "SAP AI Core Client Secret이 필요합니다.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL이 필요합니다.", "qwenCodeOauthPath": "유효한 OAuth 자격 증명 경로를 제공해야 합니다" }, "placeholders": { @@ -827,6 +846,8 @@ "projectId": "프로젝트 ID 입력...", "customArn": "ARN 입력 (예: arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "기본 URL 입력...", + "clientId": "클라이언트 ID 입력...", + "clientSecret": "클라이언트 시크릿 입력...", "modelId": { "lmStudio": "예: meta-llama-3.1-8b-instruct", "lmStudioDraft": "예: lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index aa962a56ab..c15f37cc58 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ 
b/webview-ui/src/i18n/locales/nl/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API-sleutel is vereist", "ollamaBaseUrlRequired": "Ollama basis-URL is vereist", "baseUrlRequired": "Basis-URL is vereist", - "modelDimensionMinValue": "Modelafmeting moet groter zijn dan 0" + "modelDimensionMinValue": "Modelafmeting moet groter zijn dan 0", + "sapAiCoreBaseUrl": "SAP AI Core Basis-URL is vereist.", + "sapAiCoreClientId": "SAP AI Core Client ID is vereist.", + "sapAiCoreClientSecret": "SAP AI Core Client Secret is vereist.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL is vereist." }, "advancedConfigLabel": "Geavanceerde configuratie", "searchMinScoreLabel": "Zoekscore drempel", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API-sleutel", "getSambaNovaApiKey": "SambaNova API-sleutel ophalen", "sambaNovaApiKey": "SambaNova API-sleutel", + "sapAiCoreBaseUrl": "SAP AI Core Basis-URL", + "sapAiCoreClientId": "SAP AI Core Client ID", + "sapAiCoreClientSecret": "SAP AI Core Client Secret", + "sapAiCoreTokenUrl": "SAP AI Core Token URL", + "sapAiResourceGroup": "SAP AI Resource Group", + "sapAiCoreFetchingModels": "Geïmplementeerde modellen ophalen van SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Kan geïmplementeerde modellen niet ophalen:", + "sapAiCoreFetchModelsFailedGeneric": "Kan geïmplementeerde modellen niet ophalen", + "sapAiCoreLearnMore": "Meer informatie", + "sapAiCoreDeployedModelsHeader": "Geïmplementeerde modellen (klaar voor gebruik)", + "sapAiCoreSupportedModelsHeader": "Niet-geïmplementeerde modellen (vereisen implementatie)", "getGeminiApiKey": "Gemini API-sleutel ophalen", "getHuggingFaceApiKey": "Hugging Face API-sleutel ophalen", "huggingFaceApiKey": "Hugging Face API-sleutel", @@ -814,6 +829,10 @@ "providerNotAllowed": "Provider '{{provider}}' is niet toegestaan door je organisatie", "modelNotAllowed": "Model '{{model}}' is niet toegestaan voor provider '{{provider}}' door je organisatie", "profileInvalid": "Dit 
profiel bevat een provider of model dat niet is toegestaan door je organisatie", + "sapAiCoreBaseUrl": "SAP AI Core Basis-URL is vereist.", + "sapAiCoreClientId": "SAP AI Core Client ID is vereist.", + "sapAiCoreClientSecret": "SAP AI Core Client Secret is vereist.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL is vereist.", "qwenCodeOauthPath": "Je moet een geldig OAuth-referentiepad opgeven" }, "placeholders": { @@ -827,6 +846,8 @@ "projectId": "Voer project-ID in...", "customArn": "Voer ARN in (bijv. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Voer basis-URL in...", + "clientId": "Voer Client ID in...", + "clientSecret": "Voer Client Secret in...", "modelId": { "lmStudio": "bijv. meta-llama-3.1-8b-instruct", "lmStudioDraft": "bijv. lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index ccc3868fb8..9ca52edf18 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Klucz API Vercel AI Gateway jest wymagany", "ollamaBaseUrlRequired": "Wymagany jest bazowy adres URL Ollama", "baseUrlRequired": "Wymagany jest bazowy adres URL", - "modelDimensionMinValue": "Wymiar modelu musi być większy niż 0" + "modelDimensionMinValue": "Wymiar modelu musi być większy niż 0", + "sapAiCoreBaseUrl": "Bazowy URL SAP AI Core jest wymagany.", + "sapAiCoreClientId": "ID klienta SAP AI Core jest wymagany.", + "sapAiCoreClientSecret": "Sekret klienta SAP AI Core jest wymagany.", + "sapAiCoreTokenUrl": "URL tokena SAP AI Core jest wymagany." 
}, "advancedConfigLabel": "Konfiguracja zaawansowana", "searchMinScoreLabel": "Próg wyniku wyszukiwania", @@ -296,6 +300,17 @@ "groqApiKey": "Klucz API Groq", "getSambaNovaApiKey": "Uzyskaj klucz API SambaNova", "sambaNovaApiKey": "Klucz API SambaNova", + "sapAiCoreBaseUrl": "Bazowy URL SAP AI Core", + "sapAiCoreClientId": "ID klienta SAP AI Core", + "sapAiCoreClientSecret": "Sekret klienta SAP AI Core", + "sapAiCoreTokenUrl": "URL tokena SAP AI Core", + "sapAiResourceGroup": "Grupa zasobów SAP AI", + "sapAiCoreFetchingModels": "Pobieranie wdrożonych modeli z SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Nie udało się pobrać wdrożonych modeli:", + "sapAiCoreFetchModelsFailedGeneric": "Nie udało się pobrać wdrożonych modeli", + "sapAiCoreLearnMore": "Dowiedz się więcej", + "sapAiCoreDeployedModelsHeader": "Wdrożone modele (gotowe do użycia)", + "sapAiCoreSupportedModelsHeader": "Niewdrożone modele (wymagają wdrożenia)", "getGeminiApiKey": "Uzyskaj klucz API Gemini", "getHuggingFaceApiKey": "Uzyskaj klucz API Hugging Face", "huggingFaceApiKey": "Klucz API Hugging Face", @@ -814,6 +829,10 @@ "providerNotAllowed": "Dostawca '{{provider}}' nie jest dozwolony przez Twoją organizację", "modelNotAllowed": "Model '{{model}}' nie jest dozwolony dla dostawcy '{{provider}}' przez Twoją organizację", "profileInvalid": "Ten profil zawiera dostawcę lub model, który nie jest dozwolony przez Twoją organizację", + "sapAiCoreBaseUrl": "Bazowy URL SAP AI Core jest wymagany.", + "sapAiCoreClientId": "ID klienta SAP AI Core jest wymagany.", + "sapAiCoreClientSecret": "Sekret klienta SAP AI Core jest wymagany.", + "sapAiCoreTokenUrl": "URL tokena SAP AI Core jest wymagany.", "qwenCodeOauthPath": "Musisz podać prawidłową ścieżkę do poświadczeń OAuth" }, "placeholders": { @@ -827,6 +846,8 @@ "projectId": "Wprowadź ID projektu...", "customArn": "Wprowadź ARN (np. 
arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Wprowadź podstawowy URL...", + "clientId": "Wprowadź ID klienta...", + "clientSecret": "Wprowadź sekret klienta...", "modelId": { "lmStudio": "np. meta-llama-3.1-8b-instruct", "lmStudioDraft": "np. lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index b6674e682e..a9f3ac7de4 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "A chave de API do Vercel AI Gateway é obrigatória", "ollamaBaseUrlRequired": "A URL base do Ollama é obrigatória", "baseUrlRequired": "A URL base é obrigatória", - "modelDimensionMinValue": "A dimensão do modelo deve ser maior que 0" + "modelDimensionMinValue": "A dimensão do modelo deve ser maior que 0", + "sapAiCoreBaseUrl": "A URL Base do SAP AI Core é obrigatória", + "sapAiCoreClientId": "O ID do Cliente SAP AI Core é obrigatório", + "sapAiCoreClientSecret": "O Segredo do Cliente SAP AI Core é obrigatório", + "sapAiCoreTokenUrl": "A URL do Token SAP AI Core é obrigatória" }, "advancedConfigLabel": "Configuração Avançada", "searchMinScoreLabel": "Limite de pontuação de busca", @@ -296,6 +300,17 @@ "groqApiKey": "Chave de API Groq", "getSambaNovaApiKey": "Obter chave de API SambaNova", "sambaNovaApiKey": "Chave de API SambaNova", + "sapAiCoreBaseUrl": "URL Base SAP AI Core", + "sapAiCoreClientId": "ID do Cliente SAP AI Core", + "sapAiCoreClientSecret": "Segredo do Cliente SAP AI Core", + "sapAiCoreTokenUrl": "URL do Token SAP AI Core", + "sapAiResourceGroup": "Grupo de Recursos SAP AI", + "sapAiCoreFetchingModels": "Buscando modelos implantados do SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Falha ao buscar modelos implantados:", + "sapAiCoreFetchModelsFailedGeneric": "Falha ao buscar modelos implantados", + "sapAiCoreLearnMore": "Saiba 
mais", + "sapAiCoreDeployedModelsHeader": "Modelos Implantados (Prontos para Uso)", + "sapAiCoreSupportedModelsHeader": "Modelos Não Implantados (Requerem Implantação)", "getGeminiApiKey": "Obter chave de API Gemini", "getHuggingFaceApiKey": "Obter chave de API Hugging Face", "huggingFaceApiKey": "Chave de API Hugging Face", @@ -814,6 +829,10 @@ "providerNotAllowed": "O provedor '{{provider}}' não é permitido pela sua organização", "modelNotAllowed": "O modelo '{{model}}' não é permitido para o provedor '{{provider}}' pela sua organização", "profileInvalid": "Este perfil contém um provedor ou modelo que não é permitido pela sua organização", + "sapAiCoreBaseUrl": "URL Base SAP AI Core é obrigatória.", + "sapAiCoreClientId": "ID do Cliente SAP AI Core é obrigatório.", + "sapAiCoreClientSecret": "Segredo do Cliente SAP AI Core é obrigatório.", + "sapAiCoreTokenUrl": "URL do Token SAP AI Core é obrigatória.", "qwenCodeOauthPath": "Você deve fornecer um caminho válido de credenciais OAuth" }, "placeholders": { @@ -827,6 +846,8 @@ "projectId": "Digite o ID do projeto...", "customArn": "Digite o ARN (ex: arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Digite a URL base...", + "clientId": "Digite o ID do Cliente...", + "clientSecret": "Digite o Segredo do Cliente...", "modelId": { "lmStudio": "ex: meta-llama-3.1-8b-instruct", "lmStudioDraft": "ex: lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index e416777719..74820aeec5 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Требуется API-ключ Vercel AI Gateway", "ollamaBaseUrlRequired": "Требуется базовый URL Ollama", "baseUrlRequired": "Требуется базовый URL", - "modelDimensionMinValue": "Размерность модели должна быть больше 0" + "modelDimensionMinValue": "Размерность модели 
должна быть больше 0", + "sapAiCoreBaseUrl": "Базовый URL SAP AI Core обязателен.", + "sapAiCoreClientId": "ID клиента SAP AI Core обязателен.", + "sapAiCoreClientSecret": "Секрет клиента SAP AI Core обязателен.", + "sapAiCoreTokenUrl": "URL токена SAP AI Core обязателен." }, "advancedConfigLabel": "Расширенная конфигурация", "searchMinScoreLabel": "Порог оценки поиска", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API-ключ", "getSambaNovaApiKey": "Получить SambaNova API-ключ", "sambaNovaApiKey": "SambaNova API-ключ", + "sapAiCoreBaseUrl": "Базовый URL SAP AI Core", + "sapAiCoreClientId": "ID клиента SAP AI Core", + "sapAiCoreClientSecret": "Секрет клиента SAP AI Core", + "sapAiCoreTokenUrl": "URL токена SAP AI Core", + "sapAiResourceGroup": "Группа ресурсов SAP AI", + "sapAiCoreFetchingModels": "Получение развернутых моделей SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Не удалось получить развернутые модели:", + "sapAiCoreFetchModelsFailedGeneric": "Не удалось получить развернутые модели", + "sapAiCoreLearnMore": "Узнать больше", + "sapAiCoreDeployedModelsHeader": "Развернутые модели (готовы к использованию)", + "sapAiCoreSupportedModelsHeader": "Неразвернутые модели (требуют развертывания)", "getGeminiApiKey": "Получить Gemini API-ключ", "getHuggingFaceApiKey": "Получить Hugging Face API-ключ", "huggingFaceApiKey": "Hugging Face API-ключ", @@ -814,6 +829,10 @@ "providerNotAllowed": "Провайдер '{{provider}}' не разрешен вашей организацией", "modelNotAllowed": "Модель '{{model}}' не разрешена для провайдера '{{provider}}' вашей организацией", "profileInvalid": "Этот профиль содержит провайдера или модель, которые не разрешены вашей организацией", + "sapAiCoreBaseUrl": "Базовый URL SAP AI Core обязателен.", + "sapAiCoreClientId": "ID клиента SAP AI Core обязателен.", + "sapAiCoreClientSecret": "Секрет клиента SAP AI Core обязателен.", + "sapAiCoreTokenUrl": "URL токена SAP AI Core обязателен.", "qwenCodeOauthPath": "Вы должны указать допустимый путь к учетным 
данным OAuth" }, "placeholders": { @@ -827,6 +846,8 @@ "projectId": "Введите Project ID...", "customArn": "Введите ARN (например, arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Введите базовый URL...", + "clientId": "Введите ID клиента...", + "clientSecret": "Введите секрет клиента...", "modelId": { "lmStudio": "например, meta-llama-3.1-8b-instruct", "lmStudioDraft": "например, lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 0335802ba0..5e159c2c45 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API anahtarı gereklidir", "ollamaBaseUrlRequired": "Ollama temel URL'si gereklidir", "baseUrlRequired": "Temel URL'si gereklidir", - "modelDimensionMinValue": "Model boyutu 0'dan büyük olmalıdır" + "modelDimensionMinValue": "Model boyutu 0'dan büyük olmalıdır", + "sapAiCoreBaseUrl": "SAP AI Core Temel URL gereklidir", + "sapAiCoreClientId": "SAP AI Core İstemci Kimliği gereklidir", + "sapAiCoreClientSecret": "SAP AI Core İstemci Sırrı gereklidir", + "sapAiCoreTokenUrl": "SAP AI Core Token URL gereklidir" }, "advancedConfigLabel": "Gelişmiş Yapılandırma", "searchMinScoreLabel": "Arama Skoru Eşiği", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API Anahtarı", "getSambaNovaApiKey": "SambaNova API Anahtarı Al", "sambaNovaApiKey": "SambaNova API Anahtarı", + "sapAiCoreBaseUrl": "SAP AI Core Temel URL", + "sapAiCoreClientId": "SAP AI Core İstemci Kimliği", + "sapAiCoreClientSecret": "SAP AI Core İstemci Sırrı", + "sapAiCoreTokenUrl": "SAP AI Core Token URL", + "sapAiResourceGroup": "SAP AI Kaynak Grubu", + "sapAiCoreFetchingModels": "SAP AI Core dağıtılmış modelleri getiriliyor...", + "sapAiCoreFetchModelsFailed": "Dağıtılmış modeller getirilemedi:", + "sapAiCoreFetchModelsFailedGeneric": "Dağıtılmış modeller 
getirilemedi", + "sapAiCoreLearnMore": "Daha fazla bilgi", + "sapAiCoreDeployedModelsHeader": "Dağıtılmış Modeller (Kullanıma Hazır)", + "sapAiCoreSupportedModelsHeader": "Dağıtılmamış Modeller (Dağıtım Gerektirir)", "getHuggingFaceApiKey": "Hugging Face API Anahtarı Al", "huggingFaceApiKey": "Hugging Face API Anahtarı", "huggingFaceModelId": "Model ID", @@ -814,6 +829,10 @@ "providerNotAllowed": "Sağlayıcı '{{provider}}' kuruluşunuz tarafından izin verilmiyor", "modelNotAllowed": "Model '{{model}}' sağlayıcı '{{provider}}' için kuruluşunuz tarafından izin verilmiyor", "profileInvalid": "Bu profil, kuruluşunuz tarafından izin verilmeyen bir sağlayıcı veya model içeriyor", + "sapAiCoreBaseUrl": "SAP AI Core Temel URL gereklidir.", + "sapAiCoreClientId": "SAP AI Core İstemci Kimliği gereklidir.", + "sapAiCoreClientSecret": "SAP AI Core İstemci Sırrı gereklidir.", + "sapAiCoreTokenUrl": "SAP AI Core Token URL gereklidir.", "qwenCodeOauthPath": "Geçerli bir OAuth kimlik bilgileri yolu sağlamalısın" }, "placeholders": { @@ -827,6 +846,8 @@ "projectId": "Proje ID'sini girin...", "customArn": "ARN girin (örn. arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "Temel URL'yi girin...", + "clientId": "İstemci Kimliğini girin...", + "clientSecret": "İstemci Sırrını girin...", "modelId": { "lmStudio": "örn. meta-llama-3.1-8b-instruct", "lmStudioDraft": "örn.
lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 92565cb85e..69d6751b09 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "Cần có khóa API Vercel AI Gateway", "ollamaBaseUrlRequired": "Yêu cầu URL cơ sở Ollama", "baseUrlRequired": "Yêu cầu URL cơ sở", - "modelDimensionMinValue": "Kích thước mô hình phải lớn hơn 0" + "modelDimensionMinValue": "Kích thước mô hình phải lớn hơn 0", + "sapAiCoreBaseUrl": "Yêu cầu URL cơ sở SAP AI Core.", + "sapAiCoreClientId": "Yêu cầu ID khách hàng SAP AI Core.", + "sapAiCoreClientSecret": "Yêu cầu bí mật khách hàng SAP AI Core.", + "sapAiCoreTokenUrl": "Yêu cầu URL token SAP AI Core." }, "advancedConfigLabel": "Cấu hình nâng cao", "searchMinScoreLabel": "Ngưỡng điểm tìm kiếm", @@ -296,6 +300,17 @@ "groqApiKey": "Khóa API Groq", "getSambaNovaApiKey": "Lấy khóa API SambaNova", "sambaNovaApiKey": "Khóa API SambaNova", + "sapAiCoreBaseUrl": "URL cơ sở SAP AI Core", + "sapAiCoreClientId": "ID khách hàng SAP AI Core", + "sapAiCoreClientSecret": "Bí mật khách hàng SAP AI Core", + "sapAiCoreTokenUrl": "URL token SAP AI Core", + "sapAiResourceGroup": "Nhóm tài nguyên SAP AI", + "sapAiCoreFetchingModels": "Đang lấy các mô hình đã triển khai từ SAP AI Core...", + "sapAiCoreFetchModelsFailed": "Không thể lấy các mô hình đã triển khai:", + "sapAiCoreFetchModelsFailedGeneric": "Không thể lấy các mô hình đã triển khai", + "sapAiCoreLearnMore": "Tìm hiểu thêm", + "sapAiCoreDeployedModelsHeader": "Các mô hình đã triển khai (sẵn sàng sử dụng)", + "sapAiCoreSupportedModelsHeader": "Các mô hình chưa triển khai (yêu cầu triển khai)", "getHuggingFaceApiKey": "Lấy Khóa API Hugging Face", "huggingFaceApiKey": "Khóa API Hugging Face", "huggingFaceModelId": "ID Mô hình", @@ -814,11 +829,17 @@ "providerNotAllowed": "Nhà cung cấp '{{provider}}' 
không được phép bởi tổ chức của bạn", "modelNotAllowed": "Mô hình '{{model}}' không được phép cho nhà cung cấp '{{provider}}' bởi tổ chức của bạn", "profileInvalid": "Hồ sơ này chứa một nhà cung cấp hoặc mô hình không được phép bởi tổ chức của bạn", - "qwenCodeOauthPath": "Bạn phải cung cấp đường dẫn thông tin xác thực OAuth hợp lệ" + "qwenCodeOauthPath": "Bạn phải cung cấp đường dẫn thông tin xác thực OAuth hợp lệ", + "sapAiCoreBaseUrl": "Yêu cầu URL cơ sở SAP AI Core", + "sapAiCoreClientId": "Yêu cầu ID khách hàng SAP AI Core", + "sapAiCoreClientSecret": "Yêu cầu bí mật khách hàng SAP AI Core", + "sapAiCoreTokenUrl": "Yêu cầu URL token SAP AI Core" }, "placeholders": { "apiKey": "Nhập khóa API...", "profileName": "Nhập tên hồ sơ", + "clientId": "Nhập ID khách hàng...", + "clientSecret": "Nhập bí mật khách hàng...", "accessKey": "Nhập khóa truy cập...", "secretKey": "Nhập khóa bí mật...", "sessionToken": "Nhập token phiên...", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 9a3879523d..fc1ef8bd07 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "需要 Vercel AI Gateway API 密钥", "ollamaBaseUrlRequired": "需要 Ollama 基础 URL", "baseUrlRequired": "需要基础 URL", - "modelDimensionMinValue": "模型维度必须大于 0" + "modelDimensionMinValue": "模型维度必须大于 0", + "sapAiCoreBaseUrl": "需要 SAP AI Core 基础 URL。", + "sapAiCoreClientId": "需要 SAP AI Core 客户端 ID。", + "sapAiCoreClientSecret": "需要 SAP AI Core 客户端密钥。", + "sapAiCoreTokenUrl": "需要 SAP AI Core 令牌 URL。" }, "advancedConfigLabel": "高级配置", "searchMinScoreLabel": "搜索分数阈值", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API 密钥", "getSambaNovaApiKey": "获取 SambaNova API 密钥", "sambaNovaApiKey": "SambaNova API 密钥", + "sapAiCoreBaseUrl": "SAP AI Core 基础 URL", + "sapAiCoreClientId": "SAP AI Core 客户端 ID", + "sapAiCoreClientSecret": "SAP AI Core 客户端密钥", + "sapAiCoreTokenUrl": 
"SAP AI Core 令牌 URL", + "sapAiResourceGroup": "SAP AI 资源组", + "sapAiCoreFetchingModels": "正在从 SAP AI Core 获取已部署的模型...", + "sapAiCoreFetchModelsFailed": "获取已部署模型失败:", + "sapAiCoreFetchModelsFailedGeneric": "获取已部署模型失败", + "sapAiCoreLearnMore": "了解更多", + "sapAiCoreDeployedModelsHeader": "已部署模型(可用)", + "sapAiCoreSupportedModelsHeader": "未部署模型(需要部署)", "getHuggingFaceApiKey": "获取 Hugging Face API 密钥", "huggingFaceApiKey": "Hugging Face API 密钥", "huggingFaceModelId": "模型 ID", @@ -806,6 +821,10 @@ "modelId": "您必须提供有效的模型 ID。", "modelSelector": "您必须提供有效的模型选择器。", "openAi": "您必须提供有效的基础 URL、API 密钥和模型 ID。", + "sapAiCoreBaseUrl": "需要 SAP AI Core 基础 URL", + "sapAiCoreClientId": "需要 SAP AI Core 客户端 ID", + "sapAiCoreClientSecret": "需要 SAP AI Core 客户端密钥", + "sapAiCoreTokenUrl": "需要 SAP AI Core 令牌 URL", "arn": { "invalidFormat": "ARN 格式无效。请检查格式要求。", "regionMismatch": "警告:您的 ARN 中的区域 ({{arnRegion}}) 与您选择的区域 ({{region}}) 不匹配。这可能会导致访问问题。提供程序将使用 ARN 中的区域。" @@ -827,6 +846,8 @@ "projectId": "请输入项目 ID...", "customArn": "请输入 ARN(例:arn:aws:bedrock:us-east-1:123456789012:foundation-model/my-model)", "baseUrl": "请输入基础 URL...", + "clientId": "请输入客户端 ID...", + "clientSecret": "请输入客户端密钥...", "modelId": { "lmStudio": "例:meta-llama-3.1-8b-instruct", "lmStudioDraft": "例:lmstudio-community/llama-3.2-1b-instruct", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index b8ade3ca8b..467ba0dd7a 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -119,7 +119,11 @@ "vercelAiGatewayApiKeyRequired": "需要 Vercel AI Gateway API 金鑰", "ollamaBaseUrlRequired": "需要 Ollama 基礎 URL", "baseUrlRequired": "需要基礎 URL", - "modelDimensionMinValue": "模型維度必須大於 0" + "modelDimensionMinValue": "模型維度必須大於 0", + "sapAiCoreBaseUrl": "需要 SAP AI Core 基礎 URL", + "sapAiCoreClientId": "需要 SAP AI Core 客戶端 ID", + "sapAiCoreClientSecret": "需要 SAP AI Core 客戶端密鑰", + "sapAiCoreTokenUrl": "需要 SAP AI Core Token URL" }, 
"advancedConfigLabel": "進階設定", "searchMinScoreLabel": "搜尋分數閾值", @@ -296,6 +300,17 @@ "groqApiKey": "Groq API 金鑰", "getSambaNovaApiKey": "取得 SambaNova API 金鑰", "sambaNovaApiKey": "SambaNova API 金鑰", + "sapAiCoreBaseUrl": "SAP AI Core 基礎 URL", + "sapAiCoreClientId": "SAP AI Core 客戶端 ID", + "sapAiCoreClientSecret": "SAP AI Core 客戶端密鑰", + "sapAiCoreTokenUrl": "SAP AI Core Token URL", + "sapAiResourceGroup": "SAP AI 資源群組", + "sapAiCoreFetchingModels": "正在從 SAP AI Core 取得已部署的模型...", + "sapAiCoreFetchModelsFailed": "無法取得已部署的模型:", + "sapAiCoreFetchModelsFailedGeneric": "無法取得已部署的模型", + "sapAiCoreLearnMore": "了解更多", + "sapAiCoreDeployedModelsHeader": "已部署的模型(可立即使用)", + "sapAiCoreSupportedModelsHeader": "未部署的模型(需要部署)", "getHuggingFaceApiKey": "取得 Hugging Face API 金鑰", "huggingFaceApiKey": "Hugging Face API 金鑰", "huggingFaceModelId": "模型 ID", @@ -814,11 +829,17 @@ "providerNotAllowed": "供應商 '{{provider}}' 不允許用於您的組織。", "modelNotAllowed": "模型 '{{model}}' 不允許用於供應商 '{{provider}}',您的組織不允許", "profileInvalid": "此設定檔包含您的組織不允許的供應商或模型", - "qwenCodeOauthPath": "您必須提供有效的 OAuth 憑證路徑" + "qwenCodeOauthPath": "您必須提供有效的 OAuth 憑證路徑", + "sapAiCoreBaseUrl": "需要 SAP AI Core 基礎 URL", + "sapAiCoreClientId": "需要 SAP AI Core 客戶端 ID", + "sapAiCoreClientSecret": "需要 SAP AI Core 客戶端密鑰", + "sapAiCoreTokenUrl": "需要 SAP AI Core Token URL" }, "placeholders": { "apiKey": "請輸入 API 金鑰...", "profileName": "請輸入設定檔名稱", + "clientId": "請輸入客戶端 ID...", + "clientSecret": "請輸入客戶端密鑰...", "accessKey": "請輸入存取金鑰...", "secretKey": "請輸入金鑰...", "sessionToken": "請輸入工作階段權杖...", diff --git a/webview-ui/src/utils/__tests__/validate.test.ts b/webview-ui/src/utils/__tests__/validate.test.ts index 30ccfd4463..dbc330508b 100644 --- a/webview-ui/src/utils/__tests__/validate.test.ts +++ b/webview-ui/src/utils/__tests__/validate.test.ts @@ -1,7 +1,7 @@ import type { ProviderSettings } from "@roo-code/types" import type { OrganizationAllowList } from "@roo/cloud" -import { RouterModels } from "@roo/api" +import type { RouterModels } from 
"@roo/api" import { getModelValidationError, validateApiConfigurationExcludingModelErrors } from "../validate" @@ -211,3 +211,34 @@ describe("Model Validation Functions", () => { }) }) }) + +describe("SAP AI Core validation", () => { + it("returns error for missing modelId when SAP AI Core credentials are provided", () => { + const config: ProviderSettings = { + apiProvider: "sapaicore", + sapAiCoreBaseUrl: "https://aicore.example.com", + sapAiCoreClientId: "client-id", + sapAiCoreClientSecret: "client-secret", + sapAiCoreTokenUrl: "https://auth.example.com/oauth/token", + // apiModelId intentionally missing + } + + const result = validateApiConfigurationExcludingModelErrors(config, undefined, undefined) + // i18n returns un-namespaced keys in tests (e.g., "validation.modelId") + expect(result).toBe("validation.modelId") + }) + + it("returns undefined when SAP AI Core credentials and modelId are provided", () => { + const config: ProviderSettings = { + apiProvider: "sapaicore", + sapAiCoreBaseUrl: "https://aicore.example.com", + sapAiCoreClientId: "client-id", + sapAiCoreClientSecret: "client-secret", + sapAiCoreTokenUrl: "https://auth.example.com/oauth/token", + apiModelId: "anthropic--claude-3.5-sonnet", + } + + const result = validateApiConfigurationExcludingModelErrors(config, undefined, undefined) + expect(result).toBeUndefined() + }) +}) diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index 5613eb9eb8..507878c16f 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -3,7 +3,8 @@ import i18next from "i18next" import type { ProviderSettings } from "@roo-code/types" import type { OrganizationAllowList } from "@roo/cloud" -import { isRouterName, RouterModels } from "@roo/api" +import { isRouterName } from "@roo/api" +import type { RouterModels } from "@roo/api" export function validateApiConfiguration( apiConfiguration: ProviderSettings, @@ -131,6 +132,23 @@ function 
validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri return i18next.t("settings:validation.apiKey") } break + case "sapaicore": + if (!apiConfiguration.sapAiCoreBaseUrl) { + return i18next.t("settings:validation.sapAiCoreBaseUrl") + } + if (!apiConfiguration.sapAiCoreClientId) { + return i18next.t("settings:validation.sapAiCoreClientId") + } + if (!apiConfiguration.sapAiCoreClientSecret) { + return i18next.t("settings:validation.sapAiCoreClientSecret") + } + if (!apiConfiguration.sapAiCoreTokenUrl) { + return i18next.t("settings:validation.sapAiCoreTokenUrl") + } + if (!apiConfiguration.apiModelId) { + return i18next.t("settings:validation.modelId") + } + break case "qwen-code": if (!apiConfiguration.qwenCodeOauthPath) { return i18next.t("settings:validation.qwenCodeOauthPath")