Commit 6d5d3a7
feat: Add SAP AI Core provider integration
- Add comprehensive SAP AI Core provider
- Implement authentication using OAuth 2.0 client credentials flow
- Add deployment detection and model mapping for SAP AI Core services
- Include streaming support for real-time responses
- Add comprehensive test coverage for all provider functionality
- Implement UI components for SAP AI Core configuration and model selection
- Add model picker with deployment status indication
- Include validation for required configuration fields
- Add internationalization support for SAP AI Core settings
- Integrate with existing provider architecture and settings system

This enables users to connect to SAP AI Core's enterprise AI platform, supporting both Azure OpenAI and GCP Vertex AI model deployments with proper authentication and deployment management.

Aligns with roadmap goals:

- Enhanced AI provider support for enterprise platforms
- Reliable authentication and connection handling
- Improved user experience with clear deployment status
1 parent 0c481a3 commit 6d5d3a7
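
The handler implementation itself is not among the files excerpted below, so as orientation only, here is a minimal sketch of what the OAuth 2.0 client credentials exchange described in the commit message might look like against the configured token URL. The helper name `fetchSapAiCoreToken`, the `/oauth/token` path, and the token response shape are assumptions, not code from this commit.

```ts
// Illustrative sketch (not from this commit): an OAuth 2.0 client credentials
// token request using the SAP AI Core settings fields added in this change.
interface SapAiCoreAuthConfig {
	sapAiCoreClientId: string
	sapAiCoreClientSecret: string
	sapAiCoreTokenUrl: string // token endpoint base URL from the provider settings
}

// Hypothetical helper; the real SapAiCoreHandler internals are not shown here.
async function fetchSapAiCoreToken(config: SapAiCoreAuthConfig): Promise<string> {
	const response = await fetch(`${config.sapAiCoreTokenUrl}/oauth/token`, {
		method: "POST",
		headers: { "Content-Type": "application/x-www-form-urlencoded" },
		body: new URLSearchParams({
			grant_type: "client_credentials",
			client_id: config.sapAiCoreClientId,
			client_secret: config.sapAiCoreClientSecret,
		}),
	})
	if (!response.ok) {
		throw new Error(`SAP AI Core token request failed: ${response.status}`)
	}
	// Assumed response shape for a client credentials grant.
	const data = (await response.json()) as { access_token: string }
	return data.access_token
}
```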

25 files changed · +2723 −5 lines changed

packages/types/src/provider-settings.ts

Lines changed: 19 additions & 0 deletions
```diff
@@ -20,6 +20,7 @@ import {
 	openAiNativeModels,
 	rooModels,
 	sambaNovaModels,
+	sapAiCoreModels,
 	vertexModels,
 	vscodeLlmModels,
 	xaiModels,
@@ -64,6 +65,7 @@ export const providerNames = [
 	"featherless",
 	"io-intelligence",
 	"roo",
+	"sapaicore",
 ] as const
 
 export const providerNamesSchema = z.enum(providerNames)
@@ -315,6 +317,16 @@ const rooSchema = apiModelIdProviderModelSchema.extend({
 	// No additional fields needed - uses cloud authentication
 })
 
+const sapAiCoreSchema = apiModelIdProviderModelSchema.extend({
+	sapAiCoreClientId: z.string().optional(),
+	sapAiCoreClientSecret: z.string().optional(),
+	sapAiCoreTokenUrl: z.string().optional(),
+	sapAiResourceGroup: z.string().optional(),
+	sapAiCoreBaseUrl: z.string().optional(),
+	reasoningEffort: z.string().optional(),
+	thinkingBudgetTokens: z.number().optional(),
+})
+
 const defaultSchema = z.object({
 	apiProvider: z.undefined(),
 })
@@ -353,6 +365,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	featherlessSchema.merge(z.object({ apiProvider: z.literal("featherless") })),
 	ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })),
 	rooSchema.merge(z.object({ apiProvider: z.literal("roo") })),
+	sapAiCoreSchema.merge(z.object({ apiProvider: z.literal("sapaicore") })),
 	defaultSchema,
 ])
 
@@ -391,6 +404,7 @@ export const providerSettingsSchema = z.object({
 	...featherlessSchema.shape,
 	...ioIntelligenceSchema.shape,
 	...rooSchema.shape,
+	...sapAiCoreSchema.shape,
 	...codebaseIndexProviderSchema.shape,
 })
 
@@ -512,6 +526,11 @@ export const MODELS_BY_PROVIDER: Record<
 		label: "SambaNova",
 		models: Object.keys(sambaNovaModels),
 	},
+	sapaicore: {
+		id: "sapaicore",
+		label: "SAP AI Core",
+		models: Object.keys(sapAiCoreModels),
+	},
 	vertex: {
 		id: "vertex",
 		label: "GCP Vertex AI",
```

packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -22,6 +22,7 @@ export * from "./openrouter.js"
 export * from "./requesty.js"
 export * from "./roo.js"
 export * from "./sambanova.js"
+export * from "./sapaicore.js"
 export * from "./unbound.js"
 export * from "./vertex.js"
 export * from "./vscode-llm.js"
```
packages/types/src/providers/sapaicore.ts

Lines changed: 187 additions & 0 deletions
```ts
import type { ModelInfo } from "../model.js"

export type SapAiCoreModelId = keyof typeof sapAiCoreModels

export const sapAiCoreDefaultModelId: SapAiCoreModelId = "anthropic--claude-3.5-sonnet"

export const sapAiCoreModels = {
	// Anthropic models
	"anthropic--claude-4-sonnet": {
		maxTokens: 8192,
		contextWindow: 2000000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 5.0,
		outputPrice: 15.0,
		cacheReadsPrice: 0.5,
		cacheWritesPrice: 7.5,
	},
	"anthropic--claude-4-opus": {
		maxTokens: 8192,
		contextWindow: 2000000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 20.0,
		outputPrice: 60.0,
		cacheReadsPrice: 2.0,
		cacheWritesPrice: 30.0,
	},
	"anthropic--claude-3.7-sonnet": {
		maxTokens: 8192,
		contextWindow: 1000000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 3.0,
		outputPrice: 15.0,
		cacheReadsPrice: 0.3,
		cacheWritesPrice: 4.5,
	},
	"anthropic--claude-3.5-sonnet": {
		maxTokens: 8192,
		contextWindow: 200000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic--claude-3-sonnet": {
		maxTokens: 4096,
		contextWindow: 200000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 3.0,
		outputPrice: 15.0,
	},
	"anthropic--claude-3-haiku": {
		maxTokens: 4096,
		contextWindow: 200000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.25,
		outputPrice: 1.25,
	},
	"anthropic--claude-3-opus": {
		maxTokens: 4096,
		contextWindow: 200000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 75.0,
	},
	// OpenAI models
	"gpt-4o": {
		maxTokens: 16384,
		contextWindow: 128000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 5.0,
		outputPrice: 15.0,
	},
	"gpt-4": {
		maxTokens: 8192,
		contextWindow: 8192,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 30.0,
		outputPrice: 60.0,
	},
	"gpt-4o-mini": {
		maxTokens: 16384,
		contextWindow: 128000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.6,
	},
	o1: {
		maxTokens: 100000,
		contextWindow: 200000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 15.0,
		outputPrice: 60.0,
	},
	"gpt-4.1": {
		maxTokens: 32768,
		contextWindow: 128000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 5.0,
		outputPrice: 15.0,
	},
	"gpt-4.1-nano": {
		maxTokens: 16384,
		contextWindow: 128000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.15,
		outputPrice: 0.6,
	},
	"gpt-5": {
		maxTokens: 32768,
		contextWindow: 256000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 10.0,
		outputPrice: 30.0,
	},
	"gpt-5-nano": {
		maxTokens: 16384,
		contextWindow: 256000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.5,
		outputPrice: 1.5,
	},
	"gpt-5-mini": {
		maxTokens: 16384,
		contextWindow: 256000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 3.0,
	},
	"o3-mini": {
		maxTokens: 100000,
		contextWindow: 200000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 1.0,
		outputPrice: 4.0,
	},
	o3: {
		maxTokens: 100000,
		contextWindow: 200000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 60.0,
		outputPrice: 240.0,
	},
	"o4-mini": {
		maxTokens: 100000,
		contextWindow: 200000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 2.0,
		outputPrice: 8.0,
	},
	// Gemini models
	"gemini-2.5-flash": {
		maxTokens: 8192,
		contextWindow: 1000000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 0.5,
		outputPrice: 1.5,
		maxThinkingTokens: 32768,
	},
	"gemini-2.5-pro": {
		maxTokens: 8192,
		contextWindow: 2000000,
		supportsImages: true,
		supportsPromptCache: false,
		inputPrice: 2.5,
		outputPrice: 10.0,
		maxThinkingTokens: 65536,
	},
} satisfies Record<string, ModelInfo>
```
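
Because the map is declared with `satisfies Record<string, ModelInfo>` and the default id is typed as `SapAiCoreModelId`, lookups stay type-checked against the keys above. A small usage sketch (the resolver function is illustrative, not part of the commit):

```ts
import { sapAiCoreModels, sapAiCoreDefaultModelId, type SapAiCoreModelId } from "./sapaicore.js"

// Resolve a configured model id to its metadata, falling back to the default
// when the id is not one of the known SAP AI Core models.
function resolveSapAiCoreModel(modelId?: string) {
	const id: SapAiCoreModelId =
		modelId && modelId in sapAiCoreModels ? (modelId as SapAiCoreModelId) : sapAiCoreDefaultModelId
	return { id, info: sapAiCoreModels[id] }
}

const { id, info } = resolveSapAiCoreModel("anthropic--claude-3.7-sonnet")
console.log(id, info.contextWindow, info.supportsPromptCache) // "anthropic--claude-3.7-sonnet" 1000000 true
```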

src/api/index.ts

Lines changed: 3 additions & 0 deletions
```diff
@@ -37,6 +37,7 @@ import {
 	FireworksHandler,
 	RooHandler,
 	FeatherlessHandler,
+	SapAiCoreHandler,
 } from "./providers"
 import { NativeOllamaHandler } from "./providers/native-ollama"
 
@@ -148,6 +149,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new RooHandler(options)
 		case "featherless":
 			return new FeatherlessHandler(options)
+		case "sapaicore":
+			return new SapAiCoreHandler(options)
 		default:
 			apiProvider satisfies "gemini-cli" | undefined
 			return new AnthropicHandler(options)
```
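
With this case in place, selecting the provider comes down to passing `apiProvider: "sapaicore"` in the provider settings, and `buildApiHandler` returns the new handler. A minimal sketch under assumptions: the import path is relative to `src/`, the credential values are placeholders, and only the fields introduced in this diff are shown.

```ts
import { buildApiHandler } from "./api"

// Placeholder credentials; in practice these come from the settings UI added in this commit.
const handler = buildApiHandler({
	apiProvider: "sapaicore",
	apiModelId: "anthropic--claude-3.5-sonnet",
	sapAiCoreClientId: "my-client-id",
	sapAiCoreClientSecret: "my-client-secret",
	sapAiCoreTokenUrl: "https://example.authentication.eu10.hana.ondemand.com",
	sapAiCoreBaseUrl: "https://api.ai.example.ml.hana.ondemand.com/v2",
	sapAiResourceGroup: "default",
})
// `handler` is a SapAiCoreHandler exposing the same ApiHandler interface as the other providers.
```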
