Skip to content

Commit 1018b88

Browse files
Add IO Intelligence Provider (#6875)
Co-authored-by: daniel-lxs <[email protected]>
1 parent bce579f commit 1018b88

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

42 files changed

+850
-4
lines changed

packages/types/src/global-settings.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -194,6 +194,7 @@ export const SECRET_STATE_KEYS = [
194194
"huggingFaceApiKey",
195195
"sambaNovaApiKey",
196196
"fireworksApiKey",
197+
"ioIntelligenceApiKey",
197198
] as const satisfies readonly (keyof ProviderSettings)[]
198199
export type SecretState = Pick<ProviderSettings, (typeof SECRET_STATE_KEYS)[number]>
199200

packages/types/src/provider-settings.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ export const providerNames = [
4343
"sambanova",
4444
"zai",
4545
"fireworks",
46+
"io-intelligence",
4647
] as const
4748

4849
export const providerNamesSchema = z.enum(providerNames)
@@ -276,6 +277,11 @@ const fireworksSchema = apiModelIdProviderModelSchema.extend({
276277
fireworksApiKey: z.string().optional(),
277278
})
278279

280+
const ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({
281+
ioIntelligenceModelId: z.string().optional(),
282+
ioIntelligenceApiKey: z.string().optional(),
283+
})
284+
279285
const defaultSchema = z.object({
280286
apiProvider: z.undefined(),
281287
})
@@ -311,6 +317,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
311317
sambaNovaSchema.merge(z.object({ apiProvider: z.literal("sambanova") })),
312318
zaiSchema.merge(z.object({ apiProvider: z.literal("zai") })),
313319
fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })),
320+
ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })),
314321
defaultSchema,
315322
])
316323

@@ -346,6 +353,7 @@ export const providerSettingsSchema = z.object({
346353
...sambaNovaSchema.shape,
347354
...zaiSchema.shape,
348355
...fireworksSchema.shape,
356+
...ioIntelligenceSchema.shape,
349357
...codebaseIndexProviderSchema.shape,
350358
})
351359

@@ -371,6 +379,7 @@ export const MODEL_ID_KEYS: Partial<keyof ProviderSettings>[] = [
371379
"requestyModelId",
372380
"litellmModelId",
373381
"huggingFaceModelId",
382+
"ioIntelligenceModelId",
374383
]
375384

376385
export const getModelId = (settings: ProviderSettings): string | undefined => {

packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ export * from "./gemini.js"
88
export * from "./glama.js"
99
export * from "./groq.js"
1010
export * from "./huggingface.js"
11+
export * from "./io-intelligence.js"
1112
export * from "./lite-llm.js"
1213
export * from "./lm-studio.js"
1314
export * from "./mistral.js"
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
import type { ModelInfo } from "../model.js"
2+
3+
export type IOIntelligenceModelId =
4+
| "deepseek-ai/DeepSeek-R1-0528"
5+
| "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
6+
| "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar"
7+
| "openai/gpt-oss-120b"
8+
9+
export const ioIntelligenceDefaultModelId: IOIntelligenceModelId = "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
10+
11+
export const ioIntelligenceDefaultBaseUrl = "https://api.intelligence.io.solutions/api/v1"
12+
13+
export const IO_INTELLIGENCE_CACHE_DURATION = 1000 * 60 * 60 // 1 hour
14+
15+
export const ioIntelligenceModels = {
16+
"deepseek-ai/DeepSeek-R1-0528": {
17+
maxTokens: 8192,
18+
contextWindow: 128000,
19+
supportsImages: false,
20+
supportsPromptCache: false,
21+
description: "DeepSeek R1 reasoning model",
22+
},
23+
"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": {
24+
maxTokens: 8192,
25+
contextWindow: 430000,
26+
supportsImages: true,
27+
supportsPromptCache: false,
28+
description: "Llama 4 Maverick 17B model",
29+
},
30+
"Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar": {
31+
maxTokens: 8192,
32+
contextWindow: 106000,
33+
supportsImages: false,
34+
supportsPromptCache: false,
35+
description: "Qwen3 Coder 480B specialized for coding",
36+
},
37+
"openai/gpt-oss-120b": {
38+
maxTokens: 8192,
39+
contextWindow: 131072,
40+
supportsImages: false,
41+
supportsPromptCache: false,
42+
description: "OpenAI GPT-OSS 120B model",
43+
},
44+
} as const satisfies Record<string, ModelInfo>

src/api/index.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ import {
3232
LiteLLMHandler,
3333
ClaudeCodeHandler,
3434
SambaNovaHandler,
35+
IOIntelligenceHandler,
3536
DoubaoHandler,
3637
ZAiHandler,
3738
FireworksHandler,
@@ -137,6 +138,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
137138
return new ZAiHandler(options)
138139
case "fireworks":
139140
return new FireworksHandler(options)
141+
case "io-intelligence":
142+
return new IOIntelligenceHandler(options)
140143
default:
141144
apiProvider satisfies "gemini-cli" | undefined
142145
return new AnthropicHandler(options)

0 commit comments

Comments
 (0)