
Commit 5e5c5d5

feat(api): add cognima provider support
Add support for the Cognima AI provider, including API key handling, model fetching, and handler integration across the codebase.
1 parent 7926dcf commit 5e5c5d5
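
For orientation before the per-file diffs: once this lands, a Cognima configuration looks like any other provider entry — apiProvider set to "cognima", the new cognimaApiKey secret, and a model id under apiModelId (which is what modelIdKeysByProvider maps cognima to below). A minimal sketch; the import location is assumed and the key value is a placeholder:

import type { ProviderSettings } from "@roo-code/types" // assumed export location

const settings: ProviderSettings = {
	apiProvider: "cognima",
	cognimaApiKey: "ck-...", // placeholder; stored as a secret (see global-settings.ts below)
	apiModelId: "gpt-4o", // matches cognimaDefaultModelId
}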

10 files changed (+231 additions, -0 deletions)

packages/types/src/global-settings.ts

Lines changed: 1 addition & 0 deletions
@@ -214,6 +214,7 @@ export const SECRET_STATE_KEYS = [
 	"chutesApiKey",
 	"litellmApiKey",
 	"deepInfraApiKey",
+	"cognimaApiKey",
 	"codeIndexOpenAiKey",
 	"codeIndexQdrantApiKey",
 	"codebaseIndexOpenAiCompatibleApiKey",

packages/types/src/provider-settings.ts

Lines changed: 10 additions & 0 deletions
@@ -40,6 +40,7 @@ export const DEFAULT_CONSECUTIVE_MISTAKE_LIMIT = 3

 export const dynamicProviders = [
 	"openrouter",
+	"cognima",
 	"vercel-ai-gateway",
 	"huggingface",
 	"litellm",
@@ -122,6 +123,7 @@ export const providerNames = [
 	"cerebras",
 	"chutes",
 	"claude-code",
+	"cognima",
 	"doubao",
 	"deepseek",
 	"featherless",
@@ -352,6 +354,10 @@ const groqSchema = apiModelIdProviderModelSchema.extend({
 	groqApiKey: z.string().optional(),
 })

+const cognimaSchema = apiModelIdProviderModelSchema.extend({
+	cognimaApiKey: z.string().optional(),
+})
+
 const huggingFaceSchema = baseProviderSettingsSchema.extend({
 	huggingFaceApiKey: z.string().optional(),
 	huggingFaceModelId: z.string().optional(),
@@ -441,6 +447,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 	fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })),
 	xaiSchema.merge(z.object({ apiProvider: z.literal("xai") })),
 	groqSchema.merge(z.object({ apiProvider: z.literal("groq") })),
+	cognimaSchema.merge(z.object({ apiProvider: z.literal("cognima") })),
 	huggingFaceSchema.merge(z.object({ apiProvider: z.literal("huggingface") })),
 	chutesSchema.merge(z.object({ apiProvider: z.literal("chutes") })),
 	litellmSchema.merge(z.object({ apiProvider: z.literal("litellm") })),
@@ -482,6 +489,7 @@ export const providerSettingsSchema = z.object({
 	...fakeAiSchema.shape,
 	...xaiSchema.shape,
 	...groqSchema.shape,
+	...cognimaSchema.shape,
 	...huggingFaceSchema.shape,
 	...chutesSchema.shape,
 	...litellmSchema.shape,
@@ -568,6 +576,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
 	requesty: "requestyModelId",
 	xai: "apiModelId",
 	groq: "apiModelId",
+	cognima: "apiModelId",
 	chutes: "apiModelId",
 	litellm: "litellmModelId",
 	huggingface: "huggingFaceModelId",
@@ -661,6 +670,7 @@ export const MODELS_BY_PROVIDER: Record<
 		models: Object.keys(geminiModels),
 	},
 	groq: { id: "groq", label: "Groq", models: Object.keys(groqModels) },
+	"cognima": { id: "cognima", label: "Cognima", models: [] },
 	"io-intelligence": {
 		id: "io-intelligence",
 		label: "IO Intelligence",

packages/types/src/providers/cognima.ts

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+import type { ModelInfo } from "../model.js"
+
+export type CognimaModelId = string
+
+export const cognimaDefaultModelId: CognimaModelId = "gpt-4o"

packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@ export * from "./anthropic.js"
 export * from "./bedrock.js"
 export * from "./cerebras.js"
 export * from "./chutes.js"
+export * from "./cognima.js"
 export * from "./claude-code.js"
 export * from "./deepseek.js"
 export * from "./doubao.js"

src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -9,6 +9,7 @@ import {
 	AnthropicHandler,
 	AwsBedrockHandler,
 	CerebrasHandler,
+	CognimaHandler,
 	OpenRouterHandler,
 	VertexHandler,
 	AnthropicVertexHandler,
@@ -139,6 +140,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new XAIHandler(options)
 		case "groq":
 			return new GroqHandler(options)
+		case "cognima":
+			return new CognimaHandler(options)
 		case "deepinfra":
 			return new DeepInfraHandler(options)
 		case "huggingface":

src/api/providers/cognima.ts

Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+
+import { type CognimaModelId, cognimaDefaultModelId } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { ApiStreamChunk } from "../transform/stream"
+import { RouterProvider } from "./router-provider"
+import { handleOpenAIError } from "./utils/openai-error-handler"
+
+export class CognimaHandler extends RouterProvider {
+	private readonly providerName = "Cognima"
+
+	constructor(options: ApiHandlerOptions) {
+		super({
+			options,
+			name: "cognima",
+			baseURL: "https://cog2.cognima.com.br/openai/v1",
+			apiKey: options.cognimaApiKey,
+			modelId: options.cognimaModelId,
+			defaultModelId: cognimaDefaultModelId,
+			defaultModelInfo: {
+				maxTokens: 16384,
+				contextWindow: 128000,
+				supportsImages: true,
+				supportsPromptCache: false,
+				inputPrice: 2.5,
+				outputPrice: 10,
+				supportsTemperature: true,
+			},
+		})
+	}
+
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+	): AsyncGenerator<ApiStreamChunk> {
+		const model = await this.fetchModel()
+		const modelId = model.id
+		const maxTokens = model.info.maxTokens
+		const temperature = 0 // Default temperature
+
+		// Convert Anthropic messages to OpenAI format
+		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "system", content: systemPrompt },
+			...convertToOpenAiMessages(messages),
+		]
+
+		const completionParams: OpenAI.Chat.ChatCompletionCreateParams = {
+			model: modelId,
+			...(maxTokens && maxTokens > 0 && { max_tokens: maxTokens }),
+			temperature,
+			messages: openAiMessages,
+			stream: true,
+			stream_options: { include_usage: true },
+		}
+
+		let stream
+		try {
+			stream = await this.client.chat.completions.create(completionParams)
+		} catch (error) {
+			throw handleOpenAIError(error, this.providerName)
+		}
+
+		for await (const chunk of stream) {
+			// Handle OpenAI error responses
+			if ("error" in chunk) {
+				const error = chunk.error as { message?: string; code?: number }
+				console.error(`Cognima API Error: ${error?.code} - ${error?.message}`)
+				throw new Error(`Cognima API Error ${error?.code}: ${error?.message}`)
+			}
+
+			const delta = chunk.choices[0]?.delta
+
+			if (delta?.content) {
+				yield { type: "text", text: delta.content }
+			}
+
+			if (chunk.usage) {
+				const usage = chunk.usage
+				yield {
+					type: "usage",
+					inputTokens: usage.prompt_tokens || 0,
+					outputTokens: usage.completion_tokens || 0,
+					totalCost: 0, // Cognima doesn't provide cost info in usage
+				}
+			}
+		}
+	}
+}
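
Consuming the handler is the usual async-generator loop: text chunks as they arrive, then a single usage chunk at the end of the stream (stream_options.include_usage is set above). A sketch, reusing the handler built earlier:

const systemPrompt = "You are a helpful assistant."
const messages = [{ role: "user" as const, content: "Hello!" }]

for await (const chunk of handler.createMessage(systemPrompt, messages)) {
	if (chunk.type === "text") {
		process.stdout.write(chunk.text)
	} else if (chunk.type === "usage") {
		console.log(`\nin=${chunk.inputTokens} out=${chunk.outputTokens}`)
	}
}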

src/api/providers/fetchers/cognima.ts

Lines changed: 113 additions & 0 deletions

@@ -0,0 +1,113 @@
+import axios from "axios"
+import { z } from "zod"
+
+import type { ModelInfo } from "@roo-code/types"
+
+import { parseApiPrice } from "../../../shared/cost"
+
+/**
+ * CognimaModel
+ */
+
+const cognimaModelSchema = z.object({
+	id: z.string(),
+	owned_by: z.string(),
+	object: z.string(),
+	created: z.number().optional(),
+	updated: z.number().optional(),
+})
+
+export type CognimaModel = z.infer<typeof cognimaModelSchema>
+
+/**
+ * CognimaModelsResponse
+ */
+
+const cognimaModelsResponseSchema = z.object({
+	data: z.array(cognimaModelSchema),
+	object: z.string(),
+})
+
+type CognimaModelsResponse = z.infer<typeof cognimaModelsResponseSchema>
+
+/**
+ * getCognimaModels
+ */
+
+export async function getCognimaModels(apiKey?: string, baseUrl?: string): Promise<Record<string, ModelInfo>> {
+	const models: Record<string, ModelInfo> = {}
+	const baseURL = baseUrl || "https://cog2.cognima.com.br/openai/v1"
+
+	try {
+		const response = await axios.get<CognimaModelsResponse>(`${baseURL}/models`, {
+			headers: {
+				Authorization: `Bearer ${apiKey || "not-provided"}`,
+				"Content-Type": "application/json",
+			},
+		})
+
+		const result = cognimaModelsResponseSchema.safeParse(response.data)
+		const data = result.success ? result.data.data : response.data.data
+
+		if (!result.success) {
+			console.error("Cognima models response is invalid", result.error.format())
+		}
+
+		for (const model of data) {
+			models[model.id] = parseCognimaModel(model)
+		}
+	} catch (error) {
+		console.error(
+			`Error fetching Cognima models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
+		)
+	}
+
+	return models
+}
+
+/**
+ * parseCognimaModel
+ */
+
+const parseCognimaModel = (model: CognimaModel): ModelInfo => {
+	// Provide basic ModelInfo with default values since Cognima API doesn't provide detailed pricing/info
+	// These defaults can be adjusted based on the actual models available
+	const modelInfo: ModelInfo = {
+		maxTokens: 4096, // Default value, can be adjusted per model if needed
+		contextWindow: 128000, // Default value, can be adjusted per model if needed
+		supportsImages: false, // Default to false, can be determined by model id patterns
+		supportsPromptCache: false, // Default to false
+		inputPrice: 0, // Default pricing, should be determined by actual API response or config
+		outputPrice: 0, // Default pricing, should be determined by actual API response or config
+		supportsTemperature: true,
+	}
+
+	// Add model-specific overrides based on ID patterns; check "gpt-4o-mini" before "gpt-4o" so the broader match doesn't shadow it
+	if (model.id.includes("gpt-4o-mini")) {
+		modelInfo.maxTokens = 16384
+		modelInfo.contextWindow = 128000
+		modelInfo.supportsImages = true
+		modelInfo.inputPrice = 0.15
+		modelInfo.outputPrice = 0.6
+	} else if (model.id.includes("gpt-4o")) {
+		modelInfo.maxTokens = 16384
+		modelInfo.contextWindow = 128000
+		modelInfo.supportsImages = true
+		modelInfo.inputPrice = 2.5
+		modelInfo.outputPrice = 10
+	} else if (model.id.includes("claude-3-5-sonnet")) {
+		modelInfo.maxTokens = 8192
+		modelInfo.contextWindow = 200000
+		modelInfo.supportsImages = true
+		modelInfo.inputPrice = 3.0
+		modelInfo.outputPrice = 15.0
+	} else if (model.id.includes("llama-3.1-70b")) {
+		modelInfo.maxTokens = 4096
+		modelInfo.contextWindow = 128000
+		modelInfo.supportsImages = false
+		modelInfo.inputPrice = 0.52
+		modelInfo.outputPrice = 0.75
+	}
+
+	return modelInfo
+}
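
The fetcher degrades gracefully: a network failure or schema mismatch is logged and an empty record comes back, so callers never throw on model listing. Exercising it directly (the prices in ModelInfo appear to be USD per million tokens, matching the gpt-4o defaults above):

import { getCognimaModels } from "./cognima" // relative to the fetchers directory

const models = await getCognimaModels(process.env.COGNIMA_API_KEY)

for (const [id, info] of Object.entries(models)) {
	console.log(id, `ctx=${info.contextWindow}`, `in=$${info.inputPrice}/M`, `out=$${info.outputPrice}/M`)
}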

src/api/providers/fetchers/modelCache.ts

Lines changed: 4 additions & 0 deletions
@@ -25,6 +25,7 @@ import { getIOIntelligenceModels } from "./io-intelligence"
 import { getDeepInfraModels } from "./deepinfra"
 import { getHuggingFaceModels } from "./huggingface"
 import { getRooModels } from "./roo"
+import { getCognimaModels } from "./cognima"

 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })

@@ -67,6 +68,9 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
 		case "openrouter":
 			models = await getOpenRouterModels()
 			break
+		case "cognima":
+			models = await getCognimaModels(options.apiKey, options.baseUrl)
+			break
 		case "requesty":
 			// Requesty models endpoint requires an API key for per-user custom policies.
 			models = await getRequestyModels(options.baseUrl, options.apiKey)
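
Downstream code keeps calling the cached entry point, so Cognima listings flow through the same five-minute NodeCache as the other dynamic providers. A sketch; the exact GetModelsOptions shape for this branch is assumed (a provider discriminant plus the optional apiKey/baseUrl the case reads):

import { getModels } from "./modelCache" // relative to the fetchers directory

const models = await getModels({
	provider: "cognima", // assumed discriminant field name
	apiKey: process.env.COGNIMA_API_KEY,
	baseUrl: "https://cog2.cognima.com.br/openai/v1",
})

console.log(`${Object.keys(models).length} Cognima models cached`)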

src/api/providers/index.ts

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@ export { AnthropicVertexHandler } from "./anthropic-vertex"
 export { AnthropicHandler } from "./anthropic"
 export { AwsBedrockHandler } from "./bedrock"
 export { CerebrasHandler } from "./cerebras"
+export { CognimaHandler } from "./cognima"
 export { ChutesHandler } from "./chutes"
 export { ClaudeCodeHandler } from "./claude-code"
 export { DeepSeekHandler } from "./deepseek"

src/shared/api.ts

Lines changed: 1 addition & 0 deletions
@@ -153,6 +153,7 @@ type CommonFetchParams = {
 // until a corresponding entry is added here.
 const dynamicProviderExtras = {
 	openrouter: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	cognima: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	"vercel-ai-gateway": {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	huggingface: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	litellm: {} as { apiKey: string; baseUrl: string },
