feat(api): add cognima provider support #8868
Changes from all commits: 5e5c5d5, 3e179b6, 0076e71, 253cbb1, ad187f7, ab0d7c2, 61546b5
New file (`@@ -0,0 +1,3 @@`):

```typescript
export type CognimaModelId = string

export const cognimaDefaultModelId: CognimaModelId = "gpt-4o"
```
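
For context, a minimal sketch of how a caller might fall back to this default when no model id is configured. The helper below is purely illustrative; in this PR the actual fallback happens inside `RouterProvider`, which is not part of the diff:

```typescript
import { type CognimaModelId, cognimaDefaultModelId } from "@roo-code/types"

// Hypothetical helper: prefer a configured model id, else fall back to the default ("gpt-4o").
function resolveModelId(configured?: string): CognimaModelId {
	return configured ?? cognimaDefaultModelId
}

resolveModelId() // => "gpt-4o"
```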
New file (`@@ -0,0 +1,92 @@`):

```typescript
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import { type CognimaModelId, cognimaDefaultModelId } from "@roo-code/types"

import type { ApiHandlerOptions } from "../../shared/api"

import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStreamChunk } from "../transform/stream"
import { RouterProvider } from "./router-provider"
import { handleOpenAIError } from "./utils/openai-error-handler"

export class CognimaHandler extends RouterProvider {
	private readonly providerName = "Cognima"

	constructor(options: ApiHandlerOptions) {
		super({
			options,
			name: "cognima",
			baseURL: "https://cog2.cognima.com.br/openai/v1",
			apiKey: options.cognimaApiKey,
			modelId: options.cognimaModelId,
			defaultModelId: cognimaDefaultModelId,
			defaultModelInfo: {
				maxTokens: 16384,
				contextWindow: 128000,
				supportsImages: true,
				supportsPromptCache: false,
				inputPrice: 2.5,
				outputPrice: 10,
				supportsTemperature: true,
			},
		})
	}

	override async *createMessage(
		systemPrompt: string,
		messages: Anthropic.Messages.MessageParam[],
	): AsyncGenerator<ApiStreamChunk> {
		const model = await this.fetchModel()
		const modelId = model.id
		const maxTokens = model.info.maxTokens
		const temperature = 0 // Default temperature

		// Convert Anthropic messages to OpenAI format
		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
			{ role: "system", content: systemPrompt },
			...convertToOpenAiMessages(messages),
		]

		const completionParams: OpenAI.Chat.ChatCompletionCreateParams = {
			model: modelId,
			...(maxTokens && maxTokens > 0 && { max_tokens: maxTokens }),
			temperature,
			messages: openAiMessages,
			stream: true,
			stream_options: { include_usage: true },
		}

		let stream
		try {
			stream = await this.client.chat.completions.create(completionParams)
		} catch (error) {
			throw handleOpenAIError(error, this.providerName)
		}

		for await (const chunk of stream) {
			// Handle OpenAI error responses
			if ("error" in chunk) {
				const error = chunk.error as { message?: string; code?: number }
				console.error(`Cognima API Error: ${error?.code} - ${error?.message}`)
				throw new Error(`Cognima API Error ${error?.code}: ${error?.message}`)
			}

			const delta = chunk.choices[0]?.delta

			if (delta?.content) {
				yield { type: "text", text: delta.content }
			}

			if (chunk.usage) {
				const usage = chunk.usage
				yield {
					type: "usage",
					inputTokens: usage.prompt_tokens || 0,
					outputTokens: usage.completion_tokens || 0,
					totalCost: 0, // Cognima doesn't provide cost info in usage
				}
			}
		}
	}
}
```
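
For context, a sketch of how the handler's streaming generator might be driven. The `CognimaHandler` API and chunk shapes come from the diff above; the driver function, the env-var name, and the partial-options cast are assumptions for illustration:

```typescript
// Hypothetical driver: stream one completion and separate text from usage chunks.
async function runCognimaExample() {
	const handler = new CognimaHandler({
		cognimaApiKey: process.env.COGNIMA_API_KEY,
		cognimaModelId: "gpt-4o",
	} as ApiHandlerOptions)

	let text = ""
	for await (const chunk of handler.createMessage("You are a concise assistant.", [
		{ role: "user", content: "Say hello." },
	])) {
		if (chunk.type === "text") {
			text += chunk.text // incremental completion text
		} else if (chunk.type === "usage") {
			console.log(`tokens: ${chunk.inputTokens} in / ${chunk.outputTokens} out`)
		}
	}
	return text
}
```

Note that a usage chunk only arrives because the handler sets `stream_options: { include_usage: true }`, which makes the OpenAI-compatible endpoint append a final chunk carrying token counts.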
New file (`@@ -0,0 +1,111 @@`):

```typescript
import axios from "axios"
import { z } from "zod"

import type { ModelInfo } from "@roo-code/types"

/**
 * CognimaModel
 */

const cognimaModelSchema = z.object({
	id: z.string(),
	owned_by: z.string(),
	object: z.string(),
	created: z.number().optional(),
	updated: z.number().optional(),
})

export type CognimaModel = z.infer<typeof cognimaModelSchema>

/**
 * CognimaModelsResponse
 */

const cognimaModelsResponseSchema = z.object({
	data: z.array(cognimaModelSchema),
	object: z.string(),
})

type CognimaModelsResponse = z.infer<typeof cognimaModelsResponseSchema>

/**
 * getCognimaModels
 */

export async function getCognimaModels(apiKey?: string, baseUrl?: string): Promise<Record<string, ModelInfo>> {
	const models: Record<string, ModelInfo> = {}
	const baseURL = baseUrl || "https://cog2.cognima.com.br/openai/v1"

	try {
		const response = await axios.get<CognimaModelsResponse>(`${baseURL}/models`, {
			headers: {
				Authorization: `Bearer ${apiKey || "not-provided"}`,
				"Content-Type": "application/json",
			},
		})

		const result = cognimaModelsResponseSchema.safeParse(response.data)
		const data = result.success ? result.data.data : response.data.data

		if (!result.success) {
			console.error("Cognima models response is invalid", result.error.format())
		}

		for (const model of data) {
			models[model.id] = parseCognimaModel(model)
		}
	} catch (error) {
		console.error(
			`Error fetching Cognima models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`,
		)
	}

	return models
}

/**
 * parseCognimaModel
 */

const parseCognimaModel = (model: CognimaModel): ModelInfo => {
	// Provide basic ModelInfo with default values since the Cognima API doesn't return detailed pricing/info.
	// These defaults can be adjusted based on the actual models available.
	const modelInfo: ModelInfo = {
		maxTokens: 4096, // Default value, can be adjusted per model if needed
		contextWindow: 128000, // Default value, can be adjusted per model if needed
		supportsImages: false, // Default to false, can be determined by model id patterns
		supportsPromptCache: false, // Default to false
		inputPrice: 0, // Default pricing, should be determined by actual API response or config
		outputPrice: 0, // Default pricing, should be determined by actual API response or config
		supportsTemperature: true,
	}

	// Model-specific overrides based on ID patterns. Check "gpt-4o-mini" before
	// "gpt-4o": the broader "gpt-4o" substring also matches mini ids and would
	// otherwise make the mini branch unreachable.
	if (model.id.includes("gpt-4o-mini")) {
		modelInfo.maxTokens = 16384
		modelInfo.contextWindow = 128000
		modelInfo.supportsImages = true
		modelInfo.inputPrice = 0.15
		modelInfo.outputPrice = 0.6
	} else if (model.id.includes("gpt-4o")) {
		modelInfo.maxTokens = 16384
		modelInfo.contextWindow = 128000
		modelInfo.supportsImages = true
		modelInfo.inputPrice = 2.5
		modelInfo.outputPrice = 10
	} else if (model.id.includes("claude-3-5-sonnet")) {
		modelInfo.maxTokens = 8192
		modelInfo.contextWindow = 200000
		modelInfo.supportsImages = true
		modelInfo.inputPrice = 3.0
		modelInfo.outputPrice = 15.0
	} else if (model.id.includes("llama-3.1-70b")) {
		modelInfo.maxTokens = 4096
		modelInfo.contextWindow = 128000
		modelInfo.supportsImages = false
		modelInfo.inputPrice = 0.52
		modelInfo.outputPrice = 0.75
	}

	return modelInfo
}
```
**Review comment:** Missing newline at end of file. Add a newline after the last line to follow standard conventions.
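
For context, a sketch of consuming the model fetcher. `getCognimaModels` and `ModelInfo` come from the diff; the import path and the log format are assumptions for illustration:

```typescript
// Hypothetical caller: fetch the model map and print a one-line summary per model.
import { getCognimaModels } from "./fetchers/cognima" // assumed path

async function listCognimaModels() {
	const models = await getCognimaModels(process.env.COGNIMA_API_KEY)
	for (const [id, info] of Object.entries(models)) {
		console.log(`${id}: ctx=${info.contextWindow}, $${info.inputPrice}/M in, $${info.outputPrice}/M out`)
	}
}

listCognimaModels()
```

Because `getCognimaModels` catches fetch errors internally and returns an empty map rather than rejecting, callers only need to handle the empty-result case, not a thrown error.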