1+ import axios from "axios"
2+ import { z } from "zod"
3+
4+ import type { ModelInfo } from "@roo-code/types"
5+
6+ import { parseApiPrice } from "../../../shared/cost"
7+
8+ /**
9+ * CognimaModel
10+ */
11+
12+ const cognimaModelSchema = z . object ( {
13+ id : z . string ( ) ,
14+ owned_by : z . string ( ) ,
15+ object : z . string ( ) ,
16+ created : z . number ( ) . optional ( ) ,
17+ updated : z . number ( ) . optional ( ) ,
18+ } )
19+
20+ export type CognimaModel = z . infer < typeof cognimaModelSchema >
21+
22+ /**
23+ * CognimaModelsResponse
24+ */
25+
26+ const cognimaModelsResponseSchema = z . object ( {
27+ data : z . array ( cognimaModelSchema ) ,
28+ object : z . string ( ) ,
29+ } )
30+
31+ type CognimaModelsResponse = z . infer < typeof cognimaModelsResponseSchema >
32+
33+ /**
34+ * getCognimaModels
35+ */
36+
37+ export async function getCognimaModels ( apiKey ?: string , baseUrl ?: string ) : Promise < Record < string , ModelInfo > > {
38+ const models : Record < string , ModelInfo > = { }
39+ const baseURL = baseUrl || "https://cog2.cognima.com.br/openai/v1"
40+
41+ try {
42+ const response = await axios . get < CognimaModelsResponse > ( `${ baseURL } /models` , {
43+ headers : {
44+ Authorization : `Bearer ${ apiKey || "not-provided" } ` ,
45+ "Content-Type" : "application/json" ,
46+ } ,
47+ } )
48+
49+ const result = cognimaModelsResponseSchema . safeParse ( response . data )
50+ const data = result . success ? result . data . data : response . data . data
51+
52+ if ( ! result . success ) {
53+ console . error ( "Cognima models response is invalid" , result . error . format ( ) )
54+ }
55+
56+ for ( const model of data ) {
57+ models [ model . id ] = parseCognimaModel ( model )
58+ }
59+ } catch ( error ) {
60+ console . error (
61+ `Error fetching Cognima models: ${ JSON . stringify ( error , Object . getOwnPropertyNames ( error ) , 2 ) } ` ,
62+ )
63+ }
64+
65+ return models
66+ }
67+
68+ /**
69+ * parseCognimaModel
70+ */
71+
72+ const parseCognimaModel = ( model : CognimaModel ) : ModelInfo => {
73+ // Provide basic ModelInfo with default values since Cognima API doesn't provide detailed pricing/info
74+ // These defaults can be adjusted based on the actual models available
75+ const modelInfo : ModelInfo = {
76+ maxTokens : 4096 , // Default value, can be adjusted per model if needed
77+ contextWindow : 128000 , // Default value, can be adjusted per model if needed
78+ supportsImages : false , // Default to false, can be determined by model id patterns
79+ supportsPromptCache : false , // Default to false
80+ inputPrice : 0 , // Default pricing, should be determined by actual API response or config
81+ outputPrice : 0 , // Default pricing, should be determined by actual API response or config
82+ supportsTemperature : true ,
83+ }
84+
85+ // Add model-specific overrides based on ID patterns
86+ if ( model . id . includes ( "gpt-4o" ) ) {
87+ modelInfo . maxTokens = 16384
88+ modelInfo . contextWindow = 128000
89+ modelInfo . supportsImages = true
90+ modelInfo . inputPrice = 2.5
91+ modelInfo . outputPrice = 10
92+ } else if ( model . id . includes ( "gpt-4o-mini" ) ) {
93+ modelInfo . maxTokens = 16384
94+ modelInfo . contextWindow = 128000
95+ modelInfo . supportsImages = true
96+ modelInfo . inputPrice = 0.15
97+ modelInfo . outputPrice = 0.6
98+ } else if ( model . id . includes ( "claude-3-5-sonnet" ) ) {
99+ modelInfo . maxTokens = 8192
100+ modelInfo . contextWindow = 200000
101+ modelInfo . supportsImages = true
102+ modelInfo . inputPrice = 3.0
103+ modelInfo . outputPrice = 15.0
104+ } else if ( model . id . includes ( "llama-3.1-70b" ) ) {
105+ modelInfo . maxTokens = 4096
106+ modelInfo . contextWindow = 128000
107+ modelInfo . supportsImages = false
108+ modelInfo . inputPrice = 0.52
109+ modelInfo . outputPrice = 0.75
110+ }
111+
112+ return modelInfo
113+ }