@@ -1,4 +1,4 @@
-import { ModelInfo, REGION_TO_URL } from "@roo-code/types"
+import { ModelInfo, REGION_TO_URL, WATSONX_NON_INFERENCE_MODELS } from "@roo-code/types"
 import { IamAuthenticator, CloudPakForDataAuthenticator, UserOptions } from "ibm-cloud-sdk-core"
 import { WatsonXAI } from "@ibm-cloud/watsonx-ai"
 import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"
@@ -67,20 +67,18 @@ export async function getWatsonxModels(
 			throw new Error("Password is required for IBM Cloud Pak for Data")
 		}
 		options.serviceUrl = baseUrl
-		if (username) {
-			if (password) {
-				options.authenticator = new CloudPakForDataAuthenticator({
-					url: `${baseUrl}/icp4d-api`,
-					username: username,
-					password: password,
-				})
-			} else if (apiKey) {
-				options.authenticator = new CloudPakForDataAuthenticator({
-					url: `${baseUrl}/icp4d-api`,
-					username: username,
-					apikey: apiKey,
-				})
-			}
+		if (password) {
+			options.authenticator = new CloudPakForDataAuthenticator({
+				url: `${baseUrl}/icp4d-api`,
+				username,
+				password,
+			})
+		} else {
+			options.authenticator = new CloudPakForDataAuthenticator({
+				url: `${baseUrl}/icp4d-api`,
+				username,
+				apikey: apiKey,
+			})
 		}
 	}
 
@@ -96,39 +94,21 @@ export async function getWatsonxModels(
 	if (Array.isArray(modelsList) && modelsList.length > 0) {
 		for (const model of modelsList) {
 			const modelId = model.model_id
-			let contextWindow = 131072
-			if (model.model_limits && model.model_limits.max_sequence_length) {
-				contextWindow = model.model_limits.max_sequence_length
-			}
-			let maxTokens = Math.floor(contextWindow / 16)
-			if (
-				model.model_limits &&
-				model.training_parameters &&
-				model.training_parameters.max_output_tokens &&
-				model.training_parameters.max_output_tokens.max
-			) {
-				maxTokens = model.training_parameters.max_output_tokens.max
-			}
 
-			let description = ""
-			if (model.long_description) {
-				description = model.long_description
-			} else if (model.short_description) {
-				description = model.short_description
+			if (WATSONX_NON_INFERENCE_MODELS.includes(modelId as any)) {
+				continue
 			}
-			if (
-				!(
-					modelId === "meta-llama/llama-guard-3-11b-vision" ||
-					modelId === "ibm/granite-guardian-3-8b" ||
-					modelId === "ibm/granite-guardian-3-2b"
-				)
-			) {
-				knownModels[modelId] = {
-					contextWindow,
-					maxTokens,
-					supportsPromptCache: false,
-					description,
-				}
+
+			const contextWindow = model.model_limits?.max_sequence_length || 131072
+			const maxTokens =
+				model.training_parameters?.max_output_tokens?.max || Math.floor(contextWindow / 16)
+			const description = model.long_description || model.short_description || ""
+
+			knownModels[modelId] = {
+				contextWindow,
+				maxTokens,
+				supportsPromptCache: false,
+				description,
 			}
 		}
 	}
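For readers skimming the second hunk: the removed if/else chains are collapsed into optional-chaining lookups with `||` fallbacks, and guard/guardian models are skipped via the shared `WATSONX_NON_INFERENCE_MODELS` constant instead of inline string comparisons. Below is a minimal standalone sketch of that mapping; the entry interface is an assumption that mirrors only the fields this diff reads (it is not the actual @ibm-cloud/watsonx-ai SDK type), and the hard-coded ID list is a stand-in for `WATSONX_NON_INFERENCE_MODELS`, repeating the IDs previously excluded inline, since the real constant lives in @roo-code/types and its contents are not shown here.

```typescript
// Illustrative sketch only; field names follow what the diff accesses.
interface WatsonxModelEntry {
	model_id: string
	model_limits?: { max_sequence_length?: number }
	training_parameters?: { max_output_tokens?: { max?: number } }
	long_description?: string
	short_description?: string
}

// Stand-in for WATSONX_NON_INFERENCE_MODELS (hypothetical local copy).
const NON_INFERENCE_MODEL_IDS = [
	"meta-llama/llama-guard-3-11b-vision",
	"ibm/granite-guardian-3-8b",
	"ibm/granite-guardian-3-2b",
]

function toModelInfo(model: WatsonxModelEntry) {
	// Skip entries that cannot be used for chat/completion inference.
	if (NON_INFERENCE_MODEL_IDS.includes(model.model_id)) {
		return undefined
	}

	// Fall back to a 131072-token context window when the API omits the limit,
	// and to 1/16 of the context window when no max_output_tokens is reported.
	const contextWindow = model.model_limits?.max_sequence_length || 131072
	const maxTokens = model.training_parameters?.max_output_tokens?.max || Math.floor(contextWindow / 16)
	const description = model.long_description || model.short_description || ""

	return { contextWindow, maxTokens, supportsPromptCache: false, description }
}
```

Note that `||` (rather than `??`) means a reported limit of 0 would also fall back to the default, which matches the truthiness checks in the removed code.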