@@ -38,18 +38,14 @@ import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"
3838
3939import { vscode } from "../../utils/vscode"
4040import VSCodeButtonLink from "../common/VSCodeButtonLink"
41- import { OpenRouterModelPicker } from "./OpenRouterModelPicker"
42- import OpenAiModelPicker from "./OpenAiModelPicker"
43- import { GlamaModelPicker } from "./GlamaModelPicker"
44- import { UnboundModelPicker } from "./UnboundModelPicker"
4541import { ModelInfoView } from "./ModelInfoView"
4642import { DROPDOWN_Z_INDEX } from "./styles"
47- import { RequestyModelPicker } from "./RequestyModelPicker "
43+ import { ModelPicker } from "./ModelPicker"
4844import { TemperatureControl } from "./TemperatureControl"
4945
5046interface ApiOptionsProps {
5147 uriScheme : string | undefined
52- apiConfiguration : ApiConfiguration | undefined
48+ apiConfiguration : ApiConfiguration
5349 setApiConfigurationField : < K extends keyof ApiConfiguration > ( field : K , value : ApiConfiguration [ K ] ) => void
5450 apiErrorMessage ?: string
5551 modelIdErrorMessage ?: string
@@ -67,6 +63,20 @@ const ApiOptions = ({
6763 const [ ollamaModels , setOllamaModels ] = useState < string [ ] > ( [ ] )
6864 const [ lmStudioModels , setLmStudioModels ] = useState < string [ ] > ( [ ] )
6965 const [ vsCodeLmModels , setVsCodeLmModels ] = useState < vscodemodels . LanguageModelChatSelector [ ] > ( [ ] )
66+ const [ openRouterModels , setOpenRouterModels ] = useState < Record < string , ModelInfo > > ( {
67+ [ openRouterDefaultModelId ] : openRouterDefaultModelInfo ,
68+ } )
69+ const [ glamaModels , setGlamaModels ] = useState < Record < string , ModelInfo > > ( {
70+ [ glamaDefaultModelId ] : glamaDefaultModelInfo ,
71+ } )
72+ const [ unboundModels , setUnboundModels ] = useState < Record < string , ModelInfo > > ( {
73+ [ unboundDefaultModelId ] : unboundDefaultModelInfo ,
74+ } )
75+ const [ requestyModels , setRequestyModels ] = useState < Record < string , ModelInfo > > ( {
76+ [ requestyDefaultModelId ] : requestyDefaultModelInfo ,
77+ } )
78+ const [ openAiModels , setOpenAiModels ] = useState < Record < string , ModelInfo > | null > ( null )
79+
7080 const [ anthropicBaseUrlSelected , setAnthropicBaseUrlSelected ] = useState ( ! ! apiConfiguration ?. anthropicBaseUrl )
7181 const [ azureApiVersionSelected , setAzureApiVersionSelected ] = useState ( ! ! apiConfiguration ?. azureApiVersion )
7282 const [ openRouterBaseUrlSelected , setOpenRouterBaseUrlSelected ] = useState ( ! ! apiConfiguration ?. openRouterBaseUrl )
@@ -104,24 +114,93 @@ const ApiOptions = ({
104114 vscode . postMessage ( { type : "requestLmStudioModels" , text : apiConfiguration ?. lmStudioBaseUrl } )
105115 } else if ( selectedProvider === "vscode-lm" ) {
106116 vscode . postMessage ( { type : "requestVsCodeLmModels" } )
117+ } else if ( selectedProvider === "openai" ) {
118+ vscode . postMessage ( {
119+ type : "refreshOpenAiModels" ,
120+ values : {
121+ baseUrl : apiConfiguration ?. openAiBaseUrl ,
122+ apiKey : apiConfiguration ?. openAiApiKey ,
123+ } ,
124+ } )
125+ } else if ( selectedProvider === "openrouter" ) {
126+ vscode . postMessage ( { type : "refreshOpenRouterModels" , values : { } } )
127+ } else if ( selectedProvider === "glama" ) {
128+ vscode . postMessage ( { type : "refreshGlamaModels" , values : { } } )
129+ } else if ( selectedProvider === "requesty" ) {
130+ vscode . postMessage ( {
131+ type : "refreshRequestyModels" ,
132+ values : {
133+ apiKey : apiConfiguration ?. requestyApiKey ,
134+ } ,
135+ } )
107136 }
108137 } ,
109138 250 ,
110- [ selectedProvider , apiConfiguration ?. ollamaBaseUrl , apiConfiguration ?. lmStudioBaseUrl ] ,
139+ [
140+ selectedProvider ,
141+ apiConfiguration ?. ollamaBaseUrl ,
142+ apiConfiguration ?. lmStudioBaseUrl ,
143+ apiConfiguration ?. openAiBaseUrl ,
144+ apiConfiguration ?. openAiApiKey ,
145+ apiConfiguration ?. requestyApiKey ,
146+ ] ,
111147 )
112148
113149 const handleMessage = useCallback ( ( event : MessageEvent ) => {
114150 const message : ExtensionMessage = event . data
115-
116- if ( message . type === "ollamaModels" && Array . isArray ( message . ollamaModels ) ) {
117- const newModels = message . ollamaModels
118- setOllamaModels ( newModels )
119- } else if ( message . type === "lmStudioModels" && Array . isArray ( message . lmStudioModels ) ) {
120- const newModels = message . lmStudioModels
121- setLmStudioModels ( newModels )
122- } else if ( message . type === "vsCodeLmModels" && Array . isArray ( message . vsCodeLmModels ) ) {
123- const newModels = message . vsCodeLmModels
124- setVsCodeLmModels ( newModels )
151+ switch ( message . type ) {
152+ case "ollamaModels" :
153+ {
154+ const newModels = message . ollamaModels ?? [ ]
155+ setOllamaModels ( newModels )
156+ }
157+ break
158+ case "lmStudioModels" :
159+ {
160+ const newModels = message . lmStudioModels ?? [ ]
161+ setLmStudioModels ( newModels )
162+ }
163+ break
164+ case "vsCodeLmModels" :
165+ {
166+ const newModels = message . vsCodeLmModels ?? [ ]
167+ setVsCodeLmModels ( newModels )
168+ }
169+ break
170+ case "glamaModels" : {
171+ const updatedModels = message . glamaModels ?? { }
172+ setGlamaModels ( {
173+ [ glamaDefaultModelId ] : glamaDefaultModelInfo , // in case the extension sent a model list without the default model
174+ ...updatedModels ,
175+ } )
176+ break
177+ }
178+ case "openRouterModels" : {
179+ const updatedModels = message . openRouterModels ?? { }
180+ setOpenRouterModels ( {
181+ [ openRouterDefaultModelId ] : openRouterDefaultModelInfo , // in case the extension sent a model list without the default model
182+ ...updatedModels ,
183+ } )
184+ break
185+ }
186+ case "openAiModels" : {
187+ const updatedModels = message . openAiModels ?? [ ]
188+ setOpenAiModels ( Object . fromEntries ( updatedModels . map ( ( item ) => [ item , openAiModelInfoSaneDefaults ] ) ) )
189+ break
190+ }
191+ case "unboundModels" : {
192+ const updatedModels = message . unboundModels ?? { }
193+ setUnboundModels ( updatedModels )
194+ break
195+ }
196+ case "requestyModels" : {
197+ const updatedModels = message . requestyModels ?? { }
198+ setRequestyModels ( {
199+ [ requestyDefaultModelId ] : requestyDefaultModelInfo , // in case the extension sent a model list without the default model
200+ ...updatedModels ,
201+ } )
202+ break
203+ }
125204 }
126205 } , [ ] )
127206
@@ -616,7 +695,17 @@ const ApiOptions = ({
616695 placeholder = "Enter API Key..." >
617696 < span style = { { fontWeight : 500 } } > API Key</ span >
618697 </ VSCodeTextField >
619- < OpenAiModelPicker />
698+ < ModelPicker
699+ apiConfiguration = { apiConfiguration }
700+ modelIdKey = "openAiModelId"
701+ modelInfoKey = "openAiCustomModelInfo"
702+ serviceName = "OpenAI"
703+ serviceUrl = "https://platform.openai.com"
704+ recommendedModel = "gpt-4-turbo-preview"
705+ models = { openAiModels }
706+ setApiConfigurationField = { setApiConfigurationField }
707+ defaultModelInfo = { openAiModelInfoSaneDefaults }
708+ />
620709 < div style = { { display : "flex" , alignItems : "center" } } >
621710 < Checkbox
622711 checked = { apiConfiguration ?. openAiStreamingEnabled ?? true }
@@ -704,7 +793,7 @@ const ApiOptions = ({
704793 } ) ( ) ,
705794 } }
706795 title = "Maximum number of tokens the model can generate in a single response"
707- onChange = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
796+ onInput = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
708797 const value = parseInt ( ( e . target as HTMLInputElement ) . value )
709798 return {
710799 ...( apiConfiguration ?. openAiCustomModelInfo ||
@@ -751,7 +840,7 @@ const ApiOptions = ({
751840 } ) ( ) ,
752841 } }
753842 title = "Total number of tokens (input + output) the model can process in a single request"
754- onChange = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
843+ onInput = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
755844 const value = ( e . target as HTMLInputElement ) . value
756845 const parsed = parseInt ( value )
757846 return {
@@ -897,7 +986,7 @@ const ApiOptions = ({
897986 : "var(--vscode-errorForeground)"
898987 } ) ( ) ,
899988 } }
900- onChange = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
989+ onInput = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
901990 const value = ( e . target as HTMLInputElement ) . value
902991 const parsed = parseInt ( value )
903992 return {
@@ -942,7 +1031,7 @@ const ApiOptions = ({
9421031 : "var(--vscode-errorForeground)"
9431032 } ) ( ) ,
9441033 } }
945- onChange = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
1034+ onInput = { handleInputChange ( "openAiCustomModelInfo" , ( e ) => {
9461035 const value = ( e . target as HTMLInputElement ) . value
9471036 const parsed = parseInt ( value )
9481037 return {
@@ -1011,6 +1100,7 @@ const ApiOptions = ({
10111100 placeholder = { "e.g. meta-llama-3.1-8b-instruct" } >
10121101 < span style = { { fontWeight : 500 } } > Model ID</ span >
10131102 </ VSCodeTextField >
1103+
10141104 { lmStudioModels . length > 0 && (
10151105 < VSCodeRadioGroup
10161106 value = {
@@ -1220,7 +1310,18 @@ const ApiOptions = ({
12201310 } } >
12211311 This key is stored locally and only used to make API requests from this extension.
12221312 </ p >
1223- < UnboundModelPicker />
1313+ < ModelPicker
1314+ apiConfiguration = { apiConfiguration }
1315+ defaultModelId = { unboundDefaultModelId }
1316+ defaultModelInfo = { unboundDefaultModelInfo }
1317+ models = { unboundModels }
1318+ modelInfoKey = "unboundModelInfo"
1319+ modelIdKey = "unboundModelId"
1320+ serviceName = "Unbound"
1321+ serviceUrl = "https://api.getunbound.ai/models"
1322+ recommendedModel = { unboundDefaultModelId }
1323+ setApiConfigurationField = { setApiConfigurationField }
1324+ />
12241325 </ div >
12251326 ) }
12261327
@@ -1236,9 +1337,49 @@ const ApiOptions = ({
12361337 </ p >
12371338 ) }
12381339
1239- { selectedProvider === "glama" && < GlamaModelPicker /> }
1240- { selectedProvider === "openrouter" && < OpenRouterModelPicker /> }
1241- { selectedProvider === "requesty" && < RequestyModelPicker /> }
1340+ { selectedProvider === "glama" && (
1341+ < ModelPicker
1342+ apiConfiguration = { apiConfiguration }
1343+ defaultModelId = { glamaDefaultModelId }
1344+ defaultModelInfo = { glamaDefaultModelInfo }
1345+ models = { glamaModels }
1346+ modelInfoKey = "glamaModelInfo"
1347+ modelIdKey = "glamaModelId"
1348+ serviceName = "Glama"
1349+ serviceUrl = "https://glama.ai/models"
1350+ recommendedModel = "anthropic/claude-3-7-sonnet"
1351+ setApiConfigurationField = { setApiConfigurationField }
1352+ />
1353+ ) }
1354+
1355+ { selectedProvider === "openrouter" && (
1356+ < ModelPicker
1357+ apiConfiguration = { apiConfiguration }
1358+ setApiConfigurationField = { setApiConfigurationField }
1359+ defaultModelId = { openRouterDefaultModelId }
1360+ defaultModelInfo = { openRouterDefaultModelInfo }
1361+ models = { openRouterModels }
1362+ modelIdKey = "openRouterModelId"
1363+ modelInfoKey = "openRouterModelInfo"
1364+ serviceName = "OpenRouter"
1365+ serviceUrl = "https://openrouter.ai/models"
1366+ recommendedModel = "anthropic/claude-3.7-sonnet"
1367+ />
1368+ ) }
1369+ { selectedProvider === "requesty" && (
1370+ < ModelPicker
1371+ apiConfiguration = { apiConfiguration }
1372+ setApiConfigurationField = { setApiConfigurationField }
1373+ defaultModelId = { requestyDefaultModelId }
1374+ defaultModelInfo = { requestyDefaultModelInfo }
1375+ models = { requestyModels }
1376+ modelIdKey = "requestyModelId"
1377+ modelInfoKey = "requestyModelInfo"
1378+ serviceName = "Requesty"
1379+ serviceUrl = "https://requesty.ai"
1380+ recommendedModel = "anthropic/claude-3-7-sonnet-latest"
1381+ />
1382+ ) }
12421383
12431384 { selectedProvider !== "glama" &&
12441385 selectedProvider !== "openrouter" &&
@@ -1260,7 +1401,6 @@ const ApiOptions = ({
12601401 { selectedProvider === "deepseek" && createDropdown ( deepSeekModels ) }
12611402 { selectedProvider === "mistral" && createDropdown ( mistralModels ) }
12621403 </ div >
1263-
12641404 < ModelInfoView
12651405 selectedModelId = { selectedModelId }
12661406 modelInfo = { selectedModelInfo }
0 commit comments