@@ -22,19 +22,21 @@ service ModelsService {
   rpc refreshRequestyModels(EmptyRequest) returns (OpenRouterCompatibleModelInfo);
   // Subscribe to OpenRouter models updates
   rpc subscribeToOpenRouterModels(EmptyRequest) returns (stream OpenRouterCompatibleModelInfo);
+  // Updates API configuration
+  rpc updateApiConfigurationProto(UpdateApiConfigurationRequest) returns (Empty);
 }

 // List of VS Code LM models
 message VsCodeLmModelsArray {
-  repeated VsCodeLmModel models = 1;
+  repeated LanguageModelChatSelector models = 1;
 }

-// Structure representing a VS Code LM model
-message VsCodeLmModel {
-  string vendor = 1;
-  string family = 2;
-  string version = 3;
-  string id = 4;
+// Structure representing a language model chat selector
+message LanguageModelChatSelector {
+  optional string vendor = 1;
+  optional string family = 2;
+  optional string version = 3;
+  optional string id = 4;
 }

 // Price tier for tiered pricing models
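The renamed LanguageModelChatSelector message mirrors VS Code's own LanguageModelChatSelector type, where every field is optional. A minimal TypeScript sketch of how such a selector is consumed on the extension side (the vendor/family values are illustrative, not taken from this change):

import * as vscode from "vscode"

// Pick the first chat model matching a selector; all selector fields are
// optional, matching the proto message above.
async function pickVsCodeLmModel(selector: vscode.LanguageModelChatSelector) {
  const models = await vscode.lm.selectChatModels(selector)
  return models[0] // undefined when no installed model matches
}

// Example: pickVsCodeLmModel({ vendor: "copilot", family: "gpt-4o" })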
@@ -86,3 +88,146 @@ message OpenAiModelsRequest {
   string baseUrl = 2;
   string apiKey = 3;
 }
+
+// Request for updating API configuration
+message UpdateApiConfigurationRequest {
+  Metadata metadata = 1;
+  ModelsApiConfiguration api_configuration = 2;
+}
+
+// API Provider enumeration
+enum ApiProvider {
+  ANTHROPIC = 0;
+  OPENROUTER = 1;
+  BEDROCK = 2;
+  VERTEX = 3;
+  OPENAI = 4;
+  OLLAMA = 5;
+  LMSTUDIO = 6;
+  GEMINI = 7;
+  OPENAI_NATIVE = 8;
+  REQUESTY = 9;
+  TOGETHER = 10;
+  DEEPSEEK = 11;
+  QWEN = 12;
+  DOUBAO = 13;
+  MISTRAL = 14;
+  VSCODE_LM = 15;
+  CLINE = 16;
+  LITELLM = 17;
+  NEBIUS = 18;
+  FIREWORKS = 19;
+  ASKSAGE = 20;
+  XAI = 21;
+  SAMBANOVA = 22;
+  CEREBRAS = 23;
+}
+
+// Model info for OpenAI-compatible models
+message OpenAiCompatibleModelInfo {
+  optional int32 max_tokens = 1;
+  optional int32 context_window = 2;
+  optional bool supports_images = 3;
+  bool supports_prompt_cache = 4;
+  optional double input_price = 5;
+  optional double output_price = 6;
+  optional ThinkingConfig thinking_config = 7;
+  optional bool supports_global_endpoint = 8;
+  optional double cache_writes_price = 9;
+  optional double cache_reads_price = 10;
+  optional string description = 11;
+  repeated ModelTier tiers = 12;
+  optional double temperature = 13;
+  optional bool is_r1_format_required = 14;
+}
+
+// Model info for LiteLLM models
+message LiteLLMModelInfo {
+  optional int32 max_tokens = 1;
+  optional int32 context_window = 2;
+  optional bool supports_images = 3;
+  bool supports_prompt_cache = 4;
+  optional double input_price = 5;
+  optional double output_price = 6;
+  optional ThinkingConfig thinking_config = 7;
+  optional bool supports_global_endpoint = 8;
+  optional double cache_writes_price = 9;
+  optional double cache_reads_price = 10;
+  optional string description = 11;
+  repeated ModelTier tiers = 12;
+  optional double temperature = 13;
+}
+
+// Main ApiConfiguration message
+message ModelsApiConfiguration {
+  // From ApiHandlerOptions (excluding onRetryAttempt function)
+  optional string api_model_id = 1;
+  optional string api_key = 2;
+  optional string cline_api_key = 3;
+  optional string task_id = 4;
+  optional string lite_llm_base_url = 5;
+  optional string lite_llm_model_id = 6;
+  optional string lite_llm_api_key = 7;
+  optional bool lite_llm_use_prompt_cache = 8;
+  map<string, string> open_ai_headers = 9;
+  optional LiteLLMModelInfo lite_llm_model_info = 10;
+  optional string anthropic_base_url = 11;
+  optional string open_router_api_key = 12;
+  optional string open_router_model_id = 13;
+  optional OpenRouterModelInfo open_router_model_info = 14;
+  optional string open_router_provider_sorting = 15;
+  optional string aws_access_key = 16;
+  optional string aws_secret_key = 17;
+  optional string aws_session_token = 18;
+  optional string aws_region = 19;
+  optional bool aws_use_cross_region_inference = 20;
+  optional bool aws_bedrock_use_prompt_cache = 21;
+  optional bool aws_use_profile = 22;
+  optional string aws_profile = 23;
+  optional string aws_bedrock_endpoint = 24;
+  optional bool aws_bedrock_custom_selected = 25;
+  optional string aws_bedrock_custom_model_base_id = 26;
+  optional string vertex_project_id = 27;
+  optional string vertex_region = 28;
+  optional string open_ai_base_url = 29;
+  optional string open_ai_api_key = 30;
+  optional string open_ai_model_id = 31;
+  optional OpenAiCompatibleModelInfo open_ai_model_info = 32;
+  optional string ollama_model_id = 33;
+  optional string ollama_base_url = 34;
+  optional string ollama_api_options_ctx_num = 35;
+  optional string lm_studio_model_id = 36;
+  optional string lm_studio_base_url = 37;
+  optional string gemini_api_key = 38;
+  optional string gemini_base_url = 39;
+  optional string open_ai_native_api_key = 40;
+  optional string deep_seek_api_key = 41;
+  optional string requesty_api_key = 42;
+  optional string requesty_model_id = 43;
+  optional OpenRouterModelInfo requesty_model_info = 44;
+  optional string together_api_key = 45;
+  optional string together_model_id = 46;
+  optional string fireworks_api_key = 47;
+  optional string fireworks_model_id = 48;
+  optional int32 fireworks_model_max_completion_tokens = 49;
+  optional int32 fireworks_model_max_tokens = 50;
+  optional string qwen_api_key = 51;
+  optional string doubao_api_key = 52;
+  optional string mistral_api_key = 53;
+  optional string azure_api_version = 54;
+  optional LanguageModelChatSelector vs_code_lm_model_selector = 55;
+  optional string qwen_api_line = 56;
+  optional string nebius_api_key = 57;
+  optional string asksage_api_url = 58;
+  optional string asksage_api_key = 59;
+  optional string xai_api_key = 60;
+  optional int32 thinking_budget_tokens = 61;
+  optional string reasoning_effort = 62;
+  optional string sambanova_api_key = 63;
+  optional string cerebras_api_key = 64;
+  optional int32 request_timeout_ms = 65;
+
+  // From ApiConfiguration (additional fields)
+  optional ApiProvider api_provider = 66;
+  repeated string favorited_model_ids = 67;
+}
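For orientation, a hedged sketch of what calling the new RPC might look like from TypeScript once bindings are generated from this proto. The import path, client class, and camelCase field names are assumptions about the generated code, not something this diff defines:

// Hypothetical generated-client usage; all names below are assumed.
import { ModelsServiceClient, ApiProvider } from "@generated/models_service" // assumed import path

async function saveOpenRouterConfig(client: ModelsServiceClient) {
  // Only a handful of ModelsApiConfiguration fields are set; the rest stay unset.
  const apiConfiguration = {
    apiProvider: ApiProvider.OPENROUTER,
    openRouterApiKey: "sk-or-...", // placeholder secret
    openRouterModelId: "anthropic/claude-3.5-sonnet", // illustrative model id
  }
  // The response is Empty: success is signaled by the call resolving without an error.
  await client.updateApiConfigurationProto({ metadata: {}, apiConfiguration })
}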