Skip to content

Commit 9733ef7

Browse files
celestial-vault and Elephant Lumps authored
migrate apiConfiguration protobus (RooCodeInc#4072)
* migrate apiConfiguration * fix type issue --------- Co-authored-by: Elephant Lumps <[email protected]>
1 parent f16c70e commit 9733ef7

File tree

9 files changed

+684
-33
lines changed

9 files changed

+684
-33
lines changed

proto/models.proto

Lines changed: 152 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -22,19 +22,21 @@ service ModelsService {
2222
rpc refreshRequestyModels(EmptyRequest) returns (OpenRouterCompatibleModelInfo);
2323
// Subscribe to OpenRouter models updates
2424
rpc subscribeToOpenRouterModels(EmptyRequest) returns (stream OpenRouterCompatibleModelInfo);
25+
// Updates API configuration
26+
rpc updateApiConfigurationProto(UpdateApiConfigurationRequest) returns (Empty);
2527
}
2628

2729
// List of VS Code LM models
2830
message VsCodeLmModelsArray {
29-
repeated VsCodeLmModel models = 1;
31+
repeated LanguageModelChatSelector models = 1;
3032
}
3133

32-
// Structure representing a VS Code LM model
33-
message VsCodeLmModel {
34-
string vendor = 1;
35-
string family = 2;
36-
string version = 3;
37-
string id = 4;
34+
// Structure representing a language model chat selector
35+
message LanguageModelChatSelector {
36+
optional string vendor = 1;
37+
optional string family = 2;
38+
optional string version = 3;
39+
optional string id = 4;
3840
}
3941

4042
// Price tier for tiered pricing models
@@ -86,3 +88,146 @@ message OpenAiModelsRequest {
8688
string baseUrl = 2;
8789
string apiKey = 3;
8890
}
91+
92+
// Request for updating API configuration
93+
message UpdateApiConfigurationRequest {
94+
Metadata metadata = 1;
95+
ModelsApiConfiguration api_configuration = 2;
96+
}
97+
98+
// API Provider enumeration
99+
enum ApiProvider {
100+
ANTHROPIC = 0;
101+
OPENROUTER = 1;
102+
BEDROCK = 2;
103+
VERTEX = 3;
104+
OPENAI = 4;
105+
OLLAMA = 5;
106+
LMSTUDIO = 6;
107+
GEMINI = 7;
108+
OPENAI_NATIVE = 8;
109+
REQUESTY = 9;
110+
TOGETHER = 10;
111+
DEEPSEEK = 11;
112+
QWEN = 12;
113+
DOUBAO = 13;
114+
MISTRAL = 14;
115+
VSCODE_LM = 15;
116+
CLINE = 16;
117+
LITELLM = 17;
118+
NEBIUS = 18;
119+
FIREWORKS = 19;
120+
ASKSAGE = 20;
121+
XAI = 21;
122+
SAMBANOVA = 22;
123+
CEREBRAS = 23;
124+
}
125+
126+
// Model info for OpenAI-compatible models
127+
message OpenAiCompatibleModelInfo {
128+
optional int32 max_tokens = 1;
129+
optional int32 context_window = 2;
130+
optional bool supports_images = 3;
131+
bool supports_prompt_cache = 4;
132+
optional double input_price = 5;
133+
optional double output_price = 6;
134+
optional ThinkingConfig thinking_config = 7;
135+
optional bool supports_global_endpoint = 8;
136+
optional double cache_writes_price = 9;
137+
optional double cache_reads_price = 10;
138+
optional string description = 11;
139+
repeated ModelTier tiers = 12;
140+
optional double temperature = 13;
141+
optional bool is_r1_format_required = 14;
142+
}
143+
144+
// Model info for LiteLLM models
145+
message LiteLLMModelInfo {
146+
optional int32 max_tokens = 1;
147+
optional int32 context_window = 2;
148+
optional bool supports_images = 3;
149+
bool supports_prompt_cache = 4;
150+
optional double input_price = 5;
151+
optional double output_price = 6;
152+
optional ThinkingConfig thinking_config = 7;
153+
optional bool supports_global_endpoint = 8;
154+
optional double cache_writes_price = 9;
155+
optional double cache_reads_price = 10;
156+
optional string description = 11;
157+
repeated ModelTier tiers = 12;
158+
optional double temperature = 13;
159+
}
160+
161+
// Main ApiConfiguration message
162+
message ModelsApiConfiguration {
163+
// From ApiHandlerOptions (excluding onRetryAttempt function)
164+
optional string api_model_id = 1;
165+
optional string api_key = 2;
166+
optional string cline_api_key = 3;
167+
optional string task_id = 4;
168+
optional string lite_llm_base_url = 5;
169+
optional string lite_llm_model_id = 6;
170+
optional string lite_llm_api_key = 7;
171+
optional bool lite_llm_use_prompt_cache = 8;
172+
map<string, string> open_ai_headers = 9;
173+
optional LiteLLMModelInfo lite_llm_model_info = 10;
174+
optional string anthropic_base_url = 11;
175+
optional string open_router_api_key = 12;
176+
optional string open_router_model_id = 13;
177+
optional OpenRouterModelInfo open_router_model_info = 14;
178+
optional string open_router_provider_sorting = 15;
179+
optional string aws_access_key = 16;
180+
optional string aws_secret_key = 17;
181+
optional string aws_session_token = 18;
182+
optional string aws_region = 19;
183+
optional bool aws_use_cross_region_inference = 20;
184+
optional bool aws_bedrock_use_prompt_cache = 21;
185+
optional bool aws_use_profile = 22;
186+
optional string aws_profile = 23;
187+
optional string aws_bedrock_endpoint = 24;
188+
optional bool aws_bedrock_custom_selected = 25;
189+
optional string aws_bedrock_custom_model_base_id = 26;
190+
optional string vertex_project_id = 27;
191+
optional string vertex_region = 28;
192+
optional string open_ai_base_url = 29;
193+
optional string open_ai_api_key = 30;
194+
optional string open_ai_model_id = 31;
195+
optional OpenAiCompatibleModelInfo open_ai_model_info = 32;
196+
optional string ollama_model_id = 33;
197+
optional string ollama_base_url = 34;
198+
optional string ollama_api_options_ctx_num = 35;
199+
optional string lm_studio_model_id = 36;
200+
optional string lm_studio_base_url = 37;
201+
optional string gemini_api_key = 38;
202+
optional string gemini_base_url = 39;
203+
optional string open_ai_native_api_key = 40;
204+
optional string deep_seek_api_key = 41;
205+
optional string requesty_api_key = 42;
206+
optional string requesty_model_id = 43;
207+
optional OpenRouterModelInfo requesty_model_info = 44;
208+
optional string together_api_key = 45;
209+
optional string together_model_id = 46;
210+
optional string fireworks_api_key = 47;
211+
optional string fireworks_model_id = 48;
212+
optional int32 fireworks_model_max_completion_tokens = 49;
213+
optional int32 fireworks_model_max_tokens = 50;
214+
optional string qwen_api_key = 51;
215+
optional string doubao_api_key = 52;
216+
optional string mistral_api_key = 53;
217+
optional string azure_api_version = 54;
218+
optional LanguageModelChatSelector vs_code_lm_model_selector = 55;
219+
optional string qwen_api_line = 56;
220+
optional string nebius_api_key = 57;
221+
optional string asksage_api_url = 58;
222+
optional string asksage_api_key = 59;
223+
optional string xai_api_key = 60;
224+
optional int32 thinking_budget_tokens = 61;
225+
optional string reasoning_effort = 62;
226+
optional string sambanova_api_key = 63;
227+
optional string cerebras_api_key = 64;
228+
optional int32 request_timeout_ms = 65;
229+
230+
// From ApiConfiguration (additional fields)
231+
optional ApiProvider api_provider = 66;
232+
repeated string favorited_model_ids = 67;
233+
}

src/core/controller/index.ts

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -204,15 +204,7 @@ export class Controller {
204204
await this.setUserInfo(message.user || undefined)
205205
await this.postStateToWebview()
206206
break
207-
case "apiConfiguration":
208-
if (message.apiConfiguration) {
209-
await updateApiConfiguration(this.context, message.apiConfiguration)
210-
if (this.task) {
211-
this.task.api = buildApiHandler(message.apiConfiguration)
212-
}
213-
}
214-
await this.postStateToWebview()
215-
break
207+
216208
case "fetchUserCreditsData": {
217209
await this.fetchUserCreditsData()
218210
break
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
import type { Controller } from "../index"
2+
import { Empty } from "@shared/proto/common"
3+
import { UpdateApiConfigurationRequest } from "@shared/proto/models"
4+
import { updateApiConfiguration } from "../../storage/state"
5+
import { buildApiHandler } from "@api/index"
6+
import { convertProtoToApiConfiguration } from "@shared/proto-conversions/models/api-configuration-conversion"
7+
8+
/**
9+
* Updates API configuration
10+
* @param controller The controller instance
11+
* @param request The update API configuration request
12+
* @returns Empty response
13+
*/
14+
export async function updateApiConfigurationProto(
15+
controller: Controller,
16+
request: UpdateApiConfigurationRequest,
17+
): Promise<Empty> {
18+
try {
19+
if (!request.apiConfiguration) {
20+
console.log("[APICONFIG: updateApiConfigurationProto] API configuration is required")
21+
throw new Error("API configuration is required")
22+
}
23+
24+
// Convert proto ApiConfiguration to application ApiConfiguration
25+
const appApiConfiguration = convertProtoToApiConfiguration(request.apiConfiguration)
26+
27+
// Update the API configuration in storage
28+
await updateApiConfiguration(controller.context, appApiConfiguration)
29+
30+
// Update the task's API handler if there's an active task
31+
if (controller.task) {
32+
controller.task.api = buildApiHandler(appApiConfiguration)
33+
}
34+
35+
// Post updated state to webview
36+
await controller.postStateToWebview()
37+
38+
return Empty.create()
39+
} catch (error) {
40+
console.error(`Failed to update API configuration: ${error}`)
41+
throw error
42+
}
43+
}

src/shared/WebviewMessage.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ import { McpViewTab } from "./mcp"
88

99
export interface WebviewMessage {
1010
type:
11-
| "apiConfiguration"
1211
| "requestVsCodeLmModels"
1312
| "authStateChanged"
1413
| "fetchMcpMarketplace"

0 commit comments

Comments (0)