@@ -2,59 +2,59 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"

 import {
-	siliconCloudModels,
 	siliconCloudApiLineConfigs,
 	siliconCloudDefaultModelId,
 	siliconCloudDefaultApiLine,
+	siliconCloudModelsByApiLine,
 } from "@roo-code/types"

 import { type ApiHandlerOptions } from "../../shared/api"
 import { type ApiStream } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
-import { getModelParams } from "../transform/model-params"
 import { handleOpenAIError } from "./utils/openai-error-handler"
-import { OpenAiHandler } from "./openai"
 import { ApiHandlerCreateMessageMetadata } from ".."
+import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"

-const SILICON_CLOUD_PROVIDER_NAME = "siliconcloud" as const
-
-export class SiliconCloudHandler extends OpenAiHandler {
+export class SiliconCloudHandler extends BaseOpenAiCompatibleProvider<string> {
 	constructor(options: ApiHandlerOptions) {
 		const apiLine = options.siliconCloudApiLine || siliconCloudDefaultApiLine
+		const baseURL = siliconCloudApiLineConfigs[apiLine].baseUrl
+		const providerModels = siliconCloudModelsByApiLine[apiLine]

 		super({
 			...options,
-			openAiApiKey: options.siliconCloudApiKey,
-			openAiBaseUrl: siliconCloudApiLineConfigs[apiLine].baseUrl,
-			openAiModelId: options.apiModelId || siliconCloudDefaultModelId,
+			providerName: "SiliconCloud",
+			baseURL,
+			apiKey: options.siliconCloudApiKey,
+			defaultProviderModelId: siliconCloudDefaultModelId,
+			providerModels,
+			defaultTemperature: 0,
 		})
 	}

-	override getModel() {
-		const id = this.options.apiModelId || siliconCloudDefaultModelId
-		const info =
-			siliconCloudModels[id as keyof typeof siliconCloudModels] ?? siliconCloudModels[siliconCloudDefaultModelId]
-		const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options })
-		return { id, info, ...params }
-	}
-
-	override async *createMessage(
+	protected override createStream(
 		systemPrompt: string,
 		messages: Anthropic.Messages.MessageParam[],
 		metadata?: ApiHandlerCreateMessageMetadata,
-	): ApiStream {
-		const { id: model, info } = this.getModel()
+		requestOptions?: OpenAI.RequestOptions,
+	) {
+		const {
+			id: model,
+			info: { maxTokens: max_tokens, supportsReasoningBudget },
+		} = this.getModel()
+
+		const temperature = this.options.modelTemperature ?? this.defaultTemperature

 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model,
-			max_tokens: info.maxTokens,
-			temperature: this.options.modelTemperature ?? 0,
+			max_tokens,
+			temperature,
 			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
 			stream: true,
 			stream_options: { include_usage: true },
 		}

-		if (info.supportsReasoningBudget) {
+		if (supportsReasoningBudget) {
 			if (this.options.enableReasoningEffort) {
 				// @ts-ignore
 				params.enable_thinking = true
@@ -66,13 +66,19 @@ export class SiliconCloudHandler extends OpenAiHandler {
 			}
 		}

-		let stream
 		try {
-			stream = await this.client.chat.completions.create(params)
+			return this.client.chat.completions.create(params, requestOptions)
 		} catch (error) {
-			throw handleOpenAIError(error, SILICON_CLOUD_PROVIDER_NAME)
+			throw handleOpenAIError(error, this.providerName)
 		}
+	}

+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const stream = await this.createStream(systemPrompt, messages, metadata)
 		let lastUsage: OpenAI.CompletionUsage | undefined

 		for await (const chunk of stream) {
@@ -98,7 +104,11 @@ export class SiliconCloudHandler extends OpenAiHandler {
 		}

 		if (lastUsage) {
-			yield super.processUsageMetrics(lastUsage)
+			yield {
+				type: "usage",
+				inputTokens: lastUsage.prompt_tokens || 0,
+				outputTokens: lastUsage.completion_tokens || 0,
+			}
 		}
 	}
 }
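
For orientation, here is a minimal sketch of driving the refactored handler. It is not part of the diff: the chunk shapes (`text` chunks carrying a `text` field, alongside the `usage` chunk yielded above) follow the ApiStream convention used elsewhere in the codebase, and the model id is purely illustrative.

```ts
// Usage sketch, not part of the PR. Assumes the ApiStream chunk shapes used
// elsewhere in the codebase; only siliconCloudApiKey and apiModelId are
// option names taken from the diff itself.
import { SiliconCloudHandler } from "./siliconcloud"

async function run() {
	const handler = new SiliconCloudHandler({
		siliconCloudApiKey: process.env.SILICONCLOUD_API_KEY,
		// Hypothetical model id; omitting it falls back to siliconCloudDefaultModelId.
		apiModelId: "deepseek-ai/DeepSeek-V3",
	})

	// createMessage yields chunks as they stream; because the request sets
	// stream_options.include_usage, token totals arrive on the final chunk
	// and surface here as a single "usage" chunk.
	for await (const chunk of handler.createMessage("You are a concise assistant.", [
		{ role: "user", content: "Say hello." },
	])) {
		if (chunk.type === "text") {
			process.stdout.write(chunk.text)
		} else if (chunk.type === "usage") {
			console.log(`\n[tokens in: ${chunk.inputTokens}, out: ${chunk.outputTokens}]`)
		}
	}
}

run()
```

The diff also suggests the division of labor the base class provides: `BaseOpenAiCompatibleProvider` owns the OpenAI client, `providerName`, `defaultTemperature`, and `getModel()`, while the subclass only customizes request construction in `createStream` and stream post-processing in `createMessage`.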