@@ -66,6 +66,13 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const deepseekReasoner = modelId.includes("deepseek-reasoner")
 		const ark = modelUrl.includes(".volces.com")
 
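+		// o3-family models take a "developer" message in place of the usual
+		// "system" prompt, so route them to a dedicated handler.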
+		if (modelId.startsWith("o3-mini")) {
+			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
+			return
+		}
+
 		if (this.options.openAiStreamingEnabled ?? true) {
 			const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
 				role: "system",
@@ -169,6 +176,77 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			throw error
 		}
 	}
+
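+	/**
+	 * Handles o3-family requests, which use a "developer" role for the system
+	 * prompt and accept a reasoning_effort parameter.
+	 */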
+	private async *handleO3FamilyMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+	): ApiStream {
+		if (this.options.openAiStreamingEnabled ?? true) {
+			const stream = await this.client.chat.completions.create({
+				// Pass the requested model id through so o3-mini variants are preserved.
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+				stream: true,
+				stream_options: { include_usage: true },
+				reasoning_effort: this.getModel().info.reasoningEffort,
+			})
+
+			yield* this.handleStreamResponse(stream)
+		} else {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+			}
+
+			const response = await this.client.chat.completions.create(requestOptions)
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+			yield this.processUsageMetrics(response.usage)
+		}
+	}
+
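+	/**
+	 * Re-emits a streaming chat completion as text and usage chunks.
+	 */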
+	private async *handleStreamResponse(stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>): ApiStream {
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+				}
+			}
+		}
+	}
 }
 
 export async function getOpenAiModels(baseUrl?: string, apiKey?: string) {