File tree: 4 files changed, +22 −20 lines. (One file renamed without changes.)
Original file line number Diff line number Diff line change @@ -64,6 +64,8 @@ export class OpenAiNativeHandler implements ApiHandler {
6464
6565 break
6666 }
67+ case "o4-mini":
68+ case "o3":
6769 case "o3-mini": {
6870 const stream = await this.client.chat.completions.create({
6971 model: model.id,
Original file line number Diff line number Diff line change @@ -35,7 +35,7 @@ export class OpenAiHandler implements ApiHandler {
3535 const modelId = this . options . openAiModelId ?? ""
3636 const isDeepseekReasoner = modelId . includes ( "deepseek-reasoner" )
3737 const isR1FormatRequired = this . options . openAiModelInfo ?. isR1FormatRequired ?? false
38- const isO3Mini = modelId.includes("o3-mini")
38+ const isReasoningModelFamily = modelId.includes("o3") || modelId.includes("o4")
3939
4040 let openAiMessages : OpenAI . Chat . ChatCompletionMessageParam [ ] = [
4141 { role : "system" , content : systemPrompt } ,
@@ -55,7 +55,7 @@ export class OpenAiHandler implements ApiHandler {
5555 openAiMessages = convertToR1Format ( [ { role : "user" , content : systemPrompt } , ...messages ] )
5656 }
5757
58- if (isO3Mini) {
58+ if (isReasoningModelFamily) {
5959 openAiMessages = [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)]
6060 temperature = undefined // does not support temperature
6161 reasoningEffort = (this.options.o3MiniReasoningEffort as ChatCompletionReasoningEffort) || "medium"
Original file line number Diff line number Diff line change @@ -606,6 +606,24 @@ export const geminiModels = {
606606export type OpenAiNativeModelId = keyof typeof openAiNativeModels
607607export const openAiNativeDefaultModelId : OpenAiNativeModelId = "gpt-4.1"
608608export const openAiNativeModels = {
609+ o3 : {
610+ maxTokens : 100_000 ,
611+ contextWindow : 200_000 ,
612+ supportsImages : true ,
613+ supportsPromptCache : true ,
614+ inputPrice : 10.0 ,
615+ outputPrice : 40.0 ,
616+ cacheReadsPrice : 2.5 ,
617+ } ,
618+ "o4-mini" : {
619+ maxTokens : 100_000 ,
620+ contextWindow : 200_000 ,
621+ supportsImages : true ,
622+ supportsPromptCache : true ,
623+ inputPrice : 1.1 ,
624+ outputPrice : 4.4 ,
625+ cacheReadsPrice : 0.275 ,
626+ } ,
609627 "gpt-4.1" : {
610628 maxTokens : 32_768 ,
611629 contextWindow : 1_047_576 ,
@@ -633,24 +651,6 @@ export const openAiNativeModels = {
633651 outputPrice : 0.4 ,
634652 cacheReadsPrice : 0.025 ,
635653 } ,
636- o3 : {
637- maxTokens : 100_000 ,
638- contextWindow : 200_000 ,
639- supportsImages : true ,
640- supportsPromptCache : true ,
641- inputPrice : 10.0 ,
642- outputPrice : 40.0 ,
643- cacheReadsPrice : 2.5 ,
644- } ,
645- "o4-mini" : {
646- maxTokens : 100_000 ,
647- contextWindow : 200_000 ,
648- supportsImages : true ,
649- supportsPromptCache : true ,
650- inputPrice : 1.1 ,
651- outputPrice : 4.4 ,
652- cacheReadsPrice : 0.275 ,
653- } ,
654654 "o3-mini" : {
655655 maxTokens : 100_000 ,
656656 contextWindow : 200_000 ,
0 commit comments