@@ -86,7 +86,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const deepseekReasoner = modelId.includes("deepseek-reasoner") || enabledR1Format
 		const ark = modelUrl.includes(".volces.com")
 
-		if (modelId.startsWith("o3-mini")) {
+		if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {
 			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
 			return
 		}
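
For readers skimming the patch: this hunk broadens the routing check from the single `o3-mini` prefix to any model ID containing `o1`, `o3`, or `o4`, so all of the OpenAI reasoning-model families take the `handleO3FamilyMessage` path. A minimal TypeScript sketch of the predicate, pulled out for illustration only (the helper name `isReasoningFamilyModel` is hypothetical and not part of the handler):

```typescript
// Hypothetical standalone version of the check in the hunk above.
// Any model ID containing "o1", "o3", or "o4" is routed to the O3-family handler.
function isReasoningFamilyModel(modelId: string): boolean {
	return modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")
}

// All of these now take the O3-family path; note the substring match is deliberately
// loose and would also catch custom deployment names containing those fragments.
const examples = ["o1-preview", "o3-mini", "o4-mini", "my-azure-o3-deployment"]
console.log(examples.map(isReasoningFamilyModel)) // [ true, true, true, true ]
```
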
@@ -306,7 +306,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			stream: true,
 			...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 			reasoning_effort: modelInfo.reasoningEffort,
-			temperature: this.options.modelTemperature ?? 0,
+			temperature: undefined,
 		}
 
 		// O3 family models do not support the deprecated max_tokens parameter
@@ -331,7 +331,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				...convertToOpenAiMessages(messages),
 			],
 			reasoning_effort: modelInfo.reasoningEffort,
-			temperature: this.options.modelTemperature ?? 0,
+			temperature: undefined,
 		}
 
 		// O3 family models do not support the deprecated max_tokens parameter
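
Both of the later hunks make the same change to the O3-family request payloads: `temperature` is set to `undefined` instead of `this.options.modelTemperature ?? 0`, so the property is omitted from the serialized request and the API default applies (OpenAI's reasoning models do not accept a custom temperature). A minimal sketch of the resulting options shape, using a hypothetical helper and only the fields visible in the diff plus `model`:

```typescript
// Local stand-in for the request options; only fields shown in the diff (plus `model`) appear here.
interface O3FamilyStreamOptions {
	model: string
	stream: true
	stream_options?: { include_usage: boolean }
	reasoning_effort?: "low" | "medium" | "high"
	temperature?: number
}

// Hypothetical helper mirroring the object literals in the two hunks above.
function buildO3FamilyStreamOptions(
	modelId: string,
	reasoningEffort: "low" | "medium" | "high" | undefined,
	isGrokXAI: boolean,
): O3FamilyStreamOptions {
	return {
		model: modelId,
		stream: true,
		// stream_options is only spread in when not targeting Grok (xAI), matching the diff's conditional.
		...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
		reasoning_effort: reasoningEffort,
		// Previously `this.options.modelTemperature ?? 0`; leaving it undefined drops the key
		// during JSON serialization so the provider's default temperature is used.
		temperature: undefined,
	}
}
```
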