@@ -155,6 +155,39 @@ describe("OpenAiHandler", () => {
 			expect(textChunks).toHaveLength(1)
 			expect(textChunks[0].text).toBe("Test response")
 		})
+		it("should include reasoning_effort when reasoning effort is enabled", async () => {
+			const reasoningOptions: ApiHandlerOptions = {
+				...mockOptions,
+				enableReasoningEffort: true,
+				openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false, reasoningEffort: "high" },
+			}
+			const reasoningHandler = new OpenAiHandler(reasoningOptions)
+			const stream = reasoningHandler.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called with reasoning_effort
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.reasoning_effort).toBe("high")
+		})
+
+		it("should not include reasoning_effort when reasoning effort is disabled", async () => {
+			const noReasoningOptions: ApiHandlerOptions = {
+				...mockOptions,
+				enableReasoningEffort: false,
+				openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false },
+			}
+			const noReasoningHandler = new OpenAiHandler(noReasoningOptions)
+			const stream = noReasoningHandler.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called without reasoning_effort
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.reasoning_effort).toBeUndefined()
+		})
 	})
 
 	describe("error handling", () => {
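
For orientation, here is a minimal sketch of the handler-side branching these tests pin down. It assumes `OpenAiHandler` builds its OpenAI request parameters from `ApiHandlerOptions` roughly as below; the `buildRequestParams` helper and the trimmed-down option types are illustrative assumptions, not the actual implementation, though the field names (`enableReasoningEffort`, `openAiCustomModelInfo.reasoningEffort`) come straight from the test fixtures.

```typescript
// Illustrative sketch only: buildRequestParams and these simplified types
// are assumptions for this example, not the real OpenAiHandler internals.
type ReasoningEffort = "low" | "medium" | "high"

interface OpenAiCustomModelInfo {
	contextWindow: number
	supportsPromptCache: boolean
	reasoningEffort?: ReasoningEffort
}

interface ApiHandlerOptions {
	enableReasoningEffort?: boolean
	openAiCustomModelInfo?: OpenAiCustomModelInfo
}

// Attach reasoning_effort only when the flag is on and the model info
// supplies a value; otherwise leave the key off the request entirely.
function buildRequestParams(options: ApiHandlerOptions): Record<string, unknown> {
	const params: Record<string, unknown> = {}
	if (options.enableReasoningEffort && options.openAiCustomModelInfo?.reasoningEffort) {
		params.reasoning_effort = options.openAiCustomModelInfo.reasoningEffort
	}
	return params
}

// Mirrors the two test cases above:
console.log(
	buildRequestParams({
		enableReasoningEffort: true,
		openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false, reasoningEffort: "high" },
	}),
) // { reasoning_effort: "high" }

console.log(
	buildRequestParams({
		enableReasoningEffort: false,
		openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false },
	}),
) // {}
```

Omitting the key entirely in the disabled case satisfies the `expect(callArgs.reasoning_effort).toBeUndefined()` assertion and keeps the serialized request free of a spurious field.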