@@ -392,4 +392,140 @@ describe("OpenAiHandler", () => {
 			expect(lastCall[0]).not.toHaveProperty("stream_options")
 		})
 	})
+
+	describe("Grok 3 Mini models with reasoning", () => {
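+		// Shared fixture: OpenAI-compatible options pointed at xAI's endpoint with a reasoning-capable Grok mini model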
+		const grokMiniOptions = {
+			...mockOptions,
+			openAiBaseUrl: "https://api.x.ai/v1",
+			openAiModelId: "grok-3-mini-beta",
+			openAiCustomModelInfo: {
+				reasoningEffort: "low" as const,
+				thinking: true,
+				contextWindow: 128_000,
+				supportsPromptCache: false,
+				maxTokens: -1,
+				supportsImages: true,
+				inputPrice: 0,
+				outputPrice: 0,
+			},
+		}
+
+		it("should include reasoning_effort parameter for Grok mini models", async () => {
+			const grokHandler = new OpenAiHandler(grokMiniOptions)
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = grokHandler.createMessage(systemPrompt, messages)
+			await stream.next()
+
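+			// reasoningEffort from the custom model info should be forwarded as the API-level reasoning_effort parameter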
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: grokMiniOptions.openAiModelId,
+					stream: true,
+					reasoning_effort: "low",
+				}),
+				{},
+			)
+		})
+
+		it("should use the specified reasoningEffort value", async () => {
+			const grokHandler = new OpenAiHandler({
+				...grokMiniOptions,
+				openAiCustomModelInfo: {
+					...grokMiniOptions.openAiCustomModelInfo,
+					reasoningEffort: "high",
+				},
+			})
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = grokHandler.createMessage(systemPrompt, messages)
+			await stream.next()
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: grokMiniOptions.openAiModelId,
+					stream: true,
+					reasoning_effort: "high",
+				}),
+				{},
+			)
+		})
+
+		it("should process reasoning_content from response", async () => {
+			// Update the mock to include reasoning_content in the response
+			mockCreate.mockImplementationOnce(() => ({
+				[Symbol.asyncIterator]: async function* () {
+					yield {
+						choices: [
+							{
+								delta: { content: "Test response" },
+								index: 0,
+							},
+						],
+						usage: null,
+					}
+					yield {
+						choices: [
+							{
+								delta: { reasoning_content: "This is reasoning content" },
+								index: 0,
+							},
+						],
+						usage: null,
+					}
+					yield {
+						choices: [
+							{
+								delta: {},
+								index: 0,
+							},
+						],
+						usage: {
+							prompt_tokens: 10,
+							completion_tokens: 5,
+							total_tokens: 15,
+						},
+					}
+				},
+			}))
+
+			const grokHandler = new OpenAiHandler(grokMiniOptions)
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = grokHandler.createMessage(systemPrompt, messages)
+			const chunks: any[] = []
+			for await (const chunk of stream) {
+				chunks.push(chunk)
+			}
+
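+			// Text and reasoning deltas should surface as separate chunk types in the handler's output stream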
+			const textChunks = chunks.filter((chunk) => chunk.type === "text")
+			const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning")
+
+			expect(textChunks).toHaveLength(1)
+			expect(textChunks[0].text).toBe("Test response")
+
+			expect(reasoningChunks).toHaveLength(1)
+			expect(reasoningChunks[0].text).toBe("This is reasoning content")
+		})
+	})
 })