@@ -59,7 +59,7 @@ def stub_raw(chunks, body_blk: nil)
     stub.to_return(status: 200, body: chunks)
   end
 
-  def stub_streamed_response(prompt, deltas, tool_call: false)
+  def stub_streamed_response(prompt, deltas, tool_call: false, skip_body_check: false)
     chunks =
       deltas.each_with_index.map do |_, index|
         if index == (deltas.length - 1)
@@ -71,10 +71,13 @@ def stub_streamed_response(prompt, deltas, tool_call: false)
 
     chunks = (chunks.join("\n\n") << "data: [DONE]").split("")
 
-    WebMock
-      .stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .with(body: request_body(prompt, stream: true, tool_call: tool_call))
-      .to_return(status: 200, body: chunks)
+    mock = WebMock.stub_request(:post, "https://api.openai.com/v1/chat/completions")
+
+    if !skip_body_check
+      mock = mock.with(body: request_body(prompt, stream: true, tool_call: tool_call))
+    end
+
+    mock.to_return(status: 200, body: chunks)
 
     yield if block_given?
   end
@@ -401,6 +404,41 @@ def request_body(prompt, stream: false, tool_call: false)
     end
   end
 
+  describe "structured outputs" do
+    it "falls back to best-effort parsing on broken JSON responses" do
+      prompt = compliance.generic_prompt
+      deltas = ["```json\n{ message: 'hel", "lo' }"]
+
+      model_params = {
+        response_format: {
+          json_schema: {
+            schema: {
+              properties: {
+                message: {
+                  type: "string",
+                },
+              },
+            },
+          },
+        },
+      }
+
+      read_properties = []
+      open_ai_mock.with_chunk_array_support do
+        # skip the body check because of the response format
+        open_ai_mock.stub_streamed_response(prompt, deltas, skip_body_check: true)
+
+        dialect = compliance.dialect(prompt: prompt)
+
+        endpoint.perform_completion!(dialect, user, model_params) do |partial|
+          read_properties << partial.read_buffered_property(:message)
+        end
+      end
+
+      expect(read_properties.join).to eq("hello")
+    end
+  end
+
   describe "disabled tool use" do
     it "can properly disable tool use with :none" do
       llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")