@@ -612,13 +612,20 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
         const url = `${baseUrl}/v1/responses`
 
         try {
+            // Adjust headers based on streaming mode
+            const headers: Record<string, string> = {
+                "Content-Type": "application/json",
+                Authorization: `Bearer ${apiKey}`,
+            }
+
+            // Only add Accept: text/event-stream for streaming requests
+            if (requestBody.stream !== false) {
+                headers["Accept"] = "text/event-stream"
+            }
+
             const response = await fetch(url, {
                 method: "POST",
-                headers: {
-                    "Content-Type": "application/json",
-                    Authorization: `Bearer ${apiKey}`,
-                    Accept: "text/event-stream",
-                },
+                headers,
                 body: JSON.stringify(requestBody),
             })
 
@@ -663,13 +670,19 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
                 this.resolveResponseId(undefined)
 
                 // Retry the request without the previous_response_id
+                const retryHeaders: Record<string, string> = {
+                    "Content-Type": "application/json",
+                    Authorization: `Bearer ${apiKey}`,
+                }
+
+                // Only add Accept: text/event-stream for streaming requests
+                if (retryRequestBody.stream !== false) {
+                    retryHeaders["Accept"] = "text/event-stream"
+                }
+
                 const retryResponse = await fetch(url, {
                     method: "POST",
-                    headers: {
-                        "Content-Type": "application/json",
-                        Authorization: `Bearer ${apiKey}`,
-                        Accept: "text/event-stream",
-                    },
+                    headers: retryHeaders,
                     body: JSON.stringify(retryRequestBody),
                 })
 
@@ -678,12 +691,44 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
                     throw new Error(`GPT-5 API retry failed (${retryResponse.status})`)
                 }
 
-                if (!retryResponse.body) {
-                    throw new Error("GPT-5 Responses API error: No response body from retry request")
-                }
+                // Handle the successful retry response based on streaming mode
+                if (retryRequestBody.stream === false) {
+                    // Handle non-streaming retry response
+                    const retryData = await retryResponse.json()
+                    const retryResponseData = retryData.response || retryData
+
+                    // Extract text content from the response
+                    if (retryResponseData.output && Array.isArray(retryResponseData.output)) {
+                        for (const output of retryResponseData.output) {
+                            if (output.type === "text" && output.content) {
+                                for (const content of output.content) {
+                                    if (content.type === "text" && content.text) {
+                                        yield { type: "text", text: content.text }
+                                    }
+                                }
+                            }
+                        }
+                    }
+
+                    // Yield usage information if available
+                    if (retryResponseData.usage) {
+                        const usageData = this.normalizeGpt5Usage(retryResponseData.usage, model)
+                        if (usageData) {
+                            yield usageData
+                        }
+                    }
 
-                // Handle the successful retry response
-                yield* this.handleGpt5StreamResponse(retryResponse.body, model)
+                    // Store response ID for conversation continuity
+                    if (retryResponseData.id) {
+                        this.resolveResponseId(retryResponseData.id)
+                    }
+                } else {
+                    // Handle streaming retry response
+                    if (!retryResponse.body) {
+                        throw new Error("GPT-5 Responses API error: No response body from retry request")
+                    }
+                    yield* this.handleGpt5StreamResponse(retryResponse.body, model)
+                }
                 return
             }
 
@@ -722,12 +767,46 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
                 throw new Error(errorMessage)
             }
 
-            if (!response.body) {
-                throw new Error("GPT-5 Responses API error: No response body")
-            }
+            // Check if this is a non-streaming response
+            if (requestBody.stream === false) {
+                // Handle non-streaming response
+                const data = await response.json()
+
+                // Handle both response formats (wrapped and unwrapped)
+                const responseData = data.response || data
+
+                // Extract text content from the response
+                if (responseData.output && Array.isArray(responseData.output)) {
+                    for (const output of responseData.output) {
+                        if (output.type === "text" && output.content) {
+                            for (const content of output.content) {
+                                if (content.type === "text" && content.text) {
+                                    yield { type: "text", text: content.text }
+                                }
+                            }
+                        }
+                    }
+                }
+
+                // Yield usage information if available
+                if (responseData.usage) {
+                    const usageData = this.normalizeGpt5Usage(responseData.usage, model)
+                    if (usageData) {
+                        yield usageData
+                    }
+                }
 
-            // Handle streaming response
-            yield* this.handleGpt5StreamResponse(response.body, model)
+                // Store response ID for conversation continuity
+                if (responseData.id) {
+                    this.resolveResponseId(responseData.id)
+                }
+            } else {
+                // Handle streaming response
+                if (!response.body) {
+                    throw new Error("GPT-5 Responses API error: No response body")
+                }
+                yield* this.handleGpt5StreamResponse(response.body, model)
+            }
         } catch (error) {
             if (error instanceof Error) {
                 // Re-throw with the original error message if it's already formatted
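For reference, both non-streaming branches added above walk the Responses API payload's output array and emit each text segment. Below is a minimal standalone sketch of that extraction under the same assumptions the diff makes (an `output[].content[]` structure with "text"-typed entries, optionally wrapped in a `response` field); the interface and helper names (`ResponsesPayload`, `extractOutputText`) are hypothetical and introduced only for illustration, not part of the handler.

// Hypothetical types approximating the payload fields this diff reads.
interface ResponsesTextContent {
    type: string
    text?: string
}

interface ResponsesOutputItem {
    type: string
    content?: ResponsesTextContent[]
}

interface ResponsesPayload {
    id?: string
    output?: ResponsesOutputItem[]
    usage?: unknown
}

// Collect the text chunks from a non-streaming response, handling both the
// wrapped ({ response: {...} }) and unwrapped payload forms, as the diff does.
// Usage (assumed): extractOutputText(await response.json()).join("")
function extractOutputText(data: ResponsesPayload & { response?: ResponsesPayload }): string[] {
    const payload = data.response || data
    const chunks: string[] = []
    for (const output of payload.output ?? []) {
        if (output.type === "text" && output.content) {
            for (const content of output.content) {
                if (content.type === "text" && content.text) {
                    chunks.push(content.text)
                }
            }
        }
    }
    return chunks
}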