@@ -101,6 +101,7 @@ test("Test with built-in web search", async () => {
101101 // Use OpenAI's stateful API
102102 const response = await llm . invoke ( "what about a negative one" , {
103103 tools : [ { type : "web_search_preview" } ] ,
104+ // @ts-expect-error - FIXME(hntrl): bad unknown type
104105 previous_response_id : firstResponse . response_metadata . id ,
105106 } ) ;
106107 assertResponse ( response ) ;
@@ -308,6 +309,7 @@ test("Test stateful API", async () => {
308309 expect ( response . response_metadata ) . toHaveProperty ( "id" ) ;
309310
310311 const secondResponse = await llm . invoke ( "what's my name" , {
312+ // @ts-expect-error - FIXME(hntrl): bad unknown type
311313 previous_response_id : response . response_metadata . id ,
312314 } ) ;
313315 expect ( Array . isArray ( secondResponse . content ) ) . toBe ( true ) ;
@@ -437,6 +439,7 @@ test("Test Remote MCP", async () => {
437439 const response2 = await model . invoke (
438440 [ new HumanMessage ( { content : approvals } ) ] ,
439441 {
442+ // @ts-expect-error - FIXME(hntrl): bad unknown type
440443 previous_response_id : response . response_metadata . id ,
441444 }
442445 ) ;
@@ -746,6 +749,108 @@ describe("reasoning summaries", () => {
746749 }
747750 ) ;
748751
752+ // https://github.com/langchain-ai/langchainjs/issues/9072
753+ test . each ( [ false , true ] ) (
754+ "when zdrEnabled=%s, reasoning summaries should be properly paired with function calls" ,
755+ async ( zdrEnabled ) => {
756+ // This test verifies that reasoning summaries are correctly included or excluded
757+ // based on the zdrEnabled flag when the model makes tool calls. When zero data retention
758+ // is disabled, reasoning summaries should be present; when enabled, they should be absent.
759+
760+ // Create a tool for calculating powers
761+ const powerTool = tool (
762+ ( args ) => {
763+ return Math . pow ( args . base , args . exponent ) . toString ( ) ;
764+ } ,
765+ {
766+ name : "calculate_power" ,
767+ description : "Calculate base raised to the power of exponent" ,
768+ schema : z . object ( {
769+ base : z . number ( ) . describe ( "The base number" ) ,
770+ exponent : z . number ( ) . describe ( "The exponent" ) ,
771+ } ) ,
772+ }
773+ ) ;
774+
775+ // Instantiate the model with tools bound
776+ const model = new ChatOpenAI ( {
777+ model : "gpt-5" ,
778+ useResponsesApi : true ,
779+ zdrEnabled,
780+ } ) . bindTools ( [ powerTool ] ) ;
781+
782+ // Create initial messages
783+ const messages : BaseMessage [ ] = [
784+ new SystemMessage (
785+ "You are a helpful assistant that uses tools to answer questions accurately."
786+ ) ,
787+ new HumanMessage ( "What is 3 to the power of 3?" ) ,
788+ ] ;
789+
790+ // First invocation - should trigger tool call
791+ let response : BaseMessage = await model . invoke ( messages , { } ) ;
792+
793+ // Verify response is an AIMessage
794+ expect ( isAIMessage ( response ) ) . toBe ( true ) ;
795+ const aiResponse = response as AIMessage ;
796+
797+ // Verify tool calls were made
798+ expect ( aiResponse . tool_calls ) . toBeDefined ( ) ;
799+ expect ( Array . isArray ( aiResponse . tool_calls ) ) . toBe ( true ) ;
800+ expect ( aiResponse . tool_calls ! . length ) . toBeGreaterThan ( 0 ) ;
801+
802+ // Verify reasoning summary based on zdrEnabled
803+ const reasoning = aiResponse . additional_kwargs . reasoning as
804+ | ChatOpenAIReasoningSummary
805+ | undefined ;
806+ if ( ! zdrEnabled ) {
807+ // When zdrEnabled is false, reasoning summaries should be present
808+ expect ( reasoning ) . toBeDefined ( ) ;
809+ expect ( reasoning ?. type ) . toBe ( "reasoning" ) ;
810+ expect ( reasoning ?. id ) . toBeDefined ( ) ;
811+ expect ( reasoning ?. summary ) . toBeDefined ( ) ;
812+ expect ( Array . isArray ( reasoning ?. summary ) ) . toBe ( true ) ;
813+ if ( reasoning ?. summary && reasoning . summary . length > 0 ) {
814+ for ( const summaryItem of reasoning . summary ) {
815+ expect ( summaryItem . type ) . toBe ( "summary_text" ) ;
816+ expect ( typeof summaryItem . text ) . toBe ( "string" ) ;
817+ }
818+ }
819+ }
820+
821+ // Execute tools and create tool results
822+ const toolResults : ToolMessage [ ] = [ ] ;
823+ for ( const toolCall of aiResponse . tool_calls ! ) {
824+ const { name } = toolCall ;
825+ const tool = [ powerTool ] . find ( ( t ) => t . name === name ) ;
826+ expect ( tool ) . toBeDefined ( ) ;
827+
828+ // Invoke the tool with the tool call - this returns a ToolMessage with the correct tool_call_id
829+ const toolMessage : ToolMessage = await tool ! . invoke ( toolCall ) ;
830+ expect ( toolMessage . tool_call_id ) . toBe ( toolCall . id ) ;
831+ toolResults . push ( toolMessage ) ;
832+ }
833+
834+ // Add response and tool results to messages
835+ messages . push ( aiResponse , ...toolResults ) ;
836+
837+ // Second invocation - should use tool results to provide final answer
838+ // This verifies that reasoning summaries are properly paired with function calls
839+ response = await model . invoke ( messages , { } ) ;
840+ expect ( isAIMessage ( response ) ) . toBe ( true ) ;
841+ expect ( response ) . toBeDefined ( ) ;
842+
843+ // Verify reasoning summaries are properly paired throughout the flow
844+ // The reasoning summary from the first call should be preserved when passed back
845+ if ( ! zdrEnabled && reasoning ) {
846+ // Verify that the reasoning summary can be properly paired with function calls
847+ // by checking that tool calls have proper IDs that can be matched
848+ expect ( aiResponse . tool_calls ! [ 0 ] . id ) . toBeDefined ( ) ;
849+ expect ( toolResults [ 0 ] . tool_call_id ) . toBe ( aiResponse . tool_calls ! [ 0 ] . id ) ;
850+ }
851+ }
852+ ) ;
853+
749854 test ( "it can handle passing back reasoning outputs alongside computer calls" , async ( ) => {
750855 const model = new ChatOpenAI ( {
751856 model : "computer-use-preview" ,
@@ -912,6 +1017,7 @@ describe("promptCacheKey", () => {
9121017 const response2 = await invoke ( ) ;
9131018 expect ( response2 ) . toBeDefined ( ) ;
9141019 expect (
1020+ // @ts-expect-error - FIXME(hntrl): bad unknown type
9151021 response2 . response_metadata . usage . prompt_tokens_details . cached_tokens
9161022 ) . toBeGreaterThan ( 0 ) ;
9171023 } ) ;
@@ -930,12 +1036,13 @@ describe("promptCacheKey", () => {
9301036 const response2 = await invoke ( ) ;
9311037 expect ( response2 ) . toBeDefined ( ) ;
9321038 expect (
1039+ // @ts-expect-error - FIXME(hntrl): bad unknown type
9331040 response2 . response_metadata . usage . prompt_tokens_details . cached_tokens
9341041 ) . toBeGreaterThan ( 0 ) ;
9351042 } ) ;
9361043} ) ;
9371044
938- it . only ( "won't modify structured output content if outputVersion is set" , async ( ) => {
1045+ it ( "won't modify structured output content if outputVersion is set" , async ( ) => {
9391046 const schema = z . object ( { name : z . string ( ) } ) ;
9401047 const model = new ChatOpenAI ( {
9411048 model : "gpt-5" ,
0 commit comments