@@ -2250,7 +2250,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 		// Note: updateApiReqMsg() is now called from within drainStreamInBackgroundToFindAllUsage
 		// to ensure usage data is captured even when the stream is interrupted. The background task
 		// uses local variables to accumulate usage data before atomically updating the shared state.
-		await this.persistGpt5Metadata()
+		await this.persistGpt5Metadata(reasoningMessage)
 		await this.saveClineMessages()
 		await this.providerRef.deref()?.postStateToWebview()

@@ -2836,11 +2836,10 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 	}

 	/**
-	 * Persist GPT-5 per-turn metadata (previous_response_id, instructions)
+	 * Persist GPT-5 per-turn metadata (previous_response_id, instructions, reasoning_summary)
 	 * onto the last complete assistant say("text") message.
-	 * Note: reasoning_summary is no longer persisted as reasoning is now sent as separate delta blocks.
 	 */
-	private async persistGpt5Metadata(): Promise<void> {
+	private async persistGpt5Metadata(reasoningMessage?: string): Promise<void> {
 		try {
 			const modelId = this.api.getModel().id
 			if (!modelId || !modelId.startsWith("gpt-5")) return
@@ -2861,7 +2860,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
 					...(msg.metadata.gpt5 ?? {}),
 					previous_response_id: lastResponseId,
 					instructions: this.lastUsedInstructions,
-					// reasoning_summary is no longer stored as reasoning is sent as separate blocks
+					reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
 				}
 				msg.metadata.gpt5 = gpt5Metadata
 			}
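
For readers skimming the diff, here is a minimal standalone sketch of the behavior this change introduces, not the repository's actual code: the streaming loop passes the reasoning text it accumulated (`reasoningMessage`) into the persistence step, which merges it into the message's GPT-5 metadata. The `Gpt5Metadata` and `ClineMessage` shapes below are simplified assumptions for illustration.

```ts
// Simplified, hypothetical types for illustration only; the real
// ClineMessage/Gpt5Metadata shapes live in the repository.
interface Gpt5Metadata {
	previous_response_id?: string
	instructions?: string
	reasoning_summary?: string
}

interface ClineMessage {
	say?: string
	text?: string
	metadata?: { gpt5?: Gpt5Metadata }
}

// Mirrors the merge performed in the hunk above: existing gpt5 metadata is
// preserved, and an empty or whitespace-only reasoningMessage is stored as
// undefined rather than as an empty string.
function mergeGpt5Metadata(
	msg: ClineMessage,
	lastResponseId: string | undefined,
	instructions: string | undefined,
	reasoningMessage?: string,
): void {
	msg.metadata = msg.metadata ?? {}
	msg.metadata.gpt5 = {
		...(msg.metadata.gpt5 ?? {}),
		previous_response_id: lastResponseId,
		instructions,
		reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
	}
}
```

The `(reasoningMessage ?? "").trim() || undefined` idiom keeps empty or whitespace-only summaries out of the persisted metadata instead of writing an empty string.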