@@ -2556,7 +2556,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let mut shutdown_result = None;
+		let shutdown_result;
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 
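Note on the hunk above: `shutdown_result` can lose both its `mut` and its `None` initializer because Rust's definite-initialization analysis only requires that the binding be assigned on every path before it is read (here, once inside the `loop` before the `break`). A minimal, self-contained sketch of that pattern, with illustrative names that are not taken from this patch:

```rust
// Sketch only (not part of the patch): the binding has no initializer and no
// `mut`, and the compiler verifies every path assigns it exactly once before use.
fn first_even(candidates: &[u32]) -> Option<u32> {
	let result;
	loop {
		if let Some(&c) = candidates.iter().find(|&&c| c % 2 == 0) {
			result = Some(c);
		} else {
			result = None;
		}
		break;
	}
	result
}

fn main() {
	assert_eq!(first_even(&[3, 5, 8]), Some(8));
	assert_eq!(first_even(&[1, 3]), None);
}
```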
@@ -2571,10 +2571,10 @@ where
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 						let funding_txo_opt = chan.context.get_funding_txo();
 						let their_features = &peer_state.latest_features;
-						let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (shutdown_msg, mut monitor_update_opt, htlcs) =
+						let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 						failed_htlcs = htlcs;
+						shutdown_result = local_shutdown_result;
 
 						// We can send the `shutdown` message before updating the `ChannelMonitor`
 						// here as we don't need the monitor update to complete until we send a
@@ -2602,7 +2602,6 @@ where
 									});
 								}
 								self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-								shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 							}
 						}
 						break;
@@ -2692,30 +2691,29 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
 		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
-		let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", failed_htlcs.len());
-		for htlc_source in failed_htlcs.drain(..) {
+		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
 			// ignore the result here.
 			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 		let mut shutdown_results = Vec::new();
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
+		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
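For context on the field accesses introduced above: `ShutdownResult` has evidently changed from a tuple into a struct with named fields. Its definition is not part of this diff (it lives in `channel.rs`), but based on how `finish_close_channel` now consumes it, the shape is plausibly something like the sketch below; the field types are inferred from the destructuring in this file and should be treated as assumptions:

```rust
// Rough sketch only: field names come from this diff, field types are inferred
// from how the fields are destructured in channelmanager.rs, and the real
// definition (in channel.rs) is not shown in these hunks.
pub(crate) struct ShutdownResult {
	// Consumed as `Some((counterparty_node_id, funding_txo, update))` elsewhere in this diff.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	// Drained above into `(source, payment_hash, counterparty_node_id, channel_id)` tuples.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	// Checked with `.is_some()` and used to key `funding_batch_states`.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
```

(Types such as `PublicKey`, `OutPoint`, `ChannelMonitorUpdate`, `HTLCSource`, `PaymentHash`, `ChannelId`, and `Txid` are assumed to already be in scope in `channelmanager.rs`.)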
@@ -6240,22 +6238,19 @@ where
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut shutdown_result = None;
-		let unbroadcasted_batch_funding_txid;
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 			})?;
-		let (tx, chan_option) = {
+		let (tx, chan_option, shutdown_result) = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-						unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
 						if let Some(msg) = closing_signed {
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 								node_id: counterparty_node_id.clone(),
@@ -6268,8 +6263,8 @@ where
 							// also implies there are no pending HTLCs left on the channel, so we can
 							// fully delete it from tracking (the channel monitor is still around to
 							// watch for old state broadcasts)!
-							(tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-						} else { (tx, None) }
+							(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+						} else { (tx, None, shutdown_result) }
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6291,7 +6286,6 @@ where
 				});
 			}
 			self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-			shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 		}
 		mem::drop(per_peer_state);
 		if let Some(shutdown_result) = shutdown_result {
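One detail worth noting in the hunk above: the shutdown result is produced while `per_peer_state` (and the peer's own lock) are held, but it is only acted on after `mem::drop(per_peer_state)`, which is what the `debug_assert_ne!(..., LockHeldState::HeldByThread)` checks at the top of `finish_close_channel` require. A minimal, self-contained sketch of the same "compute under the lock, finish after releasing it" pattern, using illustrative names that are not from this patch:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

struct Manager {
	channels: RwLock<HashMap<u64, String>>,
}

impl Manager {
	// Stand-in for finish_close_channel: must never run while `channels` is locked.
	fn finish_close(&self, closed: String) {
		println!("finished closing {closed}");
	}

	fn close(&self, id: u64) {
		// Compute the result while holding the lock...
		let removed = {
			let mut channels = self.channels.write().unwrap();
			channels.remove(&id)
		}; // ...then release the lock before doing the follow-up work.
		if let Some(closed) = removed {
			self.finish_close(closed);
		}
	}
}

fn main() {
	let mgr = Manager { channels: RwLock::new(HashMap::from([(1, "chan-1".to_string())])) };
	mgr.close(1);
}
```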
@@ -6988,13 +6982,16 @@ where
 						ChannelPhase::Funded(chan) => {
 							let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
 							match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-								Ok((msg_opt, tx_opt)) => {
+								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
 									if let Some(msg) = msg_opt {
 										has_update = true;
 										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 											node_id: chan.context.get_counterparty_node_id(), msg,
 										});
 									}
+									if let Some(shutdown_result) = shutdown_result_opt {
+										shutdown_results.push(shutdown_result);
+									}
 									if let Some(tx) = tx_opt {
 										// We're done with this channel. We got a closing_signed and sent back
 										// a closing_signed with a closing transaction to broadcast.
@@ -7009,7 +7006,6 @@ where
 										log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 										self.tx_broadcaster.broadcast_transactions(&[&tx]);
 										update_maps_on_chan_removal!(self, &chan.context);
-										shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
 										false
 									} else { true }
 								},
@@ -7050,7 +7046,7 @@ where
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
 			// timer_tick_occurred, guaranteeing we're running normally.
-			if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+			if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
@@ -9267,16 +9263,16 @@ where
 						log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 					}
-					let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-					if batch_funding_txid.is_some() {
+					let mut shutdown_result = channel.context.force_shutdown(true);
+					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 						return Err(DecodeError::InvalidValue);
 					}
-					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+					if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 						close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 							counterparty_node_id, funding_txo, update
 						});
 					}
-					failed_htlcs.append(&mut new_failed_htlcs);
+					failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
 						channel_id: channel.context.channel_id(),
 						user_channel_id: channel.context.get_user_id(),