@@ -457,7 +457,7 @@ impl MsgHandleErrInternal {
 	#[inline]
 	fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
 		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
-		let action = if let (Some(_), ..) = &shutdown_res {
+		let action = if shutdown_res.monitor_update.is_some() {
 			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
 			// should disconnect our peer such that we force them to broadcast their latest
 			// commitment upon reconnecting.
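Note (sketch, not part of the patch): the field accesses introduced here and below (shutdown_res.monitor_update, shutdown_res.dropped_outbound_htlcs, shutdown_res.unbroadcasted_batch_funding_txid) imply a ShutdownResult struct of roughly the following shape. The field types are inferred from the destructuring patterns in this diff and assume the usual channelmanager.rs imports; this is a sketch, not the authoritative definition.

// Sketch of ShutdownResult as implied by the usage in this diff; field types are
// inferred from the destructuring patterns and are not copied from the patch.
pub(crate) struct ShutdownResult {
	// Taken apart as (counterparty_node_id, funding_txo, update) before being handed
	// to the chain monitor or queued as a background event.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	// One entry per outbound HTLC to fail backwards:
	// (source, payment_hash, counterparty_node_id, channel_id).
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	// Present only if the channel belonged to a batch funding transaction that was
	// never broadcast.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}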
@@ -2602,7 +2602,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let mut shutdown_result = None;
+		let shutdown_result;
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2617,10 +2617,11 @@ where
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 						let funding_txo_opt = chan.context.get_funding_txo();
 						let their_features = &peer_state.latest_features;
-						let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (shutdown_msg, mut monitor_update_opt, htlcs) =
+						let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 						failed_htlcs = htlcs;
+						shutdown_result = local_shutdown_result;
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
 						// We can send the `shutdown` message before updating the `ChannelMonitor`
 						// here as we don't need the monitor update to complete until we send a
@@ -2648,7 +2649,6 @@ where
 									});
 								}
 								self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-								shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 							}
 						}
 						break;
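Note (sketch, not from the patch): the call site above destructures a fourth element, local_shutdown_result, from Channel::get_shutdown, so its return type presumably gains an Option<ShutdownResult>. The alias name below is hypothetical, and the APIError error type is an assumption based on the `?` in close_channel_internal.

// Hypothetical alias describing the assumed post-change return type of
// Channel::get_shutdown: the shutdown message to send, an optional monitor update,
// the outbound HTLCs to fail backwards, and a ShutdownResult once the channel is
// already fully shut down.
type GetShutdownReturn = Result<
	(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>),
	APIError,
>;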
@@ -2739,30 +2739,29 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
 		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
-		let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-		log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
-		for htlc_source in failed_htlcs.drain(..) {
+		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
 			// ignore the result here.
 			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 		let mut shutdown_results = Vec::new();
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
+		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3907,7 +3906,7 @@ where
 	/// Return values are identical to [`Self::funding_transaction_generated`], respective to
 	/// each individual channel and transaction output.
 	///
-	/// Do NOT broadcast the funding transaction yourself. This batch funding transcaction
+	/// Do NOT broadcast the funding transaction yourself. This batch funding transaction
 	/// will only be broadcast when we have safely received and persisted the counterparty's
 	/// signature for each channel.
 	///
@@ -3961,7 +3960,7 @@ where
 				btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
 			}
 		});
-		for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() {
+		for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
 			result = result.and_then(|_| self.funding_transaction_generated_intern(
 				temporary_channel_id,
 				counterparty_node_id,
@@ -6453,22 +6452,20 @@ where
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut shutdown_result = None;
-		let unbroadcasted_batch_funding_txid;
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 			})?;
-		let (tx, chan_option) = {
+		let (tx, chan_option, shutdown_result) = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-						unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 						if let Some(msg) = closing_signed {
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 								node_id: counterparty_node_id.clone(),
@@ -6481,8 +6478,8 @@ where
 							// also implies there are no pending HTLCs left on the channel, so we can
 							// fully delete it from tracking (the channel monitor is still around to
 							// watch for old state broadcasts)!
-							(tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-						} else { (tx, None) }
+							(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+						} else { (tx, None, shutdown_result) }
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6504,7 +6501,6 @@ where
 				});
 			}
 			self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-			shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 		}
 		mem::drop(per_peer_state);
 		if let Some(shutdown_result) = shutdown_result {
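Note (sketch, not from the patch): internal_closing_signed now receives the Option<ShutdownResult> directly from Channel::closing_signed instead of assembling a tuple itself, and the timer path in the next hunk gets the same third element from maybe_propose_closing_signed. The alias name below is hypothetical; the ChannelError error type is assumed from the try_chan_phase_entry! convention.

// Hypothetical alias for the assumed return type shared by Channel::closing_signed
// and Channel::maybe_propose_closing_signed after this change: an optional
// closing_signed to send back, the fully signed closing transaction once
// negotiation completes, and a ShutdownResult when the channel has finished
// shutting down.
type ClosingSignedReturn = Result<
	(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>),
	ChannelError,
>;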
@@ -7237,15 +7233,18 @@ where
 				peer_state.channel_by_id.retain(|channel_id, phase| {
 					match phase {
 						ChannelPhase::Funded(chan) => {
-							let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
 							match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-								Ok((msg_opt, tx_opt)) => {
+								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
 									if let Some(msg) = msg_opt {
 										has_update = true;
 										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 											node_id: chan.context.get_counterparty_node_id(), msg,
 										});
 									}
+									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+									if let Some(shutdown_result) = shutdown_result_opt {
+										shutdown_results.push(shutdown_result);
+									}
 									if let Some(tx) = tx_opt {
 										// We're done with this channel. We got a closing_signed and sent back
 										// a closing_signed with a closing transaction to broadcast.
@@ -7260,7 +7259,6 @@ where
 										log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 										self.tx_broadcaster.broadcast_transactions(&[&tx]);
 										update_maps_on_chan_removal!(self, &chan.context);
-										shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
 										false
 									} else { true }
 								},
@@ -7301,7 +7299,7 @@ where
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
 			// timer_tick_occurred, guaranteeing we're running normally.
-			if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+			if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
@@ -9966,16 +9964,16 @@ where
 						log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 					}
-					let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-					if batch_funding_txid.is_some() {
+					let mut shutdown_result = channel.context.force_shutdown(true);
+					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 						return Err(DecodeError::InvalidValue);
 					}
-					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+					if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 						close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 							counterparty_node_id, funding_txo, update
 						});
 					}
-					failed_htlcs.append(&mut new_failed_htlcs);
+					failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
 						channel_id: channel.context.channel_id(),
 						user_channel_id: channel.context.get_user_id(),
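Note (sketch, not from the patch): ChannelContext::force_shutdown evidently now returns a ShutdownResult rather than the old three-element tuple, so the deserialization path reads named fields. Below is a minimal consumption sketch assuming the usual channelmanager.rs types are in scope; the function name is hypothetical and the exact element types of failed_htlcs are inferred from this diff.

// Mirrors the stale-channel handling above using the named ShutdownResult fields.
fn consume_force_shutdown_result(
	mut shutdown_result: ShutdownResult,
	failed_htlcs: &mut Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	close_background_events: &mut Vec<BackgroundEvent>,
) -> Result<(), DecodeError> {
	// Channels still tied to an unbroadcast batch funding transaction cannot be
	// resumed from a stale ChannelManager.
	if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
		return Err(DecodeError::InvalidValue);
	}
	// Replay the force-close monitor update on startup, if one was generated.
	if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
		close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
			counterparty_node_id, funding_txo, update
		});
	}
	// Queue the dropped outbound HTLCs to be failed backwards.
	failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
	Ok(())
}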