@@ -2559,7 +2559,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let mut shutdown_result = None;
+		let shutdown_result;
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2574,10 +2574,11 @@ where
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 						let funding_txo_opt = chan.context.get_funding_txo();
 						let their_features = &peer_state.latest_features;
-						let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (shutdown_msg, mut monitor_update_opt, htlcs) =
+						let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 						failed_htlcs = htlcs;
+						shutdown_result = local_shutdown_result;
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 
 						// We can send the `shutdown` message before updating the `ChannelMonitor`
 						// here as we don't need the monitor update to complete until we send a
@@ -2605,7 +2606,6 @@ where
 									});
 								}
 								self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-								shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 							}
 						}
 						break;
@@ -2697,30 +2697,29 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
 		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
-		let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", failed_htlcs.len());
-		for htlc_source in failed_htlcs.drain(..) {
+		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
 			// ignore the result here.
 			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 		let mut shutdown_results = Vec::new();
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
+		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -6247,22 +6246,20 @@ where
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut shutdown_result = None;
-		let unbroadcasted_batch_funding_txid;
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 			})?;
-		let (tx, chan_option) = {
+		let (tx, chan_option, shutdown_result) = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-						unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 						if let Some(msg) = closing_signed {
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 								node_id: counterparty_node_id.clone(),
@@ -6275,8 +6272,8 @@ where
 							// also implies there are no pending HTLCs left on the channel, so we can
 							// fully delete it from tracking (the channel monitor is still around to
 							// watch for old state broadcasts)!
-							(tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-						} else { (tx, None) }
+							(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+						} else { (tx, None, shutdown_result) }
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6298,7 +6295,6 @@ where
 				});
 			}
 			self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-			shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 		}
 		mem::drop(per_peer_state);
 		if let Some(shutdown_result) = shutdown_result {
@@ -6993,15 +6989,18 @@ where
 				peer_state.channel_by_id.retain(|channel_id, phase| {
 					match phase {
 						ChannelPhase::Funded(chan) => {
-							let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
 							match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-								Ok((msg_opt, tx_opt)) => {
+								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
 									if let Some(msg) = msg_opt {
 										has_update = true;
 										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 											node_id: chan.context.get_counterparty_node_id(), msg,
 										});
 									}
+									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+									if let Some(shutdown_result) = shutdown_result_opt {
+										shutdown_results.push(shutdown_result);
+									}
 									if let Some(tx) = tx_opt {
 										// We're done with this channel. We got a closing_signed and sent back
 										// a closing_signed with a closing transaction to broadcast.
@@ -7016,7 +7015,6 @@ where
 										log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 										self.tx_broadcaster.broadcast_transactions(&[&tx]);
 										update_maps_on_chan_removal!(self, &chan.context);
-										shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
 										false
 									} else { true }
 								},
@@ -7057,7 +7055,7 @@ where
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
 			// timer_tick_occurred, guaranteeing we're running normally.
-			if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+			if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
@@ -9274,16 +9272,16 @@ where
 						log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 					}
-					let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-					if batch_funding_txid.is_some() {
+					let mut shutdown_result = channel.context.force_shutdown(true);
+					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 						return Err(DecodeError::InvalidValue);
 					}
-					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+					if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 						close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 							counterparty_node_id, funding_txo, update
 						});
 					}
-					failed_htlcs.append(&mut new_failed_htlcs);
+					failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
 						channel_id: channel.context.channel_id(),
 						user_channel_id: channel.context.get_user_id(),
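
Note: the field accesses introduced throughout this diff (shutdown_res.monitor_update, shutdown_res.dropped_outbound_htlcs, shutdown_res.unbroadcasted_batch_funding_txid) assume ShutdownResult has been converted from a tuple into a struct roughly like the sketch below. The field names come from the diff itself; the tuple element types are only inferred from the destructuring patterns and variable names above, and visibility, derives, and exact type paths are omitted, so this is not the crate's definition verbatim.

struct ShutdownResult {
	// Consumed in finish_close_channel() and during ChannelManager deserialization
	// as (counterparty_node_id, funding_txo, update) and fed to the chain monitor or
	// queued as a BackgroundEvent::MonitorUpdateRegeneratedOnStartup.
	monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	// Each entry is failed backwards as (source, payment_hash, counterparty_node_id, channel_id).
	dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	// Set when the closed channel belonged to a batch whose funding transaction was
	// never broadcast; finish_close_channel() uses it to clean up funding_batch_states.
	unbroadcasted_batch_funding_txid: Option<Txid>,
}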