@@ -2626,8 +2626,13 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	#[inline]
 	fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
+		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+		#[cfg(debug_assertions)]
+		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
+		}
+
 		let (monitor_update_option, mut failed_htlcs) = shutdown_res;
 		log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
 		for htlc_source in failed_htlcs.drain(..) {
@@ -2653,8 +2658,7 @@ where
 		let peer_state_mutex = per_peer_state.get(peer_node_id)
 			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
 		let (update_opt, counterparty_node_id) = {
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
+			let mut peer_state = peer_state_mutex.lock().unwrap();
 			let closure_reason = if let Some(peer_msg) = peer_msg {
 				ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
 			} else {
@@ -2664,6 +2668,8 @@ where
 				log_error!(self.logger, "Force-closing channel {}", channel_id);
 				self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
 				let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+				mem::drop(peer_state);
+				mem::drop(per_peer_state);
 				match chan_phase {
 					ChannelPhase::Funded(mut chan) => {
 						self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
@@ -2686,10 +2692,17 @@ where
 			}
 		};
 		if let Some(update) = update_opt {
-			let mut peer_state = peer_state_mutex.lock().unwrap();
-			peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-				msg: update
-			});
+			// Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
+			// not, try to broadcast it via whatever peer we have.
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			let a_peer_state_mutex_opt = per_peer_state.get(peer_node_id)
+				.or_else(|| per_peer_state.values().next());
+			if let Some(a_peer_state_mutex) = a_peer_state_mutex_opt {
+				let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
+				a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+					msg: update
+				});
+			}
 		}
 
 		Ok(counterparty_node_id)
@@ -4627,8 +4640,9 @@ where
 			let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
 			let mut timed_out_mpp_htlcs = Vec::new();
 			let mut pending_peers_awaiting_removal = Vec::new();
+			let mut shutdown_channels = Vec::new();
 
-			let process_unfunded_channel_tick = |
+			let mut process_unfunded_channel_tick = |
 				chan_id: &ChannelId,
 				context: &mut ChannelContext<SP>,
 				unfunded_context: &mut UnfundedChannelContext,
@@ -4641,7 +4655,7 @@ where
46414655 "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
46424656 update_maps_on_chan_removal!(self, &context);
46434657 self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
4644- self.finish_force_close_channel (context.force_shutdown(false));
4658+ shutdown_channels.push (context.force_shutdown(false));
46454659 pending_msg_events.push(MessageSendEvent::HandleError {
46464660 node_id: counterparty_node_id,
46474661 action: msgs::ErrorAction::SendErrorMessage {
@@ -4834,6 +4848,10 @@ where
 				let _ = handle_error!(self, err, counterparty_node_id);
 			}
 
+			for shutdown_res in shutdown_channels {
+				self.finish_force_close_channel(shutdown_res);
+			}
+
 			self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
 
 			// Technically we don't need to do this here, but if we have holding cell entries in a
@@ -4990,6 +5008,7 @@ where
 		// This ensures that future code doesn't introduce a lock-order requirement for
 		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
 		// this function with any `per_peer_state` peer lock acquired would.
+		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
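
Taken together, the hunks above apply one pattern: while the `per_peer_state` read lock or any per-peer mutex is held, the code only collects `ShutdownResult`s (or explicitly drops its guards via `mem::drop`), and only once every lock has been released does it call `finish_force_close_channel`, which the new `held_by_thread()` debug assertions enforce in debug builds. Below is a minimal standalone sketch of that collect-then-finish pattern using plain `std::sync` types rather than LDK's debug-tracked locks; the names `PeerMap`, `timer_tick`, and `finish_shutdown` are illustrative stand-ins, not LDK APIs.

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Illustrative stand-ins for LDK's per-peer state map and the ShutdownResult
// produced when a channel is force-closed.
type PeerMap = RwLock<HashMap<u64, Mutex<Vec<String>>>>;
struct ShutdownResult { channel: String }

fn timer_tick(peers: &PeerMap) {
	// Phase 1: with the locks held, only *collect* the shutdowns.
	let mut shutdown_channels = Vec::new();
	{
		let peer_map = peers.read().unwrap();
		for (_, peer) in peer_map.iter() {
			let mut chans = peer.lock().unwrap();
			while let Some(chan) = chans.pop() {
				shutdown_channels.push(ShutdownResult { channel: chan });
			}
		}
	} // read guard and all per-peer guards dropped here

	// Phase 2: with no locks held, finish the force-closures. In LDK this is
	// finish_force_close_channel, which may itself take per-peer locks to
	// fail HTLCs backwards, hence the lock-held assertions in the diff above.
	for res in shutdown_channels {
		finish_shutdown(peers, res);
	}
}

fn finish_shutdown(peers: &PeerMap, res: ShutdownResult) {
	// Crude analogue of debug_assert_ne!(..., LockHeldState::HeldByThread):
	// in this single-threaded sketch, try_write() only succeeds if the caller
	// is not already holding the map's read lock.
	debug_assert!(peers.try_write().is_ok(), "called with per_peer_state held");
	println!("finishing force-close of {}", res.channel);
}

fn main() {
	let peers: PeerMap = RwLock::new(HashMap::from([(1, Mutex::new(vec!["chan-a".into()]))]));
	timer_tick(&peers);
}

The same two-phase shape shows up twice in the diff: `timer_tick_occurred` pushes into `shutdown_channels` inside the per-peer loop and drains it after `handle_errors`, and `force_close_channel_with_peer` drops `peer_state` and `per_peer_state` before calling `finish_force_close_channel`, then re-acquires the read lock only to queue the `BroadcastChannelUpdate`.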