@@ -1366,6 +1366,25 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
13661366 },
13671367);
13681368
1369+ /// Data extracted from a channel while locks are held, to be processed after locks are released.
1370+ pub(super) enum MonitorUpdateCompletionData {
1371+ /// Channel has blocked monitor updates pending. Only process update actions.
1372+ Blocked {
1373+ update_actions: Vec<MonitorUpdateCompletionAction>, // drained from `monitor_update_blocked_actions` for this channel
1374+ },
1375+ /// Channel is fully unblocked and can be resumed.
1376+ Unblocked {
1377+ channel_id: ChannelId,
1378+ counterparty_node_id: PublicKey,
1379+ unbroadcasted_batch_funding_txid: Option<Txid>, // Some if a batch funding tx for this channel was never broadcast
1380+ update_actions: Vec<MonitorUpdateCompletionAction>, // drained from `monitor_update_blocked_actions` for this channel
1381+ htlc_forwards: Option<PerSourcePendingForward>, // forwards surfaced by `handle_channel_resumption`
1382+ decode_update_add_htlcs: Option<(u64, Vec<msgs::UpdateAddHTLC>)>, // presumably (scid, HTLCs) still to decode — confirm key meaning at call sites
1383+ finalized_claimed_htlcs: Vec<(HTLCSource, Option<AttributionData>)>, // from `monitor_updating_restored`
1384+ failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, // from `monitor_updating_restored`
1385+ },
1386+ }
1387+
13691388#[derive(Clone, Debug, PartialEq, Eq)]
13701389pub(crate) struct PaymentCompleteUpdate {
13711390 counterparty_node_id: PublicKey,
@@ -3280,93 +3299,18 @@ macro_rules! emit_initial_channel_ready_event {
32803299/// Requires that the in-flight monitor update set for this channel is empty!
32813300macro_rules! handle_monitor_update_completion {
32823301 ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{
3283- let chan_id = $chan.context.channel_id();
3284- let outbound_alias = $chan.context().outbound_scid_alias();
3285- let cp_node_id = $chan.context.get_counterparty_node_id();
3286-
3287- #[cfg(debug_assertions)]
3288- {
3289- let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id);
3290- assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
3291- assert!($chan.is_awaiting_monitor_update());
3292- }
3293-
3294- let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3295-
3296- let update_actions =
3297- $peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new());
3298-
3299- if $chan.blocked_monitor_updates_pending() != 0 {
3300- mem::drop($peer_state_lock);
3301- mem::drop($per_peer_state_lock);
3302-
3303- log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
3304- $self.handle_monitor_update_completion_actions(update_actions);
3305- } else {
3306- log_debug!(logger, "Channel is open and awaiting update, resuming it");
3307- let updates = $chan.monitor_updating_restored(
3308- &&logger,
3309- &$self.node_signer,
3310- $self.chain_hash,
3311- &*$self.config.read().unwrap(),
3312- $self.best_block.read().unwrap().height,
3313- |htlc_id| {
3314- $self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id)
3315- },
3316- );
3317- let channel_update = if updates.channel_ready.is_some()
3318- && $chan.context.is_usable()
3319- && $peer_state.is_connected
3320- {
3321- // We only send a channel_update in the case where we are just now sending a
3322- // channel_ready and the channel is in a usable state. We may re-send a
3323- // channel_update later through the announcement_signatures process for public
3324- // channels, but there's no reason not to just inform our counterparty of our fees
3325- // now.
3326- if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) {
3327- Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg })
3328- } else {
3329- None
3330- }
3331- } else {
3332- None
3333- };
3334-
3335- let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
3336- &mut $peer_state.pending_msg_events,
3337- $chan,
3338- updates.raa,
3339- updates.commitment_update,
3340- updates.commitment_order,
3341- updates.accepted_htlcs,
3342- updates.pending_update_adds,
3343- updates.funding_broadcastable,
3344- updates.channel_ready,
3345- updates.announcement_sigs,
3346- updates.tx_signatures,
3347- None,
3348- updates.channel_ready_order,
3349- );
3350- if let Some(upd) = channel_update {
3351- $peer_state.pending_msg_events.push(upd);
3352- }
3302+ let completion_data = $self.prepare_monitor_update_completion_data( // gather all completion data while the locks are still held
3303+ &mut $peer_state.in_flight_monitor_updates,
3304+ &mut $peer_state.monitor_update_blocked_actions,
3305+ &mut $peer_state.pending_msg_events,
3306+ $peer_state.is_connected,
3307+ $chan,
3308+ );
33533309
3354- let unbroadcasted_batch_funding_txid =
3355- $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
3356- core::mem::drop($peer_state_lock);
3357- core::mem::drop($per_peer_state_lock);
3310+ mem::drop($peer_state_lock); // locks must be released before running completion handlers
3311+ mem::drop($per_peer_state_lock);
33583312
3359- $self.post_monitor_update_unlock(
3360- chan_id,
3361- cp_node_id,
3362- unbroadcasted_batch_funding_txid,
3363- update_actions,
3364- htlc_forwards,
3365- decode_update_add_htlcs,
3366- updates.finalized_claimed_htlcs,
3367- updates.failed_htlcs,
3368- );
3369- }
3313+ $self.handle_monitor_update_completion_data(completion_data); // safe now that no locks are held
33703314 }};
33713315}
33723316
@@ -9791,6 +9735,135 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
97919735 }
97929736 }
97939737
9738+ /// Prepares data for monitor update completion while locks are still held.
9739+ /// This extracts all necessary data from the channel and peer state fields.
9740+ ///
9741+ /// Note: This method takes individual fields from `PeerState` rather than the whole struct
9742+ /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`.
9743+ fn prepare_monitor_update_completion_data(
9744+ &self,
9745+ in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
9746+ monitor_update_blocked_actions: &mut BTreeMap<
9747+ ChannelId,
9748+ Vec<MonitorUpdateCompletionAction>,
9749+ >,
9750+ pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool,
9751+ chan: &mut FundedChannel<SP>,
9752+ ) -> MonitorUpdateCompletionData {
9753+ let chan_id = chan.context.channel_id();
9754+ let outbound_alias = chan.context.outbound_scid_alias();
9755+ let counterparty_node_id = chan.context.get_counterparty_node_id();
9756+
9757+ #[cfg(debug_assertions)]
9758+ {
9759+ let in_flight_updates = in_flight_monitor_updates.get(&chan_id);
9760+ assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); // caller promises the in-flight update set for this channel is empty
9761+ assert!(chan.is_awaiting_monitor_update());
9762+ }
9763+
9764+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9765+
9766+ let update_actions = monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new());
9767+
9768+ if chan.blocked_monitor_updates_pending() != 0 { // further blocked updates queued: don't resume the channel yet
9769+ log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
9770+ MonitorUpdateCompletionData::Blocked { update_actions }
9771+ } else {
9772+ log_debug!(logger, "Channel is open and awaiting update, resuming it");
9773+ let updates = chan.monitor_updating_restored(
9774+ &&logger,
9775+ &self.node_signer,
9776+ self.chain_hash,
9777+ &*self.config.read().unwrap(),
9778+ self.best_block.read().unwrap().height,
9779+ |htlc_id| {
9780+ self.path_for_release_held_htlc(
9781+ htlc_id,
9782+ outbound_alias,
9783+ &chan_id,
9784+ &counterparty_node_id,
9785+ )
9786+ },
9787+ );
9788+ let channel_update = if updates.channel_ready.is_some()
9789+ && chan.context.is_usable()
9790+ && is_connected
9791+ { // We only send a channel_update when just now sending a channel_ready on a usable channel. We may re-send one later through announcement_signatures for public channels, but there's no reason not to inform our counterparty of our fees now.
9792+ if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(chan) {
9793+ Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg })
9794+ } else {
9795+ None
9796+ }
9797+ } else {
9798+ None
9799+ };
9800+
9801+ let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
9802+ pending_msg_events,
9803+ chan,
9804+ updates.raa,
9805+ updates.commitment_update,
9806+ updates.commitment_order,
9807+ updates.accepted_htlcs,
9808+ updates.pending_update_adds,
9809+ updates.funding_broadcastable,
9810+ updates.channel_ready,
9811+ updates.announcement_sigs,
9812+ updates.tx_signatures,
9813+ None,
9814+ updates.channel_ready_order,
9815+ );
9816+ if let Some(upd) = channel_update {
9817+ pending_msg_events.push(upd); // queued while locks are held; delivered later with other msg events
9818+ }
9819+
9820+ let unbroadcasted_batch_funding_txid =
9821+ chan.context.unbroadcasted_batch_funding_txid(&chan.funding);
9822+
9823+ MonitorUpdateCompletionData::Unblocked {
9824+ channel_id: chan_id,
9825+ counterparty_node_id,
9826+ unbroadcasted_batch_funding_txid,
9827+ update_actions,
9828+ htlc_forwards,
9829+ decode_update_add_htlcs,
9830+ finalized_claimed_htlcs: updates.finalized_claimed_htlcs,
9831+ failed_htlcs: updates.failed_htlcs,
9832+ }
9833+ }
9834+ }
9835+
9836+ /// Processes monitor update completion data after locks have been released.
9837+ /// Call this after dropping peer_state_lock and per_peer_state locks.
9838+ fn handle_monitor_update_completion_data(&self, data: MonitorUpdateCompletionData) {
9839+ match data {
9840+ MonitorUpdateCompletionData::Blocked { update_actions } => { // channel stays blocked; only run the queued actions
9841+ self.handle_monitor_update_completion_actions(update_actions);
9842+ },
9843+ MonitorUpdateCompletionData::Unblocked {
9844+ channel_id,
9845+ counterparty_node_id,
9846+ unbroadcasted_batch_funding_txid,
9847+ update_actions,
9848+ htlc_forwards,
9849+ decode_update_add_htlcs,
9850+ finalized_claimed_htlcs,
9851+ failed_htlcs,
9852+ } => {
9853+ self.post_monitor_update_unlock( // full post-resumption processing, now lock-free
9854+ channel_id,
9855+ counterparty_node_id,
9856+ unbroadcasted_batch_funding_txid,
9857+ update_actions,
9858+ htlc_forwards,
9859+ decode_update_add_htlcs,
9860+ finalized_claimed_htlcs,
9861+ failed_htlcs,
9862+ );
9863+ },
9864+ }
9865+ }
9866+
97949867 /// Handles a channel reentering a functional state, either due to reconnect or a monitor
97959868 /// update completion.
97969869 #[rustfmt::skip]
0 commit comments