@@ -3288,8 +3288,8 @@ macro_rules! locked_close_channel {
 	}};
 	($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{
 		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
-			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
-				$funded_chan.context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
+			handle_new_monitor_update_actions_deferred!($self, funding_txo, update, $peer_state,
+				$funded_chan.context);
 		}
 		// If there's a possibility that we need to generate further monitor updates for this
 		// channel, we need to store the last update_id of it. However, we don't want to insert
@@ -3648,57 +3648,152 @@ macro_rules! handle_monitor_update_completion {
 	} }
 }
 
-macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
-		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
-		match $update_res {
-			ChannelMonitorUpdateStatus::UnrecoverableError => {
-				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-				log_error!($logger, "{}", err_str);
-				panic!("{}", err_str);
-			},
-			ChannelMonitorUpdateStatus::InProgress => {
-				#[cfg(not(any(test, feature = "_externalize_tests")))]
-				if $self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 {
-					panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
-				}
-				log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
-					$channel_id);
-				false
-			},
-			ChannelMonitorUpdateStatus::Completed => {
-				#[cfg(not(any(test, feature = "_externalize_tests")))]
-				if $self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 {
-					panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
-				}
-				$completed;
-				true
-			},
-		}
-	} };
-	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
+/// Returns `true` if the monitor update completed, `false` if it is still in progress.
+fn handle_monitor_update_res<CM: AChannelManager, LG: Logger>(
+	cm: &CM, update_res: ChannelMonitorUpdateStatus, channel_id: ChannelId, logger: LG,
+) -> bool {
+	debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire));
+	match update_res {
+		ChannelMonitorUpdateStatus::UnrecoverableError => {
+			let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+			log_error!(logger, "{}", err_str);
+			panic!("{}", err_str);
+		},
+		ChannelMonitorUpdateStatus::InProgress => {
+			#[cfg(not(any(test, feature = "_externalize_tests")))]
+			if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 {
+				panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
+			}
+			log_debug!(logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
+				channel_id);
+			false
+		},
+		ChannelMonitorUpdateStatus::Completed => {
+			#[cfg(not(any(test, feature = "_externalize_tests")))]
+			if cm.get_cm().monitor_update_type.swap(2, Ordering::Relaxed) == 1 {
+				panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
+			}
+			true
+		},
+	}
+}
+
+macro_rules! handle_initial_monitor {
+	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
-			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
+		let update_completed =
+			handle_monitor_update_res($self, $update_res, $chan.context.channel_id(), logger);
+		if update_completed {
+			handle_monitor_update_completion!(
+				$self,
+				$peer_state_lock,
+				$peer_state,
+				$per_peer_state_lock,
+				$chan
+			);
+		}
 	};
+}
+
+macro_rules! handle_post_close_monitor_update {
+	(
+		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
+		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr
+	) => {{
+		let logger =
+			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
+		let in_flight_updates;
+		let idx;
+		handle_new_monitor_update_internal!(
+			$self,
+			$funding_txo,
+			$update,
+			$peer_state,
+			logger,
+			$channel_id,
+			$counterparty_node_id,
+			in_flight_updates,
+			idx,
+			{
+				let _ = in_flight_updates.remove(idx);
+				if in_flight_updates.is_empty() {
+					let update_actions = $peer_state
+						.monitor_update_blocked_actions
+						.remove(&$channel_id)
+						.unwrap_or(Vec::new());
+
+					mem::drop($peer_state_lock);
+					mem::drop($per_peer_state_lock);
+
+					$self.handle_monitor_update_completion_actions(update_actions);
+				}
+			}
+		)
+	}};
+}
+
+/// Handles a new monitor update without dropping the peer_state locks or calling
+/// [`ChannelManager::handle_monitor_update_completion_actions`], even if the monitor update
+/// completes synchronously.
+///
+/// Useful because monitor updates need to be handled in the same mutex where the channel generated
+/// them (otherwise they can end up getting applied out-of-order), but it's not always possible to
+/// drop the aforementioned peer state locks at a given callsite. In that case, use this macro to
+/// apply the monitor update immediately and handle the monitor update completion actions at a
+/// later time.
+macro_rules! handle_new_monitor_update_actions_deferred {
+	(
+		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr
+	) => {{
+		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
+		let chan_id = $chan_context.channel_id();
+		let counterparty_node_id = $chan_context.get_counterparty_node_id();
+		let in_flight_updates;
+		let idx;
+		handle_new_monitor_update_internal!(
+			$self,
+			$funding_txo,
+			$update,
+			$peer_state,
+			logger,
+			chan_id,
+			counterparty_node_id,
+			in_flight_updates,
+			idx,
+			{
+				let _ = in_flight_updates.remove(idx);
+			}
+		)
+	}};
+}
+
+macro_rules! handle_new_monitor_update_internal {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
 		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
-		_internal_outer, $completed: expr
-	) => { {
-		$in_flight_updates = &mut $peer_state.in_flight_monitor_updates.entry($chan_id)
-			.or_insert_with(|| ($funding_txo, Vec::new())).1;
+		$completed: expr
+	) => {{
+		$in_flight_updates = &mut $peer_state
+			.in_flight_monitor_updates
+			.entry($chan_id)
+			.or_insert_with(|| ($funding_txo, Vec::new()))
+			.1;
 		// During startup, we push monitor updates as background events through to here in
 		// order to replay updates that were in-flight when we shut down. Thus, we have to
 		// filter for uniqueness here.
-		$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
-			.unwrap_or_else(|| {
+		$update_idx =
+			$in_flight_updates.iter().position(|upd| upd == &$update).unwrap_or_else(|| {
				$in_flight_updates.push($update);
				$in_flight_updates.len() - 1
			});
 		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
-			let update_res = $self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]);
-			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
+			let update_res =
+				$self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]);
+			let update_completed = handle_monitor_update_res($self, update_res, $chan_id, $logger);
+			if update_completed {
+				$completed;
+			}
+			update_completed
 		} else {
 			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
 			// fail to persist it. This is a fairly safe assumption, however, since anything we do
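The old `_internal` arm above becomes the free function `handle_monitor_update_res`, so the status dispatch can be read and type-checked outside the macro. A minimal standalone sketch of the same dispatch pattern, using hypothetical stand-ins (`UpdateStatus`, `handle_update_res`) rather than LDK's real `ChannelMonitorUpdateStatus` machinery:

```rust
// Standalone sketch of the dispatch `handle_monitor_update_res` performs.
// `UpdateStatus` and `handle_update_res` are hypothetical stand-ins, not LDK's API.
enum UpdateStatus {
	Completed,
	InProgress,
	UnrecoverableError,
}

/// Returns `true` only when the update persisted synchronously; the caller
/// runs its completion actions in exactly that case.
fn handle_update_res(status: UpdateStatus, channel_id: u64) -> bool {
	match status {
		// Persistence failed permanently: continuing risks loss of funds,
		// so the real code logs and panics here.
		UpdateStatus::UnrecoverableError => panic!("monitor persistence failed unrecoverably"),
		// Persisting asynchronously: hold outbound messages until the
		// backend later reports completion.
		UpdateStatus::InProgress => {
			println!("update for channel {channel_id} in flight");
			false
		},
		UpdateStatus::Completed => true,
	}
}

fn main() {
	if handle_update_res(UpdateStatus::Completed, 42) {
		// Where the real callers expand handle_monitor_update_completion!.
		println!("running completion actions");
	}
}
```

The boolean return is the whole contract: each caller expands its completion logic only when the update persisted synchronously; otherwise messages stay held until the monitor backend signals completion.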
@@ -3720,62 +3815,43 @@ macro_rules! handle_new_monitor_update {
 			$self.pending_background_events.lock().unwrap().push(event);
 			false
 		}
-	} };
-	(
-		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
-		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
-	) => { {
-		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
-		let chan_id = $chan_context.channel_id();
-		let counterparty_node_id = $chan_context.get_counterparty_node_id();
-		let in_flight_updates;
-		let idx;
-		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-			counterparty_node_id, in_flight_updates, idx, _internal_outer,
-			{
-				let _ = in_flight_updates.remove(idx);
-			})
-	} };
-	(
-		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
-		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
-	) => { {
-		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
-		let in_flight_updates;
-		let idx;
-		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
-			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
-			{
-				let _ = in_flight_updates.remove(idx);
-				if in_flight_updates.is_empty() {
-					let update_actions = $peer_state.monitor_update_blocked_actions
-						.remove(&$channel_id).unwrap_or(Vec::new());
-
-					mem::drop($peer_state_lock);
-					mem::drop($per_peer_state_lock);
+	}};
+}
 
-			$self.handle_monitor_update_completion_actions(update_actions);
-		}
-	})
-	} };
+macro_rules! handle_new_monitor_update {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $chan: expr
-	) => { {
+	) => {{
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let chan_id = $chan.context.channel_id();
 		let counterparty_node_id = $chan.context.get_counterparty_node_id();
 		let in_flight_updates;
 		let idx;
-		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
-			counterparty_node_id, in_flight_updates, idx, _internal_outer,
+		handle_new_monitor_update_internal!(
+			$self,
+			$funding_txo,
+			$update,
+			$peer_state,
+			logger,
+			chan_id,
+			counterparty_node_id,
+			in_flight_updates,
+			idx,
 			{
 				let _ = in_flight_updates.remove(idx);
 				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
-					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+					handle_monitor_update_completion!(
+						$self,
+						$peer_state_lock,
+						$peer_state,
+						$per_peer_state_lock,
+						$chan
+					);
 				}
-			})
-	} };
+			}
+		)
+	}};
 }
 
 #[rustfmt::skip]
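The "filter for uniqueness" step in `handle_new_monitor_update_internal` is a find-or-push over the in-flight list: startup replay can hand the same update in twice, and it must reuse the existing slot rather than queue a duplicate. A minimal sketch of that idiom, with plain `u64`s standing in for `ChannelMonitorUpdate`s:

```rust
// Find-or-push: reuse the slot of an identical in-flight update instead of
// queueing a duplicate (startup replay may deliver the same update twice).
// `u64` is a stand-in for `ChannelMonitorUpdate` here.
fn find_or_push(in_flight: &mut Vec<u64>, update: u64) -> usize {
	in_flight.iter().position(|upd| *upd == update).unwrap_or_else(|| {
		in_flight.push(update);
		in_flight.len() - 1
	})
}

fn main() {
	let mut in_flight = vec![7, 9];
	assert_eq!(find_or_push(&mut in_flight, 9), 1); // duplicate: existing index
	assert_eq!(find_or_push(&mut in_flight, 11), 2); // new: pushed at the end
	assert_eq!(in_flight, vec![7, 9, 11]);
}
```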
@@ -4452,9 +4528,9 @@ where
 			hash_map::Entry::Vacant(_) => {},
 		}
 
-		handle_new_monitor_update!(
+		handle_post_close_monitor_update!(
 			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
-			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
+			counterparty_node_id, channel_id
 		);
 	}
 
@@ -9070,16 +9146,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				.push(action);
 		}
 
-		handle_new_monitor_update!(
+		handle_post_close_monitor_update!(
 			self,
 			prev_hop.funding_txo,
 			preimage_update,
 			peer_state_lock,
 			peer_state,
 			per_peer_state,
 			prev_hop.counterparty_node_id,
-			chan_id,
-			POST_CHANNEL_CLOSE
+			chan_id
 		);
 	}
 
@@ -10260,8 +10335,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 
 		if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() {
-			handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
-				per_peer_state, funded_chan, INITIAL_MONITOR);
+			handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state,
+				per_peer_state, funded_chan);
 		} else {
 			unreachable!("This must be a funded channel as we just inserted it.");
 		}
@@ -10424,7 +10499,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			})
 		{
 			Ok((funded_chan, persist_status)) => {
-				handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan, INITIAL_MONITOR);
+				handle_initial_monitor!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan);
 				Ok(())
 			},
 			Err(e) => try_channel_entry!(self, peer_state, Err(e), chan_entry),
@@ -11123,8 +11198,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		if let Some(monitor) = monitor_opt {
 			let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
 			if let Ok(persist_state) = monitor_res {
-				handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
-					per_peer_state, chan, INITIAL_MONITOR);
+				handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state,
+					per_peer_state, chan);
 			} else {
 				let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 				log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated");
@@ -13564,16 +13639,15 @@ where
 				};
 				self.pending_background_events.lock().unwrap().push(event);
 			} else {
-				handle_new_monitor_update!(
+				handle_post_close_monitor_update!(
 					self,
 					channel_funding_outpoint,
 					update,
 					peer_state,
 					peer_state,
 					per_peer_state,
 					counterparty_node_id,
-					channel_id,
-					POST_CHANNEL_CLOSE
+					channel_id
 				);
 			}
 		},
@@ -14286,13 +14360,12 @@ where
 				insert_short_channel_id!(short_to_chan_info, funded_channel);
 
 				if let Some(monitor_update) = monitor_update_opt {
-					handle_new_monitor_update!(
+					handle_new_monitor_update_actions_deferred!(
 						self,
 						funding_txo,
 						monitor_update,
 						peer_state,
-						funded_channel.context,
-						REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
+						funded_channel.context
 					);
 					to_process_monitor_update_actions.push((
 						counterparty_node_id, channel_id
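This callsite cannot drop the peer-state lock mid-iteration, so it applies each update with `handle_new_monitor_update_actions_deferred!` and queues `(counterparty_node_id, channel_id)` pairs to process once the locks are released, exactly as the macro's doc comment prescribes. A condensed sketch of that two-phase pattern, with hypothetical stand-in types in place of LDK's peer-state structures:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-in for LDK's per-peer state behind a mutex.
struct PeerState {
	// Completion actions blocked behind in-flight monitor updates, by channel.
	monitor_update_blocked_actions: HashMap<u64, Vec<String>>,
}

fn main() {
	let peer_state_mutex =
		Mutex::new(PeerState { monitor_update_blocked_actions: HashMap::new() });
	let mut to_process = Vec::new();
	{
		// Phase 1: apply monitor updates while holding the lock, so updates
		// for a channel cannot be reordered, but defer their actions.
		let mut peer_state = peer_state_mutex.lock().unwrap();
		peer_state
			.monitor_update_blocked_actions
			.entry(42)
			.or_default()
			.push("forward HTLC".to_owned());
		to_process.push(42u64);
	} // peer-state lock dropped here
	// Phase 2: run the deferred completion actions lock-free.
	for channel_id in to_process {
		let actions = peer_state_mutex
			.lock()
			.unwrap()
			.monitor_update_blocked_actions
			.remove(&channel_id)
			.unwrap_or(Vec::new());
		for action in actions {
			println!("channel {channel_id}: {action}");
		}
	}
}
```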