@@ -3945,11 +3945,10 @@ where
     }
 
     /// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-    #[must_use]
     fn apply_post_close_monitor_update(
         &self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
         monitor_update: ChannelMonitorUpdate,
-    ) -> ChannelMonitorUpdateStatus {
+    ) {
         // Note that there may be some post-close updates which need to be well-ordered with
         // respect to the `update_id`, so we hold the `peer_state` lock here.
         let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3960,16 +3959,21 @@ where
         match peer_state.channel_by_id.entry(channel_id) {
             hash_map::Entry::Occupied(mut chan_phase) => {
                 if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-                    let completed = handle_new_monitor_update!(self, funding_txo,
+                    handle_new_monitor_update!(self, funding_txo,
                         monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-                    return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress } ;
+                    return;
                 } else {
                     debug_assert!(false, "We shouldn't have an update for a non-funded channel");
                 }
             },
             hash_map::Entry::Vacant(_) => {},
         }
-        self.chain_monitor.update_channel(funding_txo, &monitor_update)
+        let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+        handle_new_monitor_update!(
+            self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+            logger, channel_id, POST_CHANNEL_CLOSE
+        );
     }
 
     /// When a channel is removed, two things need to happen:
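The two hunks above remove the caller-visible `ChannelMonitorUpdateStatus` from `apply_post_close_monitor_update`: the fallback path now goes through `handle_new_monitor_update!` with the `POST_CHANNEL_CLOSE` form instead of calling `self.chain_monitor.update_channel` directly, so update completion is handled through the usual monitor-update machinery rather than reported back to the caller. A minimal, self-contained sketch of that idea follows, using simplified stand-in types rather than LDK's actual `ChannelManager`/`ChainMonitor` APIs (`PostCloseUpdates`, `UpdateStatus`, and the `u64` funding key are all illustrative assumptions):

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct MonitorUpdate { update_id: u64 }

enum UpdateStatus { Completed, InProgress }

#[derive(Default)]
struct PostCloseUpdates {
    // funding outpoint (simplified to a u64 key) -> updates not yet persisted
    in_flight: HashMap<u64, Vec<MonitorUpdate>>,
}

impl PostCloseUpdates {
    fn apply(&mut self, funding: u64, update: MonitorUpdate,
             persist: impl Fn(&MonitorUpdate) -> UpdateStatus) {
        // Record the update as in-flight *before* handing it to the persister...
        self.in_flight.entry(funding).or_default().push(update.clone());
        // ...so a synchronous completion and a later asynchronous one take the same path.
        if let UpdateStatus::Completed = persist(&update) {
            self.update_persisted(funding, update.update_id);
        }
    }

    /// Called when the persister reports that `update_id` has been written.
    fn update_persisted(&mut self, funding: u64, update_id: u64) {
        if let Some(pending) = self.in_flight.get_mut(&funding) {
            pending.retain(|u| u.update_id != update_id);
            if pending.is_empty() {
                // Nothing left in flight: any queued completion actions could run here.
            }
        }
    }
}

fn main() {
    let mut updates = PostCloseUpdates::default();
    updates.apply(1, MonitorUpdate { update_id: 42 }, |_| UpdateStatus::InProgress);
    updates.update_persisted(1, 42); // later, once the async persister finishes
}
```

Because the in-flight record is made before the persister is invoked, an asynchronous persister that finishes later simply drives the same completion path, which is why the helper no longer needs to distinguish `Completed` from `InProgress` for its callers.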
@@ -3998,7 +4002,7 @@ where
         }
         if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
             debug_assert!(false, "This should have been handled in `locked_close_channel`");
-            let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+            self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
         }
         if self.background_events_processed_since_startup.load(Ordering::Acquire) {
             // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6293,9 +6297,7 @@ where
                     let _ = self.chain_monitor.update_channel(funding_txo, &update);
                 },
                 BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-                    // The monitor update will be replayed on startup if it doesnt complete, so no
-                    // use bothering to care about the monitor update completing.
-                    let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+                    self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
                 },
                 BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
                     let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7226,32 +7228,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         let payment_hash = payment_preimage.into();
         let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-        if !during_init {
-            if let Some(action) = action_opt {
-                log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-                    chan_id, action);
-                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-            }
+        if let Some(action) = action_opt {
+            log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+                chan_id, action);
+            peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+        }
 
+        if !during_init {
             handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
         } else {
             // If we're running during init we cannot update a monitor directly - they probably
             // haven't actually been loaded yet. Instead, push the monitor update as a background
             // event.
-            // TODO: Track this update as pending and only complete the completion action when it
-            // finishes.
+
+            let in_flight_updates = peer_state.in_flight_monitor_updates
+                .entry(prev_hop.funding_txo)
+                .or_insert_with(Vec::new);
+            in_flight_updates.push(preimage_update.clone());
+
             let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                 counterparty_node_id,
                 funding_txo: prev_hop.funding_txo,
                 channel_id: prev_hop.channel_id,
                 update: preimage_update,
             };
             self.pending_background_events.lock().unwrap().push(event);
-
-            mem::drop(peer_state);
-            mem::drop(per_peer_state);
-
-            self.handle_monitor_update_completion_actions(action_opt);
         }
     }
 
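The last hunk changes the during-init path for claims against closed channels: rather than immediately running the completion action (the removed `mem::drop` and `handle_monitor_update_completion_actions` calls), the preimage update is recorded in `peer_state.in_flight_monitor_updates` and queued as a `BackgroundEvent::MonitorUpdateRegeneratedOnStartup`, while the action waits in `monitor_update_blocked_actions`. A minimal, self-contained sketch of that ordering follows, with simplified stand-in types (`Manager`, `BackgroundUpdate`, the string "action", and the synchronous replay in `process_background_updates` are illustrative assumptions, not LDK's API):

```rust
#[derive(Clone)]
struct MonitorUpdate { update_id: u64 }

struct BackgroundUpdate { channel: u64, update: MonitorUpdate }

#[derive(Default)]
struct Manager {
    during_init: bool,
    pending_background_updates: Vec<BackgroundUpdate>,
    in_flight: Vec<(u64, u64)>,          // (channel id, update id) not yet persisted
    blocked_actions: Vec<(u64, String)>, // completion actions gated on in-flight updates
}

impl Manager {
    fn claim_on_closed_channel(&mut self, channel: u64, update: MonitorUpdate, action: String) {
        // The completion action is always queued...
        self.blocked_actions.push((channel, action));
        if self.during_init {
            // ...but during init the monitor update itself is deferred: record it as
            // in-flight and replay it as a background event after startup.
            self.in_flight.push((channel, update.update_id));
            self.pending_background_updates.push(BackgroundUpdate { channel, update });
        } else {
            // Outside init the update would be applied immediately (elided here).
        }
    }

    fn process_background_updates(&mut self) {
        for bg in self.pending_background_updates.drain(..) {
            // Pretend persistence finishes synchronously; in reality completion is
            // signalled later by whatever persists the monitor.
            self.in_flight
                .retain(|&(chan, id)| !(chan == bg.channel && id == bg.update.update_id));
        }
        // Once nothing is in flight for a channel, its blocked actions may run.
        let in_flight = &self.in_flight;
        self.blocked_actions.retain(|(chan, action)| {
            let still_blocked = in_flight.iter().any(|&(c, _)| c == *chan);
            if !still_blocked {
                println!("running completion action: {action}");
            }
            still_blocked
        });
    }
}

fn main() {
    let mut mgr = Manager { during_init: true, ..Default::default() };
    mgr.claim_on_closed_channel(1, MonitorUpdate { update_id: 7 }, "emit PaymentClaimed".to_string());
    mgr.during_init = false;
    mgr.process_background_updates(); // the action only runs after the replayed update completes
}
```

The effect is that the completion action no longer fires until the replayed monitor update has actually completed, which is what the removed TODO asked for.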