@@ -3291,36 +3291,6 @@ macro_rules! emit_initial_channel_ready_event {
32913291 };
32923292}
32933293
3294- macro_rules! handle_new_monitor_update {
3295- (
3296- $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3297- $per_peer_state_lock: expr, $chan: expr
3298- ) => {{
3299- let (update_completed, all_updates_complete) = $self.handle_new_monitor_update_internal(
3300- &mut $peer_state.in_flight_monitor_updates,
3301- $chan.context.channel_id(),
3302- $funding_txo,
3303- $chan.context.get_counterparty_node_id(),
3304- $update,
3305- );
3306- if all_updates_complete {
3307- let completion_data = $self.prepare_monitor_update_completion_data(
3308- &mut $peer_state.in_flight_monitor_updates,
3309- &mut $peer_state.monitor_update_blocked_actions,
3310- &mut $peer_state.pending_msg_events,
3311- $peer_state.is_connected,
3312- $chan,
3313- );
3314-
3315- mem::drop($peer_state_lock);
3316- mem::drop($per_peer_state_lock);
3317-
3318- $self.handle_monitor_update_completion_data(completion_data);
3319- }
3320- update_completed
3321- }};
3322- }
3323-
33243294fn convert_channel_err_internal<
33253295 Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>),
33263296>(
@@ -3982,15 +3952,19 @@ where
39823952
39833953 // Update the monitor with the shutdown script if necessary.
39843954 if let Some(monitor_update) = monitor_update_opt.take() {
3985- handle_new_monitor_update!(
3986- self,
3955+ if let Some(data) = self.handle_new_monitor_update(
3956+ &mut peer_state.in_flight_monitor_updates,
3957+ &mut peer_state.monitor_update_blocked_actions,
3958+ &mut peer_state.pending_msg_events,
3959+ peer_state.is_connected,
3960+ chan,
39873961 funding_txo_opt.unwrap(),
39883962 monitor_update,
3989- peer_state_lock,
3990- peer_state,
3991- per_peer_state,
3992- chan
3993- );
3963+ ) {
3964+ mem::drop(peer_state_lock);
3965+ 							mem::drop(per_peer_state);
3966+ self.handle_monitor_update_completion_data(data);
3967+ }
39943968 }
39953969 } else {
39963970 let reason = ClosureReason::LocallyCoopClosedUnfundedChannel;
@@ -4115,8 +4089,19 @@ where
41154089 match peer_state.channel_by_id.entry(channel_id) {
41164090 hash_map::Entry::Occupied(mut chan_entry) => {
41174091 if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
4118- handle_new_monitor_update!(self, funding_txo,
4119- monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
4092+ if let Some(data) = self.handle_new_monitor_update(
4093+ &mut peer_state.in_flight_monitor_updates,
4094+ &mut peer_state.monitor_update_blocked_actions,
4095+ &mut peer_state.pending_msg_events,
4096+ peer_state.is_connected,
4097+ chan,
4098+ funding_txo,
4099+ monitor_update,
4100+ ) {
4101+ mem::drop(peer_state_lock);
4102+ mem::drop(per_peer_state);
4103+ self.handle_monitor_update_completion_data(data);
4104+ }
41204105 return;
41214106 } else {
41224107 debug_assert!(false, "We shouldn't have an update for a non-funded channel");
@@ -5260,16 +5245,22 @@ where
52605245 );
52615246 match break_channel_entry!(self, peer_state, send_res, chan_entry) {
52625247 Some(monitor_update) => {
5263- let ok = handle_new_monitor_update!(
5264- self,
5265- funding_txo,
5266- monitor_update,
5267- peer_state_lock,
5268- peer_state,
5269- per_peer_state,
5270- chan
5271- );
5272- if !ok {
5248+ let (update_completed, completion_data) = self
5249+ .handle_new_monitor_update_with_status(
5250+ &mut peer_state.in_flight_monitor_updates,
5251+ &mut peer_state.monitor_update_blocked_actions,
5252+ &mut peer_state.pending_msg_events,
5253+ peer_state.is_connected,
5254+ chan,
5255+ funding_txo,
5256+ monitor_update,
5257+ );
5258+ if let Some(data) = completion_data {
5259+ mem::drop(peer_state_lock);
5260+ mem::drop(per_peer_state);
5261+ self.handle_monitor_update_completion_data(data);
5262+ }
5263+ if !update_completed {
52735264 // Note that MonitorUpdateInProgress here indicates (per function
52745265 // docs) that we will resend the commitment update once monitor
52755266 // updating completes. Therefore, we must return an error
@@ -8933,15 +8924,19 @@ where
89338924 .or_insert_with(Vec::new)
89348925 .push(raa_blocker);
89358926 }
8936- handle_new_monitor_update!(
8937- self,
8927+ if let Some(data) = self.handle_new_monitor_update(
8928+ &mut peer_state.in_flight_monitor_updates,
8929+ &mut peer_state.monitor_update_blocked_actions,
8930+ &mut peer_state.pending_msg_events,
8931+ peer_state.is_connected,
8932+ chan,
89388933 prev_hop.funding_txo,
89398934 monitor_update,
8940- peer_state_lock,
8941- peer_state,
8942- per_peer_state,
8943- chan
8944- );
8935+ ) {
8936+ mem::drop(peer_state_lock);
8937+ 								mem::drop(per_peer_state);
8938+ self.handle_monitor_update_completion_data(data);
8939+ }
89458940 },
89468941 UpdateFulfillCommitFetch::DuplicateClaim {} => {
89478942 let (action_opt, raa_blocker_opt) = completion_action(None, true);
@@ -9718,6 +9713,75 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
97189713 }
97199714 }
97209715
9716+ 	/// Handles a new monitor update, optionally returning data to process after locks are released.
9717+ ///
9718+ /// This method extracts all data needed for post-update processing while locks are held,
9719+ /// allowing the caller to release locks before calling `handle_monitor_update_completion_data`.
9720+ ///
9721+ /// Returns `Some` if all in-flight updates are complete and the channel is awaiting monitor update.
9722+ ///
9723+ /// Note: This method takes individual fields from `PeerState` rather than the whole struct
9724+ /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`.
9725+ fn handle_new_monitor_update(
9726+ &self,
9727+ in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
9728+ monitor_update_blocked_actions: &mut BTreeMap<
9729+ ChannelId,
9730+ Vec<MonitorUpdateCompletionAction>,
9731+ >,
9732+ pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool,
9733+ chan: &mut FundedChannel<SP>, funding_txo: OutPoint, update: ChannelMonitorUpdate,
9734+ ) -> Option<MonitorUpdateCompletionData> {
9735+ self.handle_new_monitor_update_with_status(
9736+ in_flight_monitor_updates,
9737+ monitor_update_blocked_actions,
9738+ pending_msg_events,
9739+ is_connected,
9740+ chan,
9741+ funding_txo,
9742+ update,
9743+ )
9744+ .1
9745+ }
9746+
9747+ /// Like [`Self::handle_new_monitor_update`], but also returns whether this specific update
9748+ /// completed (as opposed to being in-progress).
9749+ fn handle_new_monitor_update_with_status(
9750+ &self,
9751+ in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
9752+ monitor_update_blocked_actions: &mut BTreeMap<
9753+ ChannelId,
9754+ Vec<MonitorUpdateCompletionAction>,
9755+ >,
9756+ pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool,
9757+ chan: &mut FundedChannel<SP>, funding_txo: OutPoint, update: ChannelMonitorUpdate,
9758+ ) -> (bool, Option<MonitorUpdateCompletionData>) {
9759+ let chan_id = chan.context.channel_id();
9760+ let counterparty_node_id = chan.context.get_counterparty_node_id();
9761+
9762+ let (update_completed, all_updates_complete) = self.handle_new_monitor_update_internal(
9763+ in_flight_monitor_updates,
9764+ chan_id,
9765+ funding_txo,
9766+ counterparty_node_id,
9767+ update,
9768+ );
9769+
9770+ let completion_data = if all_updates_complete {
9771+ Some(self.prepare_monitor_update_completion_data(
9772+ in_flight_monitor_updates,
9773+ monitor_update_blocked_actions,
9774+ pending_msg_events,
9775+ is_connected,
9776+ chan,
9777+ ))
9778+ } else {
9779+ None
9780+ };
9781+
9782+ (update_completed, completion_data)
9783+ }
9784+
97219785 /// Prepares data for monitor update completion while locks are still held.
97229786 /// This extracts all necessary data from the channel and peer state fields.
97239787 ///
@@ -11325,15 +11389,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1132511389 }
1132611390 // Update the monitor with the shutdown script if necessary.
1132711391 if let Some(monitor_update) = monitor_update_opt {
11328- handle_new_monitor_update!(
11329- self,
11392+ if let Some(data) = self.handle_new_monitor_update(
11393+ &mut peer_state.in_flight_monitor_updates,
11394+ &mut peer_state.monitor_update_blocked_actions,
11395+ &mut peer_state.pending_msg_events,
11396+ peer_state.is_connected,
11397+ chan,
1133011398 funding_txo_opt.unwrap(),
1133111399 monitor_update,
11332- peer_state_lock,
11333- peer_state,
11334- per_peer_state,
11335- chan
11336- );
11400+ ) {
11401+ mem::drop(peer_state_lock);
11402+ 								mem::drop(per_peer_state);
11403+ self.handle_monitor_update_completion_data(data);
11404+ }
1133711405 }
1133811406 },
1133911407 None => {
@@ -11646,8 +11714,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1164611714 try_channel_entry!(self, peer_state, Err(err), chan_entry)
1164711715 }
1164811716 } else if let Some(monitor_update) = monitor_update_opt {
11649- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
11650- peer_state, per_peer_state, chan);
11717+ if let Some(data) = self.handle_new_monitor_update(
11718+ &mut peer_state.in_flight_monitor_updates,
11719+ &mut peer_state.monitor_update_blocked_actions,
11720+ &mut peer_state.pending_msg_events,
11721+ peer_state.is_connected,
11722+ chan,
11723+ funding_txo.unwrap(),
11724+ monitor_update,
11725+ ) {
11726+ mem::drop(peer_state_lock);
11727+ mem::drop(per_peer_state);
11728+ self.handle_monitor_update_completion_data(data);
11729+ }
1165111730 }
1165211731 }
1165311732 Ok(())
@@ -11677,10 +11756,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1167711756 );
1167811757
1167911758 if let Some(monitor_update) = monitor_update_opt {
11680- handle_new_monitor_update!(
11681- self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state,
11682- per_peer_state, chan
11683- );
11759+ if let Some(data) = self.handle_new_monitor_update(
11760+ &mut peer_state.in_flight_monitor_updates,
11761+ &mut peer_state.monitor_update_blocked_actions,
11762+ &mut peer_state.pending_msg_events,
11763+ peer_state.is_connected,
11764+ chan,
11765+ funding_txo.unwrap(),
11766+ monitor_update,
11767+ ) {
11768+ mem::drop(peer_state_lock);
11769+ mem::drop(per_peer_state);
11770+ self.handle_monitor_update_completion_data(data);
11771+ }
1168411772 }
1168511773 }
1168611774 Ok(())
@@ -11917,8 +12005,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1191712005 if let Some(monitor_update) = monitor_update_opt {
1191812006 let funding_txo = funding_txo_opt
1191912007 .expect("Funding outpoint must have been set for RAA handling to succeed");
11920- handle_new_monitor_update!(self, funding_txo, monitor_update,
11921- peer_state_lock, peer_state, per_peer_state, chan);
12008+ if let Some(data) = self.handle_new_monitor_update(
12009+ &mut peer_state.in_flight_monitor_updates,
12010+ &mut peer_state.monitor_update_blocked_actions,
12011+ &mut peer_state.pending_msg_events,
12012+ peer_state.is_connected,
12013+ chan,
12014+ funding_txo,
12015+ monitor_update,
12016+ ) {
12017+ mem::drop(peer_state_lock);
12018+ mem::drop(per_peer_state);
12019+ self.handle_monitor_update_completion_data(data);
12020+ }
1192212021 }
1192312022 (htlcs_to_fail, static_invoices)
1192412023 } else {
@@ -12396,15 +12495,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1239612495 }
1239712496
1239812497 if let Some(monitor_update) = splice_promotion.monitor_update {
12399- handle_new_monitor_update!(
12400- self,
12498+ if let Some(data) = self.handle_new_monitor_update(
12499+ &mut peer_state.in_flight_monitor_updates,
12500+ &mut peer_state.monitor_update_blocked_actions,
12501+ &mut peer_state.pending_msg_events,
12502+ peer_state.is_connected,
12503+ chan,
1240112504 splice_promotion.funding_txo,
1240212505 monitor_update,
12403- peer_state_lock,
12404- peer_state,
12405- per_peer_state,
12406- chan
12407- );
12506+ ) {
12507+ mem::drop(peer_state_lock);
12508+ 								mem::drop(per_peer_state);
12509+ self.handle_monitor_update_completion_data(data);
12510+ }
1240812511 }
1240912512 }
1241012513 } else {
@@ -12592,15 +12695,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1259212695 if let Some(monitor_update) = monitor_opt {
1259312696 has_monitor_update = true;
1259412697
12595- handle_new_monitor_update!(
12596- self,
12698+ if let Some(data) = self.handle_new_monitor_update(
12699+ &mut peer_state.in_flight_monitor_updates,
12700+ &mut peer_state.monitor_update_blocked_actions,
12701+ &mut peer_state.pending_msg_events,
12702+ peer_state.is_connected,
12703+ chan,
1259712704 funding_txo.unwrap(),
1259812705 monitor_update,
12599- peer_state_lock,
12600- peer_state,
12601- per_peer_state,
12602- chan
12603- );
12706+ ) {
12707+ mem::drop(peer_state_lock);
12708+ 							mem::drop(per_peer_state);
12709+ self.handle_monitor_update_completion_data(data);
12710+ }
1260412711 continue 'peer_loop;
1260512712 }
1260612713 }
@@ -14029,8 +14136,19 @@ where
1402914136 if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
1403014137 log_debug!(logger, "Unlocking monitor updating and updating monitor",
1403114138 );
14032- handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
14033- peer_state_lck, peer_state, per_peer_state, chan);
14139+ if let Some(data) = self.handle_new_monitor_update(
14140+ &mut peer_state.in_flight_monitor_updates,
14141+ &mut peer_state.monitor_update_blocked_actions,
14142+ &mut peer_state.pending_msg_events,
14143+ peer_state.is_connected,
14144+ chan,
14145+ channel_funding_outpoint,
14146+ monitor_update,
14147+ ) {
14148+ mem::drop(peer_state_lck);
14149+ mem::drop(per_peer_state);
14150+ self.handle_monitor_update_completion_data(data);
14151+ }
1403414152 if further_update_exists {
1403514153 // If there are more `ChannelMonitorUpdate`s to process, restart at the
1403614154 // top of the loop.
0 commit comments