@@ -3289,36 +3289,6 @@ macro_rules! emit_initial_channel_ready_event {
32893289 };
32903290}
32913291
3292- macro_rules! handle_new_monitor_update {
3293- (
3294- $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3295- $per_peer_state_lock: expr, $chan: expr
3296- ) => {{
3297- let (update_completed, all_updates_complete) = $self.handle_new_monitor_update_internal(
3298- &mut $peer_state.in_flight_monitor_updates,
3299- $chan.context.channel_id(),
3300- $funding_txo,
3301- $chan.context.get_counterparty_node_id(),
3302- $update,
3303- );
3304- if all_updates_complete {
3305- let completion_data = $self.prepare_monitor_update_completion_data(
3306- &mut $peer_state.in_flight_monitor_updates,
3307- &mut $peer_state.monitor_update_blocked_actions,
3308- &mut $peer_state.pending_msg_events,
3309- $peer_state.is_connected,
3310- $chan,
3311- );
3312-
3313- mem::drop($peer_state_lock);
3314- mem::drop($per_peer_state_lock);
3315-
3316- $self.handle_monitor_update_completion_data(completion_data);
3317- }
3318- update_completed
3319- }};
3320- }
3321-
33223292fn convert_channel_err_internal<
33233293 Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>),
33243294>(
@@ -3980,15 +3950,19 @@ where
39803950
39813951 // Update the monitor with the shutdown script if necessary.
39823952 if let Some(monitor_update) = monitor_update_opt.take() {
3983- handle_new_monitor_update!(
3984- self,
3953+ if let Some(data) = self.handle_new_monitor_update(
3954+ &mut peer_state.in_flight_monitor_updates,
3955+ &mut peer_state.monitor_update_blocked_actions,
3956+ &mut peer_state.pending_msg_events,
3957+ peer_state.is_connected,
3958+ chan,
39853959 funding_txo_opt.unwrap(),
39863960 monitor_update,
3987- peer_state_lock,
3988- peer_state,
3989- per_peer_state,
3990- chan
3991- );
3961+ ) {
3962+ mem::drop(peer_state_lock);
3963+ mem::drop(per_peer_state);
3964+ self.handle_monitor_update_completion_data(data);
3965+ }
39923966 }
39933967 } else {
39943968 let reason = ClosureReason::LocallyCoopClosedUnfundedChannel;
@@ -4113,8 +4087,19 @@ where
41134087 match peer_state.channel_by_id.entry(channel_id) {
41144088 hash_map::Entry::Occupied(mut chan_entry) => {
41154089 if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
4116- handle_new_monitor_update!(self, funding_txo,
4117- monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
4090+ if let Some(data) = self.handle_new_monitor_update(
4091+ &mut peer_state.in_flight_monitor_updates,
4092+ &mut peer_state.monitor_update_blocked_actions,
4093+ &mut peer_state.pending_msg_events,
4094+ peer_state.is_connected,
4095+ chan,
4096+ funding_txo,
4097+ monitor_update,
4098+ ) {
4099+ mem::drop(peer_state_lock);
4100+ mem::drop(per_peer_state);
4101+ self.handle_monitor_update_completion_data(data);
4102+ }
41184103 return;
41194104 } else {
41204105 debug_assert!(false, "We shouldn't have an update for a non-funded channel");
@@ -5258,16 +5243,22 @@ where
52585243 );
52595244 match break_channel_entry!(self, peer_state, send_res, chan_entry) {
52605245 Some(monitor_update) => {
5261- let ok = handle_new_monitor_update!(
5262- self,
5263- funding_txo,
5264- monitor_update,
5265- peer_state_lock,
5266- peer_state,
5267- per_peer_state,
5268- chan
5269- );
5270- if !ok {
5246+ let (update_completed, completion_data) = self
5247+ .handle_new_monitor_update_with_status(
5248+ &mut peer_state.in_flight_monitor_updates,
5249+ &mut peer_state.monitor_update_blocked_actions,
5250+ &mut peer_state.pending_msg_events,
5251+ peer_state.is_connected,
5252+ chan,
5253+ funding_txo,
5254+ monitor_update,
5255+ );
5256+ if let Some(data) = completion_data {
5257+ mem::drop(peer_state_lock);
5258+ mem::drop(per_peer_state);
5259+ self.handle_monitor_update_completion_data(data);
5260+ }
5261+ if !update_completed {
52715262 // Note that MonitorUpdateInProgress here indicates (per function
52725263 // docs) that we will resend the commitment update once monitor
52735264 // updating completes. Therefore, we must return an error
@@ -8931,15 +8922,19 @@ where
89318922 .or_insert_with(Vec::new)
89328923 .push(raa_blocker);
89338924 }
8934- handle_new_monitor_update!(
8935- self,
8925+ if let Some(data) = self.handle_new_monitor_update(
8926+ &mut peer_state.in_flight_monitor_updates,
8927+ &mut peer_state.monitor_update_blocked_actions,
8928+ &mut peer_state.pending_msg_events,
8929+ peer_state.is_connected,
8930+ chan,
89368931 prev_hop.funding_txo,
89378932 monitor_update,
8938- peer_state_lock,
8939- peer_state,
8940- per_peer_state,
8941- chan
8942- );
8933+ ) {
8934+ mem::drop(peer_state_lock);
8935+ mem::drop(per_peer_state);
8936+ self.handle_monitor_update_completion_data(data);
8937+ }
89438938 },
89448939 UpdateFulfillCommitFetch::DuplicateClaim {} => {
89458940 let (action_opt, raa_blocker_opt) = completion_action(None, true);
@@ -9716,6 +9711,75 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
97169711 }
97179712 }
97189713
9714+ /// Handles a new monitor update, optionally returning data to process after locks are released.
9715+ ///
9716+ /// This method extracts all data needed for post-update processing while locks are held,
9717+ /// allowing the caller to release locks before calling `handle_monitor_update_completion_data`.
9718+ ///
9719+ /// Returns `Some` if all in-flight updates are complete and the channel is awaiting monitor update.
9720+ ///
9721+ /// Note: This method takes individual fields from `PeerState` rather than the whole struct
9722+ /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`.
9723+ fn handle_new_monitor_update(
9724+ &self,
9725+ in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
9726+ monitor_update_blocked_actions: &mut BTreeMap<
9727+ ChannelId,
9728+ Vec<MonitorUpdateCompletionAction>,
9729+ >,
9730+ pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool,
9731+ chan: &mut FundedChannel<SP>, funding_txo: OutPoint, update: ChannelMonitorUpdate,
9732+ ) -> Option<MonitorUpdateCompletionData> {
9733+ self.handle_new_monitor_update_with_status(
9734+ in_flight_monitor_updates,
9735+ monitor_update_blocked_actions,
9736+ pending_msg_events,
9737+ is_connected,
9738+ chan,
9739+ funding_txo,
9740+ update,
9741+ )
9742+ .1
9743+ }
9744+
9745+ /// Like [`Self::handle_new_monitor_update`], but also returns whether this specific update
9746+ /// completed (as opposed to being in-progress).
9747+ fn handle_new_monitor_update_with_status(
9748+ &self,
9749+ in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
9750+ monitor_update_blocked_actions: &mut BTreeMap<
9751+ ChannelId,
9752+ Vec<MonitorUpdateCompletionAction>,
9753+ >,
9754+ pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool,
9755+ chan: &mut FundedChannel<SP>, funding_txo: OutPoint, update: ChannelMonitorUpdate,
9756+ ) -> (bool, Option<MonitorUpdateCompletionData>) {
9757+ let chan_id = chan.context.channel_id();
9758+ let counterparty_node_id = chan.context.get_counterparty_node_id();
9759+
9760+ let (update_completed, all_updates_complete) = self.handle_new_monitor_update_internal(
9761+ in_flight_monitor_updates,
9762+ chan_id,
9763+ funding_txo,
9764+ counterparty_node_id,
9765+ update,
9766+ );
9767+
9768+ let completion_data = if all_updates_complete {
9769+ Some(self.prepare_monitor_update_completion_data(
9770+ in_flight_monitor_updates,
9771+ monitor_update_blocked_actions,
9772+ pending_msg_events,
9773+ is_connected,
9774+ chan,
9775+ ))
9776+ } else {
9777+ None
9778+ };
9779+
9780+ (update_completed, completion_data)
9781+ }
9782+
97199783 /// Prepares data for monitor update completion while locks are still held.
97209784 /// This extracts all necessary data from the channel and peer state fields.
97219785 ///
@@ -11323,15 +11387,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1132311387 }
1132411388 // Update the monitor with the shutdown script if necessary.
1132511389 if let Some(monitor_update) = monitor_update_opt {
11326- handle_new_monitor_update!(
11327- self,
11390+ if let Some(data) = self.handle_new_monitor_update(
11391+ &mut peer_state.in_flight_monitor_updates,
11392+ &mut peer_state.monitor_update_blocked_actions,
11393+ &mut peer_state.pending_msg_events,
11394+ peer_state.is_connected,
11395+ chan,
1132811396 funding_txo_opt.unwrap(),
1132911397 monitor_update,
11330- peer_state_lock,
11331- peer_state,
11332- per_peer_state,
11333- chan
11334- );
11398+ ) {
11399+ mem::drop(peer_state_lock);
11400+ mem::drop(per_peer_state);
11401+ self.handle_monitor_update_completion_data(data);
11402+ }
1133511403 }
1133611404 },
1133711405 None => {
@@ -11644,8 +11712,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1164411712 try_channel_entry!(self, peer_state, Err(err), chan_entry)
1164511713 }
1164611714 } else if let Some(monitor_update) = monitor_update_opt {
11647- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
11648- peer_state, per_peer_state, chan);
11715+ if let Some(data) = self.handle_new_monitor_update(
11716+ &mut peer_state.in_flight_monitor_updates,
11717+ &mut peer_state.monitor_update_blocked_actions,
11718+ &mut peer_state.pending_msg_events,
11719+ peer_state.is_connected,
11720+ chan,
11721+ funding_txo.unwrap(),
11722+ monitor_update,
11723+ ) {
11724+ mem::drop(peer_state_lock);
11725+ mem::drop(per_peer_state);
11726+ self.handle_monitor_update_completion_data(data);
11727+ }
1164911728 }
1165011729 }
1165111730 Ok(())
@@ -11675,10 +11754,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1167511754 );
1167611755
1167711756 if let Some(monitor_update) = monitor_update_opt {
11678- handle_new_monitor_update!(
11679- self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state,
11680- per_peer_state, chan
11681- );
11757+ if let Some(data) = self.handle_new_monitor_update(
11758+ &mut peer_state.in_flight_monitor_updates,
11759+ &mut peer_state.monitor_update_blocked_actions,
11760+ &mut peer_state.pending_msg_events,
11761+ peer_state.is_connected,
11762+ chan,
11763+ funding_txo.unwrap(),
11764+ monitor_update,
11765+ ) {
11766+ mem::drop(peer_state_lock);
11767+ mem::drop(per_peer_state);
11768+ self.handle_monitor_update_completion_data(data);
11769+ }
1168211770 }
1168311771 }
1168411772 Ok(())
@@ -11915,8 +12003,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1191512003 if let Some(monitor_update) = monitor_update_opt {
1191612004 let funding_txo = funding_txo_opt
1191712005 .expect("Funding outpoint must have been set for RAA handling to succeed");
11918- handle_new_monitor_update!(self, funding_txo, monitor_update,
11919- peer_state_lock, peer_state, per_peer_state, chan);
12006+ if let Some(data) = self.handle_new_monitor_update(
12007+ &mut peer_state.in_flight_monitor_updates,
12008+ &mut peer_state.monitor_update_blocked_actions,
12009+ &mut peer_state.pending_msg_events,
12010+ peer_state.is_connected,
12011+ chan,
12012+ funding_txo,
12013+ monitor_update,
12014+ ) {
12015+ mem::drop(peer_state_lock);
12016+ mem::drop(per_peer_state);
12017+ self.handle_monitor_update_completion_data(data);
12018+ }
1192012019 }
1192112020 (htlcs_to_fail, static_invoices)
1192212021 } else {
@@ -12394,15 +12493,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1239412493 }
1239512494
1239612495 if let Some(monitor_update) = splice_promotion.monitor_update {
12397- handle_new_monitor_update!(
12398- self,
12496+ if let Some(data) = self.handle_new_monitor_update(
12497+ &mut peer_state.in_flight_monitor_updates,
12498+ &mut peer_state.monitor_update_blocked_actions,
12499+ &mut peer_state.pending_msg_events,
12500+ peer_state.is_connected,
12501+ chan,
1239912502 splice_promotion.funding_txo,
1240012503 monitor_update,
12401- peer_state_lock,
12402- peer_state,
12403- per_peer_state,
12404- chan
12405- );
12504+ ) {
12505+ mem::drop(peer_state_lock);
12506+ mem::drop(per_peer_state);
12507+ self.handle_monitor_update_completion_data(data);
12508+ }
1240612509 }
1240712510 }
1240812511 } else {
@@ -12590,15 +12693,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1259012693 if let Some(monitor_update) = monitor_opt {
1259112694 has_monitor_update = true;
1259212695
12593- handle_new_monitor_update!(
12594- self,
12696+ if let Some(data) = self.handle_new_monitor_update(
12697+ &mut peer_state.in_flight_monitor_updates,
12698+ &mut peer_state.monitor_update_blocked_actions,
12699+ &mut peer_state.pending_msg_events,
12700+ peer_state.is_connected,
12701+ chan,
1259512702 funding_txo.unwrap(),
1259612703 monitor_update,
12597- peer_state_lock,
12598- peer_state,
12599- per_peer_state,
12600- chan
12601- );
12704+ ) {
12705+ mem::drop(peer_state_lock);
12706+ mem::drop(per_peer_state);
12707+ self.handle_monitor_update_completion_data(data);
12708+ }
1260212709 continue 'peer_loop;
1260312710 }
1260412711 }
@@ -14027,8 +14134,19 @@ where
1402714134 if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
1402814135 log_debug!(logger, "Unlocking monitor updating and updating monitor",
1402914136 );
14030- handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
14031- peer_state_lck, peer_state, per_peer_state, chan);
14137+ if let Some(data) = self.handle_new_monitor_update(
14138+ &mut peer_state.in_flight_monitor_updates,
14139+ &mut peer_state.monitor_update_blocked_actions,
14140+ &mut peer_state.pending_msg_events,
14141+ peer_state.is_connected,
14142+ chan,
14143+ channel_funding_outpoint,
14144+ monitor_update,
14145+ ) {
14146+ mem::drop(peer_state_lck);
14147+ mem::drop(per_peer_state);
14148+ self.handle_monitor_update_completion_data(data);
14149+ }
1403214150 if further_update_exists {
1403314151 // If there are more `ChannelMonitorUpdate`s to process, restart at the
1403414152 // top of the loop.
0 commit comments