Skip to content

Commit 722f389

Browse files
committed
Always process inbound shutdown ChannelMonitorUpdates asynchronously
We currently have two codepaths on most channel update functions - most methods return a set of messages to send to a peer iff the `ChannelMonitorUpdate` succeeds, but if it does not we push the messages back into the `Channel` and then pull them back out when the `ChannelMonitorUpdate` completes and send them then. This adds a substantial amount of complexity in very critical codepaths. Instead, here we swap all our channel update codepaths to immediately set the channel-update-required flag and only return a `ChannelMonitorUpdate` to the `ChannelManager`. Internally in the `Channel` we store a queue of `ChannelMonitorUpdate`s, which will become critical in future work to surface pending `ChannelMonitorUpdate`s to users at startup so they can complete. This leaves some redundant work in `Channel` to be cleaned up later. Specifically, we still generate the messages which we will now ignore and regenerate later. This commit updates the `ChannelMonitorUpdate` pipeline for handling inbound `shutdown` messages.
1 parent b766354 commit 722f389

File tree

3 files changed

+28
-18
lines changed

3 files changed

+28
-18
lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2637,7 +2637,15 @@ fn test_permanent_error_during_handling_shutdown() {
26372637
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
26382638
let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
26392639
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
2640-
check_closed_broadcast!(nodes[1], true);
2640+
2641+
// We always send the `shutdown` response when receiving a shutdown, even if we immediately
2642+
// close the channel thereafter.
2643+
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2644+
assert_eq!(msg_events.len(), 3);
2645+
if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
2646+
if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
2647+
if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
2648+
26412649
check_added_monitors!(nodes[1], 2);
26422650
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
26432651
}

lightning/src/ln/channel.rs

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4251,7 +4251,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
42514251

42524252
pub fn shutdown<SP: Deref>(
42534253
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4254-
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4254+
) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
42554255
where SP::Target: SignerProvider
42564256
{
42574257
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4307,12 +4307,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
43074307

43084308
let monitor_update = if update_shutdown_script {
43094309
self.latest_monitor_update_id += 1;
4310-
Some(ChannelMonitorUpdate {
4310+
let monitor_update = ChannelMonitorUpdate {
43114311
update_id: self.latest_monitor_update_id,
43124312
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
43134313
scriptpubkey: self.get_closing_scriptpubkey(),
43144314
}],
4315-
})
4315+
};
4316+
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4317+
self.pending_monitor_updates.push(monitor_update);
4318+
Some(self.pending_monitor_updates.last().unwrap())
43164319
} else { None };
43174320
let shutdown = if send_shutdown {
43184321
Some(msgs::Shutdown {

lightning/src/ln/channelmanager.rs

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4618,27 +4618,27 @@ where
46184618
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
46194619
}
46204620

4621-
let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
4621+
let funding_txo_opt = chan_entry.get().get_funding_txo();
4622+
let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
4623+
chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
46224624
dropped_htlcs = htlcs;
46234625

4624-
// Update the monitor with the shutdown script if necessary.
4625-
if let Some(monitor_update) = monitor_update {
4626-
let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
4627-
let (result, is_permanent) =
4628-
handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
4629-
if is_permanent {
4630-
remove_channel!(self, chan_entry);
4631-
break result;
4632-
}
4633-
}
4634-
46354626
if let Some(msg) = shutdown {
4627+
// We can send the `shutdown` message before updating the `ChannelMonitor`
4628+
// here as we don't need the monitor update to complete until we send a
4629+
// `closing_signed`, which we'll delay if we're pending a monitor update.
46364630
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
46374631
node_id: *counterparty_node_id,
46384632
msg,
46394633
});
46404634
}
46414635

4636+
// Update the monitor with the shutdown script if necessary.
4637+
if let Some(monitor_update) = monitor_update_opt {
4638+
let update_id = monitor_update.update_id;
4639+
let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
4640+
break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
4641+
}
46424642
break Ok(());
46434643
},
46444644
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -4650,8 +4650,7 @@ where
46504650
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
46514651
}
46524652

4653-
let _ = handle_error!(self, result, *counterparty_node_id);
4654-
Ok(())
4653+
result
46554654
}
46564655

46574656
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {

0 commit comments

Comments
 (0)