Commit 64f678d
Only mark all mon updates complete if there are no blocked updates
In `handle_new_monitor_update!`, we correctly check that the channel doesn't have any blocked monitor updates pending before calling `handle_monitor_update_completion!` (which calls `Channel::monitor_updating_restored`, which in turn assumes that all generated `ChannelMonitorUpdate`s, including blocked ones, have completed).

We, however, did not do the same check at several other places where we called `handle_monitor_update_completion!`. Specifically, when a monitor update completes during reload (processed via a `BackgroundEvent`) or when a monitor update completes asynchronously, we did not check whether there were any blocked monitor updates before completing.

Here we add the missing checks, as well as an assertion in `Channel::monitor_updating_restored`.

Conflicts resolved in:
* lightning/src/ln/chanmon_update_fail_tests.rs due to `rustfmt`-induced changes as well as other test cleanups.
* lightning/src/ln/channelmanager.rs due to upstream Channel object refactoring
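The invariant being enforced can be summarized with a short sketch (simplified pseudocode, not the actual LDK source: `FundedChannel` and `handle_monitor_update_completion` are hypothetical stand-ins for the real channel type and the `handle_monitor_update_completion!` macro):

// Sketch of the guard this commit adds at each completion path.
fn on_monitor_update_completed(chan: &mut FundedChannel) {
    // `monitor_updating_restored` assumes that *all* generated
    // `ChannelMonitorUpdate`s, including blocked ones, have completed, so we
    // must not resume the channel while blocked updates are still queued.
    if chan.blocked_monitor_updates_pending() == 0 {
        handle_monitor_update_completion(chan);
    }
    // Otherwise leave the channel paused; completion re-runs once the
    // blocking updates are released and themselves complete.
}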
1 parent 34b0f4b commit 64f678d

File tree

3 files changed: +77, -14 lines

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 67 additions & 11 deletions
@@ -2931,16 +2931,28 @@ fn test_inbound_reload_without_init_mon() {
 	do_test_inbound_reload_without_init_mon(false, false);
 }
 
-#[test]
-fn test_blocked_chan_preimage_release() {
+#[derive(PartialEq, Eq)]
+enum BlockedUpdateComplMode {
+	Async,
+	AtReload,
+	Sync,
+}
+
+fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode) {
 	// Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
 	// be handled HTLC preimage `ChannelMonitorUpdate`s will still go out.
 	let chanmon_cfgs = create_chanmon_cfgs(3);
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let persister;
+	let new_chain_mon;
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes_1_reload;
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
 	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
@@ -2968,25 +2980,62 @@ fn test_blocked_chan_preimage_release() {
 	expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);
 
 	let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	if completion_mode != BlockedUpdateComplMode::Sync {
+		// We used to incorrectly handle monitor update completion in cases where we completed a
+		// monitor update async or after reload. We test both based on the `completion_mode`.
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	}
 	nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
 	check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
 	assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	if completion_mode == BlockedUpdateComplMode::AtReload {
+		let node_ser = nodes[1].node.encode();
+		let chan_mon_0 = get_monitor!(nodes[1], chan_id_1).encode();
+		let chan_mon_1 = get_monitor!(nodes[1], chan_id_2).encode();
+
+		let mons = &[&chan_mon_0[..], &chan_mon_1[..]];
+		reload_node!(nodes[1], &node_ser, mons, persister, new_chain_mon, nodes_1_reload);
+
+		nodes[0].node.peer_disconnected(node_b_id);
+		nodes[2].node.peer_disconnected(node_b_id);
+
+		let mut a_b_reconnect = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		a_b_reconnect.pending_htlc_claims.1 = 1;
+		// Note that we will expect no final RAA monitor update in
+		// `commitment_signed_dance_through_cp_raa` during the reconnect, matching the below case.
+		reconnect_nodes(a_b_reconnect);
+		reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1]));
+	} else if completion_mode == BlockedUpdateComplMode::Async {
+		let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_2).unwrap().clone();
+		nodes[1]
+			.chain_monitor
+			.chain_monitor
+			.channel_monitor_updated(outpoint, latest_update)
+			.unwrap();
+	}
 
 	// Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the
 	// update_fulfill_htlc + CS is held, even though the preimage is already on disk for the
 	// channel.
-	nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
-	check_added_monitors(&nodes[1], 1);
-	let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
-	assert!(a.is_none());
+	// Note that when completing as a side effect of a reload we completed the CS dance in
+	// `reconnect_nodes` above.
+	if completion_mode != BlockedUpdateComplMode::AtReload {
+		nodes[1].node.handle_commitment_signed(
+			node_a_id,
+			&as_htlc_fulfill_updates.commitment_signed,
+		);
+		check_added_monitors(&nodes[1], 1);
+		let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+		assert!(a.is_none());
 
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa);
-	check_added_monitors(&nodes[1], 0);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
+		check_added_monitors(&nodes[1], 0);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	}
 
 	let events = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 3);
+	assert_eq!(events.len(), 3, "{events:?}");
 	if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
 	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
 	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
@@ -3004,6 +3053,13 @@ fn test_blocked_chan_preimage_release() {
 	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
 
+#[test]
+fn test_blocked_chan_preimage_release() {
+	do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::AtReload);
+	do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Sync);
+	do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Async);
+}
+
 fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
 	// When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
 	// from the downstream channel, we immediately claim the HTLC on the upstream channel, before

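As a reading aid, the three `BlockedUpdateComplMode` variants exercise three distinct completion paths. The summary below restates the test flow as doc comments on the enum (an informal gloss, not code from the commit):

// Informal gloss of the completion paths exercised by the test above.
enum BlockedUpdateComplMode {
	/// Monitor persistence succeeds inline, so completion runs synchronously
	/// while the RAA-blocking update is still queued.
	Sync,
	/// Persistence returns `InProgress`; completion is signaled later via
	/// `ChainMonitor::channel_monitor_updated`, hitting the async path.
	Async,
	/// nodes[1] is serialized and reloaded while the update is in flight, so
	/// completion is processed as a `BackgroundEvent` on startup.
	AtReload,
}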
lightning/src/ln/channel.rs

Lines changed: 1 addition & 0 deletions
@@ -5941,6 +5941,7 @@ impl<SP: Deref> Channel<SP> where
 	{
 		assert!(self.context.channel_state.is_monitor_update_in_progress());
 		self.context.channel_state.clear_monitor_update_in_progress();
+		assert_eq!(self.blocked_monitor_updates_pending(), 0);
 
 		// If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
 		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we

lightning/src/ln/channelmanager.rs

Lines changed: 9 additions & 3 deletions
@@ -6347,7 +6347,9 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
-			handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+			if chan.blocked_monitor_updates_pending() == 0 {
+				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+			}
 		} else {
 			let update_actions = peer_state.monitor_update_blocked_actions
 				.remove(&channel_id).unwrap_or(Vec::new());
@@ -7625,8 +7627,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
 		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
 			if chan.is_awaiting_monitor_update() {
-				log_trace!(logger, "Channel is open and awaiting update, resuming it");
-				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+				if chan.blocked_monitor_updates_pending() == 0 {
+					log_trace!(logger, "Channel is open and awaiting update, resuming it");
+					handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
+				} else {
+					log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update");
+				}
 			} else {
 				log_trace!(logger, "Channel is open but not awaiting update");
 			}
