
Commit d2dfab0

Consider quiescence state when freeing holding cells upon revoke_and_ack
We previously would avoid freeing our holding cells upon a `revoke_and_ack` if a monitor update was in progress, which we checked explicitly. With quiescence, if we've already sent `stfu`, we're not allowed to make further commitment updates, so we must also avoid freeing our holding cells in such cases.
1 parent 5321673 commit d2dfab0
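
For illustration, the rule the commit message describes can be reduced to a single predicate. The sketch below is not LDK code; the function and parameter names are hypothetical stand-ins for the channel-state flags involved (a pending ChannelMonitorUpdate, and a locally sent `stfu`):

// Hypothetical sketch of the rule above, not the actual lightning crate API.
fn may_free_holding_cell(monitor_update_in_progress: bool, local_stfu_sent: bool) -> bool {
    // Freeing the holding cell produces a new commitment update, which is not
    // allowed while a monitor update is pending or after we've sent `stfu`.
    !monitor_update_in_progress && !local_stfu_sent
}

fn main() {
    // Previously handled: a pending monitor update blocks the free.
    assert!(!may_free_holding_cell(true, false));
    // Newly handled by this commit: we've sent `stfu`, so we must also hold back.
    assert!(!may_free_holding_cell(false, true));
    // Otherwise the holding cell can be freed as usual.
    assert!(may_free_holding_cell(false, false));
}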

File tree

2 files changed (+146, -11 lines)

lightning/src/ln/channel.rs

Lines changed: 20 additions & 11 deletions
@@ -6086,14 +6086,8 @@ impl<SP: Deref> FundedChannel<SP> where
         self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
 
         if self.context.channel_state.is_monitor_update_in_progress() {
-            // We can't actually generate a new commitment transaction (incl by freeing holding
-            // cells) while we can't update the monitor, so we just return what we have.
             if require_commitment {
                 self.context.monitor_pending_commitment_signed = true;
-                // When the monitor updating is restored we'll call
-                // get_last_commitment_update_for_send(), which does not update state, but we're
-                // definitely now awaiting a remote revoke before we can step forward any more, so
-                // set it here.
                 let mut additional_update = self.build_commitment_no_status_check(logger);
                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                 // strictly increasing by one, so decrement it here.
@@ -6107,7 +6101,7 @@
             return_with_htlcs_to_fail!(Vec::new());
         }
 
-        match self.free_holding_cell_htlcs(fee_estimator, logger) {
+        match self.maybe_free_holding_cell_htlcs(fee_estimator, logger) {
             (Some(mut additional_update), htlcs_to_fail) => {
                 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                 // strictly increasing by one, so decrement it here.
@@ -6122,17 +6116,32 @@
             },
             (None, htlcs_to_fail) => {
                 if require_commitment {
+                    // We can't generate a new commitment transaction yet so we just return what we have.
                     let mut additional_update = self.build_commitment_no_status_check(logger);
 
                     // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                     // strictly increasing by one, so decrement it here.
                     self.context.latest_monitor_update_id = monitor_update.update_id;
                     monitor_update.updates.append(&mut additional_update.updates);
 
-                    log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
-                        &self.context.channel_id(),
-                        update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
-                        release_state_str);
+                    log_debug!(logger, "Received a valid revoke_and_ack for channel {}. {} monitor update.",
+                        &self.context.channel_id(), release_state_str);
+                    if self.context.channel_state.can_generate_new_commitment() {
+                        log_debug!(logger, "Responding with a commitment update with {} HTLCs failed for channel {}",
+                            update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
+                            &self.context.channel_id);
+                    } else {
+                        debug_assert!(htlcs_to_fail.is_empty());
+                        let reason = if self.context.channel_state.is_local_stfu_sent() {
+                            "exits quiescence"
+                        } else if self.context.channel_state.is_monitor_update_in_progress() {
+                            "completes pending monitor update"
+                        } else {
+                            "can continue progress"
+                        };
+                        log_debug!(logger, "Holding back commitment update until channel {} {}",
+                            &self.context.channel_id, reason);
+                    }
 
                     self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                     return_with_htlcs_to_fail!(htlcs_to_fail);
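
Note that the hunk above calls maybe_free_holding_cell_htlcs and ChannelState::can_generate_new_commitment without showing their bodies. As a rough mental model only (simplified stand-in types, not the real implementation), the wrapper can be thought of as returning no update when a new commitment cannot be generated, which is why the (None, htlcs_to_fail) arm must now tolerate a quiescent channel and asserts that no HTLCs are being failed there:

// Simplified stand-ins; not the types or method bodies from lightning/src/ln/channel.rs.
struct MonitorUpdateSketch { updates: Vec<&'static str> }

fn maybe_free_holding_cell(
    can_generate_new_commitment: bool,
    holding_cell: &mut Vec<&'static str>,
) -> (Option<MonitorUpdateSketch>, Vec<&'static str>) {
    if !can_generate_new_commitment || holding_cell.is_empty() {
        // Quiescent or monitor-blocked: nothing is freed, so no HTLCs fail here.
        return (None, Vec::new());
    }
    (Some(MonitorUpdateSketch { updates: std::mem::take(holding_cell) }), Vec::new())
}

fn main() {
    let mut holding_cell = vec!["update_add_htlc"];
    // While quiescent (stfu sent), nothing is freed and the `(None, htlcs_to_fail)`
    // arm runs with an empty failure list, matching the new debug_assert!.
    let (update, htlcs_to_fail) = maybe_free_holding_cell(false, &mut holding_cell);
    assert!(update.is_none() && htlcs_to_fail.is_empty());
    assert_eq!(holding_cell.len(), 1); // the queued HTLC waits until quiescence ends
}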

lightning/src/ln/quiescence_tests.rs

Lines changed: 126 additions & 0 deletions
@@ -1,4 +1,5 @@
 use crate::chain::ChannelMonitorUpdateStatus;
+use crate::events::Event;
 use crate::events::HTLCDestination;
 use crate::events::MessageSendEvent;
 use crate::events::MessageSendEventsProvider;
@@ -338,3 +339,128 @@ fn test_quiescence_waits_for_monitor_update_complete() {
     expect_payment_sent(&nodes[0], preimage, None, true, true);
     expect_payment_claimed!(&nodes[1], payment_hash, payment_amount);
 }
+
+#[test]
+fn test_quiescence_updates_go_to_holding_cell() {
+    quiescence_updates_go_to_holding_cell(false);
+    quiescence_updates_go_to_holding_cell(true);
+}
+
+fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) {
+    // Test that any updates made to a channel while quiescent go to the holding cell.
+    let chanmon_cfgs = create_chanmon_cfgs(2);
+    let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+    let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+    let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+    let node_id_0 = nodes[0].node.get_our_node_id();
+    let node_id_1 = nodes[1].node.get_our_node_id();
+
+    // Send enough to be able to pay from both directions.
+    let payment_amount = 1_000_000;
+    send_payment(&nodes[0], &[&nodes[1]], payment_amount * 4);
+
+    // Propose quiescence from nodes[1], and immediately try to send a payment. Since its `stfu` has
+    // already gone out first, the outbound HTLC will go into the holding cell.
+    nodes[1].node.maybe_propose_quiescence(&node_id_0, &chan_id).unwrap();
+    let stfu = get_event_msg!(&nodes[1], MessageSendEvent::SendStfu, node_id_0);
+
+    let (route1, payment_hash1, payment_preimage1, payment_secret1) =
+        get_route_and_payment_hash!(&nodes[1], &nodes[0], payment_amount);
+    let onion1 = RecipientOnionFields::secret_only(payment_secret1);
+    let payment_id1 = PaymentId(payment_hash1.0);
+    nodes[1].node.send_payment_with_route(route1, payment_hash1, onion1, payment_id1).unwrap();
+    check_added_monitors!(&nodes[1], 0);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    // Send a payment in the opposite direction. Since nodes[0] hasn't sent its own `stfu` yet, it's
+    // allowed to make updates.
+    let (route2, payment_hash2, payment_preimage2, payment_secret2) =
+        get_route_and_payment_hash!(&nodes[0], &nodes[1], payment_amount);
+    let onion2 = RecipientOnionFields::secret_only(payment_secret2);
+    let payment_id2 = PaymentId(payment_hash2.0);
+    nodes[0].node.send_payment_with_route(route2, payment_hash2, onion2, payment_id2).unwrap();
+    check_added_monitors!(&nodes[0], 1);
+
+    let update_add = get_htlc_update_msgs!(&nodes[0], node_id_1);
+    nodes[1].node.handle_update_add_htlc(node_id_0, &update_add.update_add_htlcs[0]);
+    commitment_signed_dance!(&nodes[1], &nodes[0], update_add.commitment_signed, false);
+    expect_pending_htlcs_forwardable!(&nodes[1]);
+    expect_payment_claimable!(nodes[1], payment_hash2, payment_secret2, payment_amount);
+
+    // Have nodes[1] attempt to fail/claim nodes[0]'s payment. Since nodes[1] already sent out
+    // `stfu`, the `update_fail/fulfill` will go into the holding cell.
+    if fail_htlc {
+        nodes[1].node.fail_htlc_backwards(&payment_hash2);
+        let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash2 };
+        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failed_payment]);
+    } else {
+        nodes[1].node.claim_funds(payment_preimage2);
+        check_added_monitors(&nodes[1], 1);
+    }
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    // Finish the quiescence handshake.
+    nodes[0].node.handle_stfu(node_id_1, &stfu);
+    let stfu = get_event_msg!(&nodes[0], MessageSendEvent::SendStfu, node_id_1);
+    nodes[1].node.handle_stfu(node_id_0, &stfu);
+
+    nodes[0].node.exit_quiescence(&node_id_1, &chan_id).unwrap();
+    nodes[1].node.exit_quiescence(&node_id_0, &chan_id).unwrap();
+
+    // Now that quiescence is over, nodes are allowed to make updates again. nodes[1] will have its
+    // outbound HTLC finally go out, along with the fail/claim of nodes[0]'s payment.
+    let update = get_htlc_update_msgs!(&nodes[1], node_id_0);
+    check_added_monitors(&nodes[1], 1);
+    nodes[0].node.handle_update_add_htlc(node_id_1, &update.update_add_htlcs[0]);
+    if fail_htlc {
+        nodes[0].node.handle_update_fail_htlc(node_id_1, &update.update_fail_htlcs[0]);
+    } else {
+        nodes[0].node.handle_update_fulfill_htlc(node_id_1, &update.update_fulfill_htlcs[0]);
+    }
+    commitment_signed_dance!(&nodes[0], &nodes[1], update.commitment_signed, false);
+
+    if !fail_htlc {
+        expect_payment_claimed!(nodes[1], payment_hash2, payment_amount);
+    }
+
+    let events = nodes[0].node.get_and_clear_pending_events();
+    assert_eq!(events.len(), 3);
+    assert!(events.iter().find(|e| matches!(e, Event::PendingHTLCsForwardable { .. })).is_some());
+    if fail_htlc {
+        assert!(events.iter().find(|e| matches!(e, Event::PaymentFailed { .. })).is_some());
+        assert!(events.iter().find(|e| matches!(e, Event::PaymentPathFailed { .. })).is_some());
+    } else {
+        assert!(events.iter().find(|e| matches!(e, Event::PaymentSent { .. })).is_some());
+        assert!(events.iter().find(|e| matches!(e, Event::PaymentPathSuccessful { .. })).is_some());
+        check_added_monitors(&nodes[0], 1);
+    }
+    nodes[0].node.process_pending_htlc_forwards();
+    expect_payment_claimable!(nodes[0], payment_hash1, payment_secret1, payment_amount);
+
+    if fail_htlc {
+        nodes[0].node.fail_htlc_backwards(&payment_hash1);
+        let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash1 };
+        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], vec![failed_payment]);
+    } else {
+        nodes[0].node.claim_funds(payment_preimage1);
+    }
+    check_added_monitors(&nodes[0], 1);
+
+    let update = get_htlc_update_msgs!(&nodes[0], node_id_1);
+    if fail_htlc {
+        nodes[1].node.handle_update_fail_htlc(node_id_0, &update.update_fail_htlcs[0]);
+    } else {
+        nodes[1].node.handle_update_fulfill_htlc(node_id_0, &update.update_fulfill_htlcs[0]);
+    }
+    commitment_signed_dance!(&nodes[1], &nodes[0], update.commitment_signed, false);
+
+    if fail_htlc {
+        let conditions = PaymentFailedConditions::new();
+        expect_payment_failed_conditions(&nodes[1], payment_hash1, true, conditions);
+    } else {
+        expect_payment_claimed!(nodes[0], payment_hash1, payment_amount);
+        expect_payment_sent(&nodes[1], payment_preimage1, None, true, true);
+    }
+}
