Fail HTLC backwards before upstream claims on-chain #3556
@@ -2270,6 +2270,138 @@ fn channel_reserve_in_flight_removes() {
    claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
}

enum PostFailBackAction {
    TimeoutOnChain,
    ClaimOnChain,
    FailOffChain,
    ClaimOffChain,
}

#[test]
fn test_fail_back_before_backwards_timeout() {
    do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain);
    do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain);
    do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain);
    do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain);
}

fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) {
    // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream
    // just before the upstream timeout expires
    let chanmon_cfgs = create_chanmon_cfgs(3);
    let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
    let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
    let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
    for node in nodes.iter() {
        *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
    }

    let node_b_id = nodes[1].node.get_our_node_id();
    let node_c_id = nodes[2].node.get_our_node_id();

    create_announced_chan_between_nodes(&nodes, 0, 1);
    let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
    // Start every node on the same block height to make reasoning about timeouts easier
    connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
    connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
    connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);

    let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);

    // Force close the B<->C channel by timing out the HTLC
    let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;
    connect_blocks(&nodes[1], timeout_blocks);
    let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
    check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000);
    check_closed_broadcast(&nodes[1], 1, true);
    check_added_monitors(&nodes[1], 1);

    // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid
    // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV +
    // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which
    // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`).
    let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2;
    connect_blocks(&nodes[1], upstream_timeout_blocks);

    // Connect blocks for nodes[0] to make sure they don't go on-chain
    connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks);

    // Check that nodes[1] fails the HTLC upstream
    expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
        vec![HTLCDestination::NextHopChannel {
            node_id: Some(nodes[2].node.get_our_node_id()),
            channel_id: chan_2.2
        }]);
    check_added_monitors!(nodes[1], 1);
    let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
    let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;

    nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
    commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
    expect_payment_failed_conditions(&nodes[0], payment_hash, false,
        PaymentFailedConditions::new().blamed_chan_closed(true));
    // Make sure we handle possible duplicate fails or extra messages after failing back
    match post_fail_back_action {
        PostFailBackAction::TimeoutOnChain => {
            // Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
            mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
            mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
            connect_blocks(&nodes[1], ANTI_REORG_DELAY);
            // Expect handling another fail back event, but the HTLC is already gone
            expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
                vec![HTLCDestination::NextHopChannel {
                    node_id: Some(nodes[2].node.get_our_node_id()),
                    channel_id: chan_2.2
                }]);

[Inline review comment] We could avoid having this duplicate go out if we check the set on the confirmed timeout path, but I guess it's not worth doing since a restart in between would yield it anyway.

[Inline review comment] I also spent a while trying to move the event generation to where we do the actual failure so that we could detect the duplicate in …

            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        },
        PostFailBackAction::ClaimOnChain => {
            nodes[2].node.claim_funds(payment_preimage);
            expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
            check_added_monitors!(nodes[2], 1);
            get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());

            connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
            let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS);
            check_closed_broadcast!(nodes[2], true);
            check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000);
            check_added_monitors!(nodes[2], 1);

            mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
            mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
            connect_blocks(&nodes[1], ANTI_REORG_DELAY);
            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        },
        PostFailBackAction::FailOffChain => {
            nodes[2].node.fail_htlc_backwards(&payment_hash);
            expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2],
                vec![HTLCDestination::FailedPayment { payment_hash }]);
            check_added_monitors!(nodes[2], 1);
            let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
            let update_fail = commitment_update.update_fail_htlcs[0].clone();

            nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail);
            let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
            assert_eq!(err_msg.channel_id, chan_2.2);
            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        },
        PostFailBackAction::ClaimOffChain => {
            nodes[2].node.claim_funds(payment_preimage);
            expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
            check_added_monitors!(nodes[2], 1);
            let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
            let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone();

            nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill);
            let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
            assert_eq!(err_msg.channel_id, chan_2.2);
            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        },
    };
}
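For readers following the height bookkeeping in `do_test_fail_back_before_backwards_timeout` above, the arithmetic can be spelled out in a standalone sketch. The constant values below are placeholders assumed for illustration only; the test itself uses the lightning crate's real `TEST_FINAL_CLTV`, `LATENCY_GRACE_PERIOD_BLOCKS`, and `MIN_CLTV_EXPIRY_DELTA`.

```rust
// Placeholder values, assumed for illustration only.
const TEST_FINAL_CLTV: u32 = 70;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const MIN_CLTV_EXPIRY_DELTA: u32 = 72;

fn main() {
    // Blocks mined to time out the downstream (B<->C) HTLC and force-close that channel.
    let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;

    // The upstream (A<->B) HTLC expires roughly MIN_CLTV_EXPIRY_DELTA blocks after the
    // downstream one. Mining this many additional blocks brings the upstream HTLC to within
    // LATENCY_GRACE_PERIOD_BLOCKS of its expiry, which is what triggers the early fail-back.
    let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA - LATENCY_GRACE_PERIOD_BLOCKS * 2;

    println!("blocks until downstream force-close: {}", timeout_blocks);        // 74 with these values
    println!("further blocks until early fail-back: {}", upstream_timeout_blocks); // 66 with these values
}
```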
#[test]
fn channel_monitor_network_test() {
    // Simple test which builds a network of ChannelManagers, connects them to each other, and
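The inline review thread in the new test above discusses suppressing the duplicate HTLCHandlingFailed event by checking a set on the confirmed-timeout path. The following is a rough sketch of that dedup idea only; the type and method names are invented for illustration and are not LDK internals.

```rust
use std::collections::HashSet;

// Hypothetical tracker, invented for illustration; not LDK's internal API.
struct FailedBackTracker {
    // HTLCs (keyed by some stable id) that were already failed backwards early.
    already_failed: HashSet<u64>,
}

impl FailedBackTracker {
    fn new() -> Self { Self { already_failed: HashSet::new() } }

    // Record that we failed `htlc_id` backwards before its timeout confirmed on-chain.
    fn mark_failed_back(&mut self, htlc_id: u64) {
        self.already_failed.insert(htlc_id);
    }

    // On the on-chain confirmed-timeout path, only surface another
    // handling-failed event if this HTLC was not already failed backwards.
    fn needs_fail_back(&self, htlc_id: u64) -> bool {
        !self.already_failed.contains(&htlc_id)
    }
}

fn main() {
    let mut tracker = FailedBackTracker::new();
    tracker.mark_failed_back(42);
    assert!(!tracker.needs_fail_back(42)); // duplicate suppressed
    assert!(tracker.needs_fail_back(7));   // unrelated HTLC still fails back
}
```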
@@ -2374,7 +2506,7 @@ fn channel_monitor_network_test() {
    let node2_commitment_txid;
    {
        let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
-       connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
+       connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
        test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
        node2_commitment_txid = node_txn[0].compute_txid();
@@ -3312,8 +3444,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
    // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
    // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
    mine_transaction(&nodes[1], &commitment_tx[0]);
-   check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
-       , [nodes[2].node.get_our_node_id()], 100000);
+   check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+       &[nodes[2].node.get_our_node_id()], 100000);
    let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal|
        if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal {
            Some(*claimable_height)
@@ -9774,6 +9906,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
    let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
    *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

+   let node_c_id = nodes[2].node.get_our_node_id();
+
    create_announced_chan_between_nodes(&nodes, 0, 1);
    let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
    let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
@@ -9789,7 +9923,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
    let conf_height = nodes[1].best_block_info().1;
    if !test_height_before_timelock {
-       connect_blocks(&nodes[1], 24 * 6);
+       connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
    }
    nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
        &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
@@ -9808,10 +9942,6 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
            &spending_txn[0]
        };
        check_spends!(htlc_tx, node_txn[0]);
-       // We should also generate a SpendableOutputs event with the to_self output (as its
-       // timelock is up).
-       let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
-       assert_eq!(descriptor_spend_txn.len(), 1);

        // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
        // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
@@ -9830,6 +9960,18 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
        expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);

+       // We should also generate a SpendableOutputs event with the to_self output (once the
+       // timelock is up).
+       connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1);
+       let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+       assert_eq!(descriptor_spend_txn.len(), 1);
+
+       // When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to
+       // avoid the A<->B channel closing (even though it already has). This will generate a
+       // spurious HTLCHandlingFailed event.
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+           vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]);
    }
}
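Taken together, the hunks above exercise the new behavior: a forwarding node fails an HTLC back to its upstream peer once the inbound HTLC gets close to its CLTV expiry while the downstream resolution is still unconfirmed, rather than letting the upstream channel be force-closed. A minimal sketch of that decision, with hypothetical names (this is not LDK's actual API, and `grace` merely stands in for `LATENCY_GRACE_PERIOD_BLOCKS`):

```rust
// Illustrative helper only; names and signature are assumptions, not LDK code.
fn should_fail_back(best_block_height: u32, inbound_htlc_cltv_expiry: u32, grace: u32) -> bool {
    // If the downstream claim still has not confirmed once the inbound HTLC is
    // within `grace` blocks of its CLTV expiry, fail it backwards now rather
    // than forcing the upstream channel on-chain.
    best_block_height + grace >= inbound_htlc_cltv_expiry
}

fn main() {
    // Example: expiry at height 1_000, grace of 3 blocks.
    assert!(!should_fail_back(996, 1_000, 3)); // still comfortably early
    assert!(should_fail_back(997, 1_000, 3));  // inside the grace window: fail back
}
```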
[Review comment] Nit: is this really considered an error? Info/warn seems better suited.

[Review comment] It's an error in that a violation of our assumptions around tx confirmation time happened, and it's probably an important enough case that users should see it and think hard about what is happening.