@@ -2263,6 +2263,138 @@ fn channel_reserve_in_flight_removes() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
 }
 
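+/// Downstream actions to take after nodes[1] has already failed the HTLC back upstream,
+/// used to check that duplicate fails and stale messages are handled gracefully.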
+enum PostFailBackAction {
+	TimeoutOnChain,
+	ClaimOnChain,
+	FailOffChain,
+	ClaimOffChain,
+}
+
+#[test]
+fn test_fail_back_before_backwards_timeout() {
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain);
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain);
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain);
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain);
+}
+
+fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) {
+	// Test that we fail an HTLC upstream if we are still waiting for confirmation downstream
+	// just before the upstream timeout expires.
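+	// The topology is A -> B -> C: we time the B<->C HTLC out on chain, then verify that B
+	// fails the HTLC back to A before the A<->B HTLC reaches its own timeout.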
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	for node in nodes.iter() {
+		*node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
+	}
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Start every node on the same block height to make reasoning about timeouts easier
+	connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+
+	// Force close the B<->C channel by timing out the HTLC
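+	// (i.e. connect the downstream HTLC's full CLTV plus the grace period, at which point
+	// nodes[1] broadcasts its commitment and HTLC-timeout transactions).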
+	let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;
+	connect_blocks(&nodes[1], timeout_blocks);
+	let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
+	check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000);
+	check_closed_broadcast(&nodes[1], 1, true);
+	check_added_monitors(&nodes[1], 1);
+
+	// After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to
+	// avoid the channel force-closing. Note that we already connected `TEST_FINAL_CLTV +
+	// LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which
+	// is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`).
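+	// Concretely: the upstream HTLC expires `TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA` blocks
+	// after the payment, we fail it `LATENCY_GRACE_PERIOD_BLOCKS` before that, and
+	// `TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS` blocks are already connected, leaving
+	// roughly `MIN_CLTV_EXPIRY_DELTA - 2 * LATENCY_GRACE_PERIOD_BLOCKS` blocks to go.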
+	let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2;
+	connect_blocks(&nodes[1], upstream_timeout_blocks);
+
+	// Connect blocks for nodes[0] to make sure it doesn't go on-chain
+	connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks);
+
+	// Check that nodes[1] fails the HTLC upstream
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+		vec![HTLCDestination::NextHopChannel {
+			node_id: Some(node_c_id),
+			channel_id: chan_2.2
+		}]);
+	check_added_monitors!(nodes[1], 1);
+	let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;
+
+	nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().blamed_chan_closed(true));
+
+	// Make sure we handle possible duplicate fails or extra messages after failing back
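+	// (each `PostFailBackAction` resolves the downstream HTLC differently; in every case
+	// nodes[1] must not send any further updates on the upstream channel).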
+	match post_fail_back_action {
+		PostFailBackAction::TimeoutOnChain => {
+			// Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
+			mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
+			mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			// Expect handling another fail back event, but the HTLC is already gone
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+				vec![HTLCDestination::NextHopChannel {
+					node_id: Some(node_c_id),
+					channel_id: chan_2.2
+				}]);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::ClaimOnChain => {
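+			// nodes[2] claims and, once its HTLC nears expiry, goes on chain with an
+			// HTLC-success claim; nodes[1] sees the preimage but must stay quiet upstream.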
+			nodes[2].node.claim_funds(payment_preimage);
+			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+			check_added_monitors!(nodes[2], 1);
+			get_htlc_update_msgs(&nodes[2], &node_b_id);
+
+			connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
+			let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS);
+			check_closed_broadcast!(nodes[2], true);
+			check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000);
+			check_added_monitors!(nodes[2], 1);
+
+			mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
+			mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::FailOffChain => {
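+			// nodes[2] fails the HTLC off chain, but nodes[1] already failed it back and
+			// force-closed, so it answers the stale update with an error.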
+			nodes[2].node.fail_htlc_backwards(&payment_hash);
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2],
+				vec![HTLCDestination::FailedPayment { payment_hash }]);
+			check_added_monitors!(nodes[2], 1);
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id);
+			let update_fail = commitment_update.update_fail_htlcs[0].clone();
+
+			nodes[1].node.handle_update_fail_htlc(node_c_id, &update_fail);
+			let err_msg = get_err_msg(&nodes[1], &node_c_id);
+			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::ClaimOffChain => {
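+			// Similarly, a claim delivered off chain after the fail-back only earns nodes[2]
+			// an error on the already-closed channel.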
+			nodes[2].node.claim_funds(payment_preimage);
+			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+			check_added_monitors!(nodes[2], 1);
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id);
+			let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone();
+
+			nodes[1].node.handle_update_fulfill_htlc(node_c_id, &update_fulfill);
+			let err_msg = get_err_msg(&nodes[1], &node_c_id);
+			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+	};
+}
+
 #[test]
 fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -2367,7 +2499,7 @@ fn channel_monitor_network_test() {
 	let node2_commitment_txid;
 	{
 		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
-		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
+		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 		test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
 		node2_commitment_txid = node_txn[0].compute_txid();
 
@@ -3305,8 +3437,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
 	// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
 	mine_transaction(&nodes[1], &commitment_tx[0]);
-	check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
-		[nodes[2].node.get_our_node_id()], 100000);
+	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+		&[nodes[2].node.get_our_node_id()], 100000);
 	let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal|
 		if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal {
 			Some(*claimable_height)
@@ -9786,6 +9918,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
 
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
@@ -9801,7 +9935,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
 	let conf_height = nodes[1].best_block_info().1;
 	if !test_height_before_timelock {
-		connect_blocks(&nodes[1], 24 * 6);
+		connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
 	}
 	nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
 		&nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
@@ -9820,10 +9954,6 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 			&spending_txn[0]
 		};
 		check_spends!(htlc_tx, node_txn[0]);
-		// We should also generate a SpendableOutputs event with the to_self output (as its
-		// timelock is up).
-		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
-		assert_eq!(descriptor_spend_txn.len(), 1);
 
 		// If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
 		// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
@@ -9842,6 +9972,18 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 		expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
+
+		// We should also generate a SpendableOutputs event with the to_self output (once the
+		// timelock is up).
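+		// (`TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS` blocks were already connected above,
+		// so only the remainder of the `BREAKDOWN_TIMEOUT` to_self delay is needed here.)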
+		connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1);
+		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+		assert_eq!(descriptor_spend_txn.len(), 1);
+
+		// When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back
+		// to avoid the A<->B channel closing (even though it already has). This will generate a
+		// spurious HTLCHandlingFailed event.
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+			vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]);
 	}
 }
 