@@ -40,7 +40,7 @@ use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentPath,
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
+use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, MAX_BLOCKS_FOR_CONF, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
@@ -2824,7 +2824,7 @@ pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
 pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
+/// HTLC's CLTV. The current default represents roughly eight hours of blocks at six blocks/hour.
 ///
 /// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
 ///
@@ -2833,7 +2833,7 @@ pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 // i.e. the node we forwarded the payment on to should always have enough room to reliably time out
 // the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 // CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*8;
 // This should be long enough to allow a payment path drawn across multiple routing hops with substantial
 // `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
 // in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
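For reference, a quick worked conversion behind the updated doc comment and default above; a sketch only, assuming the usual six-blocks-per-hour (ten-minute) block target:

```rust
// Worked conversion for the new default, assuming ~10-minute blocks (six per hour).
fn main() {
    let min_cltv_expiry_delta: u32 = 6 * 8; // 48 blocks
    println!("{} blocks / 6 blocks-per-hour = {} hours", min_cltv_expiry_delta, min_cltv_expiry_delta / 6); // 48 blocks = 8 hours
}
```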
@@ -2850,19 +2850,34 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
 // a payment was being routed, so we add an extra block to be safe.
 pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
-// ie that if the next-hop peer fails the HTLC within
-// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
-// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
-// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
-// LATENCY_GRACE_PERIOD_BLOCKS.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get everything on chain and locked
+// in with enough time left to fail the corresponding HTLC back to our inbound edge before they
+// force-close on us.
+// In other words, if the next-hop peer fails the HTLC LATENCY_GRACE_PERIOD_BLOCKS after our
+// CLTV_CLAIM_BUFFER (because that's how many blocks we allow them after expiry), we'll still have
+// 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY left to get two transactions on chain and the second
+// fully locked in before the peer force-closes on us (LATENCY_GRACE_PERIOD_BLOCKS before the
+// expiry, i.e. assuming the peer force-closes right at the expiry and we're behind by
+// LATENCY_GRACE_PERIOD_BLOCKS).
+const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS + 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
+);
+
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get the HTLC preimage back to our
+// counterparty if the outbound edge gives us the preimage only one block before we'd force-close
+// the channel.
+// i.e. they provide the preimage LATENCY_GRACE_PERIOD_BLOCKS - 1 after the HTLC expires, then we
+// pass the preimage back, which takes LATENCY_GRACE_PERIOD_BLOCKS to complete, and we want to make
+// sure this all happens at least N blocks before the inbound HTLC expires (where N is the
+// counterparty's CLTV_CLAIM_BUFFER or equivalent).
+const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;
 
-// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const _CHECK_COUNTERPARTY_REALISTIC: () =
+	assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
+
+const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
+);
 
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
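To sanity-check the new bounds numerically, here is a standalone sketch of the two compile-time checks with stand-in values for the imported constants. LATENCY_GRACE_PERIOD_BLOCKS = 3, CLTV_CLAIM_BUFFER = 18, and ANTI_REORG_DELAY = 6 mirror the current values in `channelmonitor.rs`; the MAX_BLOCKS_FOR_CONF value of 18 is an assumption for illustration only:

```rust
// Standalone sketch of the new const-asserts; the constants below are stand-ins rather than
// imports (MAX_BLOCKS_FOR_CONF = 18 is assumed, the others mirror channelmonitor.rs).
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_CLAIM_BUFFER: u32 = 18;
const ANTI_REORG_DELAY: u32 = 6;
const MAX_BLOCKS_FOR_CONF: u32 = 18; // assumed value
const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 8; // 48 blocks

// On-chain path: 2*3 + 2*18 + 6 = 48, so 48 >= 48 holds exactly, with no slack to spare.
const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
	MIN_CLTV_EXPIRY_DELTA as u32 >= 2 * LATENCY_GRACE_PERIOD_BLOCKS + 2 * MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
);

// Off-chain path: we assume the counterparty needs at most 36 blocks (a six-hour claim buffer),
// which must be at least our own CLTV_CLAIM_BUFFER, and 2*3 - 1 + 36 = 41 <= 48.
const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;
const _CHECK_COUNTERPARTY_REALISTIC: () = assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
	MIN_CLTV_EXPIRY_DELTA as u32 >= 2 * LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
);

fn main() {} // nothing to run; the asserts above are evaluated during compilation
```

With the previous default of 6*7 (42 blocks), the first check would not hold under these stand-in values, which is consistent with the bump to 6*8 in this diff.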
@@ -15979,15 +15994,15 @@ mod tests {
 	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 	let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive(msgs::InboundOnionReceivePayload {
 		sender_intended_htlc_amt_msat: 100,
-		cltv_expiry_height: 22,
+		cltv_expiry_height: TEST_FINAL_CLTV,
 		payment_metadata: None,
 		keysend_preimage: None,
 		payment_data: Some(msgs::FinalOnionHopData {
 			payment_secret: PaymentSecret([0; 32]),
 			total_msat: 100,
 		}),
 		custom_tlvs: Vec::new(),
-	}), [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);
+	}), [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);
 
 	// Should not return an error as this condition:
 	// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
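For intuition, a sketch of the relationship this test now exercises using the named test constant instead of the old hard-coded 22/23 pair. `TEST_FINAL_CLTV` below is a stand-in with an assumed value, and the comparison only mirrors the BOLT 4 final-node condition referenced in the comment above, not the library's actual check:

```rust
// Hypothetical mirror of the tested relationship: the HTLC's cltv_expiry sits one block
// above the onion payload's cltv_expiry_height, so the referenced BOLT 4 failure case
// (HTLC expiry below the onion's outgoing CLTV value) does not apply and no error is expected.
const TEST_FINAL_CLTV: u32 = 70; // assumed value of the test-utils constant

fn main() {
    let cltv_expiry_height = TEST_FINAL_CLTV; // value inside the onion payload
    let htlc_cltv_expiry = TEST_FINAL_CLTV + 1; // value on the incoming HTLC itself
    assert!(
        htlc_cltv_expiry >= cltv_expiry_height,
        "the failure case would only apply if the HTLC expired earlier than the onion value"
    );
}
```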