From c41fe2fadf270fa2de9089ca7cc27a8284298394 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 29 Aug 2025 17:37:45 -0400 Subject: [PATCH 01/20] Cache peers in OffersMessageFlow In upcoming commits, we'll be creating blinded paths during the process of creating a revoke_and_ack message within the Channel struct. These paths will be included in said RAA to be used as reply paths for often-offline senders' held_htlc_available messages. Because we hold the per-peer lock corresponding to the Channel while creating this RAA, we can't use our typical approach of calling ChannelManager::get_peers_for_blinded_path to create these blinded paths. The ::get_peers_for_blinded_path method takes each peer's lock in turn in order to check for usable channels/onion message feature support, and it's not permitted to hold multiple peer state locks at the same time due to the potential for deadlocks (see the debug_sync module). To avoid taking other peer state locks while holding a particular Channel's peer state lock, here we cache the set of peers in the OffersMessageFlow, which is the struct that ultimately creates the blinded paths for the RAA. --- lightning/src/ln/channelmanager.rs | 9 +++++ lightning/src/offers/flow.rs | 64 ++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0397116f96d..78137dac033 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12983,6 +12983,8 @@ where for (err, counterparty_node_id) in failed_channels.drain(..) 
{ let _ = handle_error!(self, err, counterparty_node_id); } + + let _ = self.flow.peer_disconnected(counterparty_node_id); } #[rustfmt::skip] @@ -12993,6 +12995,7 @@ where return Err(()); } + let mut peer_has_announced_channel = false; let mut res = Ok(()); PersistenceNotifierGuard::optionally_notify(self, || { @@ -13080,6 +13083,9 @@ where }), ReconnectionMsg::None => {}, } + if chan.context().should_announce() { + peer_has_announced_channel = true; + } } } @@ -13092,6 +13098,9 @@ where // until we have some peer connection(s) to receive onion messages over, so as a minor optimization // refresh the cache when a peer connects. self.check_refresh_async_receive_offer_cache(false); + let _ = self.flow.peer_connected( + counterparty_node_id, &init_msg.features, peer_has_announced_channel + ); res } diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 38f472b2f5b..ea0cd347455 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -61,6 +61,7 @@ use crate::sign::{EntropySource, NodeSigner, ReceiveAuthKey}; use crate::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder}; use crate::sync::{Mutex, RwLock}; +use crate::types::features::InitFeatures; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::util::ser::Writeable; @@ -98,6 +99,7 @@ where pending_async_payments_messages: Mutex>, async_receive_offer_cache: Mutex, + peers_cache: Mutex>, #[cfg(feature = "dnssec")] pub(crate) hrn_resolver: OMNameResolver, @@ -130,6 +132,7 @@ where pending_offers_messages: Mutex::new(Vec::new()), pending_async_payments_messages: Mutex::new(Vec::new()), + peers_cache: Mutex::new(Vec::with_capacity(MAX_CACHED_PEERS)), #[cfg(feature = "dnssec")] hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height), @@ -260,6 +263,10 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = 
DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY; +/// The maximum number of peers that the [`OffersMessageFlow`] will hold in their peer cache. +/// See [`OffersMessageFlow::peer_connected`] and [`OffersMessageFlow::peer_disconnected`]. +pub const MAX_CACHED_PEERS: usize = 100; + impl OffersMessageFlow where MR::Target: MessageRouter, @@ -1627,4 +1634,61 @@ where pub fn writeable_async_receive_offer_cache(&self) -> Vec { self.async_receive_offer_cache.encode() } + + /// Indicates that a peer was connected to our node. Useful for the [`OffersMessageFlow`] to keep + /// track of which peers are connected, which allows for methods that can create blinded paths + /// without requiring a fresh set of [`MessageForwardNode`]s to be passed in. + /// + /// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline + /// senders. + /// + /// Errors if the peer does not support onion messages or we have more than [`MAX_CACHED_PEERS`] + /// connected already. + pub fn peer_connected( + &self, peer_node_id: PublicKey, features: &InitFeatures, announced_channel: bool, + ) -> Result<(), ()> { + if !features.supports_onion_messages() { + return Err(()); + } + + let mut peers_cache = self.peers_cache.lock().unwrap(); + let existing_peer = peers_cache.iter_mut().find(|(peer, _)| peer.node_id == peer_node_id); + if let Some((_, is_announced)) = existing_peer { + *is_announced = announced_channel; + return Ok(()); + } + + // Cache this peer if we have space or if they are a public node and can replace an existing + // peer that is potentially private, since private peers are not useful as introduction nodes + // for blinded paths. 
+ let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None }; + match peers_cache.len() { + num_peers if num_peers >= MAX_CACHED_PEERS => { + if !announced_channel { + return Err(()); + } + match peers_cache.iter().position(|(_, announced)| !announced) { + Some(idx) => peers_cache[idx] = (peer, announced_channel), + None => return Err(()), + } + }, + _ => peers_cache.push((peer, announced_channel)), + } + + Ok(()) + } + + /// Indicates that a peer was disconnected from our node. See [`Self::peer_connected`]. + /// + /// Errors if the peer is unknown. + pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> { + let mut peers_cache = self.peers_cache.lock().unwrap(); + let peer_idx = match peers_cache.iter().position(|(peer, _)| peer.node_id == peer_node_id) { + Some(idx) => idx, + None => return Err(()), + }; + peers_cache.swap_remove(peer_idx); + + Ok(()) + } } From bbe17ed0f3ac1e60af1a0af24535bc9867d72518 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 4 Sep 2025 16:37:18 -0400 Subject: [PATCH 02/20] f remove limits on peer cache --- lightning/src/ln/channelmanager.rs | 8 +------ lightning/src/offers/flow.rs | 37 +++++------------------------- 2 files changed, 7 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 78137dac033..f927a669c23 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12995,7 +12995,6 @@ where return Err(()); } - let mut peer_has_announced_channel = false; let mut res = Ok(()); PersistenceNotifierGuard::optionally_notify(self, || { @@ -13083,9 +13082,6 @@ where }), ReconnectionMsg::None => {}, } - if chan.context().should_announce() { - peer_has_announced_channel = true; - } } } @@ -13098,9 +13094,7 @@ where // until we have some peer connection(s) to receive onion messages over, so as a minor optimization // refresh the cache when a peer connects. 
self.check_refresh_async_receive_offer_cache(false); - let _ = self.flow.peer_connected( - counterparty_node_id, &init_msg.features, peer_has_announced_channel - ); + let _ = self.flow.peer_connected(counterparty_node_id, &init_msg.features); res } diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index ea0cd347455..8890431123e 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -99,7 +99,7 @@ where pending_async_payments_messages: Mutex>, async_receive_offer_cache: Mutex, - peers_cache: Mutex>, + peers_cache: Mutex>, #[cfg(feature = "dnssec")] pub(crate) hrn_resolver: OMNameResolver, @@ -132,7 +132,7 @@ where pending_offers_messages: Mutex::new(Vec::new()), pending_async_payments_messages: Mutex::new(Vec::new()), - peers_cache: Mutex::new(Vec::with_capacity(MAX_CACHED_PEERS)), + peers_cache: Mutex::new(Vec::new()), #[cfg(feature = "dnssec")] hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height), @@ -263,10 +263,6 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY; -/// The maximum number of peers that the [`OffersMessageFlow`] will hold in their peer cache. -/// See [`OffersMessageFlow::peer_connected`] and [`OffersMessageFlow::peer_disconnected`]. -pub const MAX_CACHED_PEERS: usize = 100; - impl OffersMessageFlow where MR::Target: MessageRouter, @@ -1642,38 +1638,17 @@ where /// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline /// senders. /// - /// Errors if the peer does not support onion messages or we have more than [`MAX_CACHED_PEERS`] - /// connected already. + /// Errors if the peer does not support onion messages. 
pub fn peer_connected( - &self, peer_node_id: PublicKey, features: &InitFeatures, announced_channel: bool, + &self, peer_node_id: PublicKey, features: &InitFeatures, ) -> Result<(), ()> { if !features.supports_onion_messages() { return Err(()); } let mut peers_cache = self.peers_cache.lock().unwrap(); - let existing_peer = peers_cache.iter_mut().find(|(peer, _)| peer.node_id == peer_node_id); - if let Some((_, is_announced)) = existing_peer { - *is_announced = announced_channel; - return Ok(()); - } - - // Cache this peer if we have space or if they are a public node and can replace an existing - // peer that is potentially private, since private peers are not useful as introduction nodes - // for blinded paths. let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None }; - match peers_cache.len() { - num_peers if num_peers >= MAX_CACHED_PEERS => { - if !announced_channel { - return Err(()); - } - match peers_cache.iter().position(|(_, announced)| !announced) { - Some(idx) => peers_cache[idx] = (peer, announced_channel), - None => return Err(()), - } - }, - _ => peers_cache.push((peer, announced_channel)), - } + peers_cache.push(peer); Ok(()) } @@ -1683,7 +1658,7 @@ where /// Errors if the peer is unknown. 
pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> { let mut peers_cache = self.peers_cache.lock().unwrap(); - let peer_idx = match peers_cache.iter().position(|(peer, _)| peer.node_id == peer_node_id) { + let peer_idx = match peers_cache.iter().position(|peer| peer.node_id == peer_node_id) { Some(idx) => idx, None => return Err(()), }; From b8258aae174918dbfc74e90a5a121cc863d3618c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 15 Aug 2025 14:57:14 -0400 Subject: [PATCH 03/20] Add UpdateAddHTLC::hold_htlc As part of supporting sending payments as an often-offline sender, the sender needs to be able to set a flag in their update_add_htlc message indicating that the HTLC should be held until receipt of a release_held_htlc onion message from the often-offline payment recipient. We don't yet ever set this flag, but lay the groundwork by including the field in the update_add struct. See-also --- lightning/src/ln/blinded_payment_tests.rs | 1 + lightning/src/ln/channel.rs | 1 + lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/htlc_reserve_unit_tests.rs | 5 +++++ lightning/src/ln/msgs.rs | 11 ++++++++++- lightning/src/ln/onion_payment.rs | 1 + lightning/src/ln/payment_tests.rs | 1 + 7 files changed, 20 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a8e7af23984..25fa5e71e5d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -1522,6 +1522,7 @@ fn update_add_msg( onion_routing_packet, skimmed_fee_msat: None, blinding_point, + hold_htlc: None, } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 198569bd77e..2f7853e192b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -9027,6 +9027,7 @@ where onion_routing_packet: (**onion_packet).clone(), skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, + hold_htlc: None, 
// Will be set by the async sender when support is added }); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5e78049664c..04aa9a75457 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2288,6 +2288,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index f90b8b880bc..7f411a6fcb6 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -835,6 +835,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1072,6 +1073,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &msg); @@ -1255,6 +1257,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1637,6 +1640,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { onion_routing_packet: onion_packet.clone(), skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; for i in 0..50 { @@ -2242,6 +2246,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); diff --git a/lightning/src/ln/msgs.rs 
b/lightning/src/ln/msgs.rs index 6d025293b9e..5540007e88d 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -765,6 +765,11 @@ pub struct UpdateAddHTLC { /// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion /// routing packet and the recipient-provided encrypted payload within. pub blinding_point: Option, + /// Set to `Some` if the sender wants the receiver of this message to hold onto this HTLC until + /// receipt of a [`ReleaseHeldHtlc`] onion message from the payment recipient. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + pub hold_htlc: Option<()>, } /// An onion message to be sent to or received from a peer. @@ -3268,7 +3273,10 @@ impl_writeable_msg!(UpdateAddHTLC, { onion_routing_packet, }, { (0, blinding_point, option), - (65537, skimmed_fee_msat, option) + (65537, skimmed_fee_msat, option), + // TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message + // and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989. 
+ (75537, hold_htlc, option), }); impl LengthReadable for OnionMessage { @@ -5701,6 +5709,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; let encoded_value = update_add_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f0101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 79952faca9a..acaa0304f29 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -753,6 +753,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0af0463134e..3dab164e619 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -5017,6 +5017,7 @@ fn peel_payment_onion_custom_tlvs() { skimmed_fee_msat: None, onion_routing_packet, blinding_point: None, + hold_htlc: None, }; let peeled_onion = crate::ln::onion_payment::peel_payment_onion( &update_add, From f74ce6574756dc98c629aa70cfc572a4fe75a7c2 Mon Sep 17 
00:00:00 2001 From: Valentine Wallace Date: Fri, 15 Aug 2025 12:16:31 -0400 Subject: [PATCH 04/20] Add feature bit for HtlcHold (52/53) As part of supporting sending payments as an often-offline sender, the often-offline sender's channel counterparty needs to advertise a feature bit indicating that they support holding onto the sender's HTLC until they receive a release_held_htlc onion message from the recipient indicating that they are online and ready to receive the payment. See-also We don't yet advertise support of this feature, so here we also disconnect from peers that try to send us hold HTLCs. --- lightning-types/src/features.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..521aefc2184 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `HtlcHold` - requires/supports holding HTLCs and forwarding on receipt of an onion message +//! (see [BOLT-2](https://github.com/lightning/bolts/pull/989/files) for more information). //! //! LDK knows about the following features, but does not support them: //! 
- `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -161,7 +163,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf, + ZeroConf | HtlcHold, // Byte 7 Trampoline | SimpleClose, ] @@ -182,7 +184,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf | Keysend, + ZeroConf | HtlcHold | Keysend, // Byte 7 Trampoline | SimpleClose, // Byte 8 - 31 @@ -640,6 +642,17 @@ mod sealed { define_feature!(51, ZeroConf, [InitContext, NodeContext, ChannelTypeContext], "Feature flags for accepting channels with zero confirmations. Called `option_zeroconf` in the BOLTs", set_zero_conf_optional, set_zero_conf_required, supports_zero_conf, requires_zero_conf); + define_feature!( + 53, + HtlcHold, + [InitContext, NodeContext], + "Feature flags for holding HTLCs and forwarding on receipt of an onion message", + set_htlc_hold_optional, + set_htlc_hold_required, + clear_htlc_hold, + supports_htlc_hold, + requires_htlc_hold + ); define_feature!( 55, Keysend, From 1653cf22488698f32b3b35a44bcb14c223732609 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 3 Sep 2025 17:09:21 -0400 Subject: [PATCH 05/20] Add enable_htlc_hold cfg flag + fail hold htlcs As part of supporting sending payments as an often-offline sender, the often-offline sender's channel counterparty needs to advertise a feature bit indicating that they support holding onto the sender's HTLC until they receive a release_held_htlc onion message from the recipient indicating that they are online and ready to receive the payment. Here we add a config flag to turn on advertising this feature, and fail back hold_htlcs if this config flag is not set. 
See-also --- lightning/src/ln/channelmanager.rs | 33 ++++++++++++++++++++++++++++++ lightning/src/util/config.rs | 14 +++++++++++++ 2 files changed, 47 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f927a669c23..79f4519673b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6406,6 +6406,32 @@ where }); let shared_secret = next_hop.shared_secret().secret_bytes(); + // Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature + // support. + // + // If we wanted to pretend to be a node that didn't understand the feature at all here, the + // correct behavior would've been to disconnect the sender when we first received the + // update_add message. However, this would make the `UserConfig::enable_htlc_hold` option + // unsafe -- if our node switched the config option from on to off just after the sender + // enqueued their update_add + CS, the sender would continue retransmitting those messages + // and we would keep disconnecting them until the HTLC timed out. + if update_add_htlc.hold_htlc.is_some() + && !BaseMessageHandler::provided_node_features(self).supports_htlc_hold() + { + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, reason.into())); + continue; + } + // Process the HTLC on the incoming channel. 
match self.do_funded_channel_callback( incoming_scid, @@ -14792,6 +14818,13 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features.set_anchor_zero_fee_commitments_optional(); } + // If we are configured to be an announced node, we are expected to be always-online and can + // advertise the htlc_hold feature. + #[cfg(test)] + if config.enable_htlc_hold { + features.set_htlc_hold_optional(); + } + features } diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index a0c74673799..f0c0330e14d 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -935,6 +935,18 @@ pub struct UserConfig { /// /// Default value: `false` pub enable_dual_funded_channels: bool, + /// LDK supports a feature for always-online nodes such that these nodes can hold onto an HTLC + /// from an often-offline channel peer until the often-offline payment recipient sends an onion + /// message telling the always-online node to release the HTLC. If this is set to `true`, our node + /// will carry out this feature for channel peers that request it. + /// + /// This should only be set to `true` for nodes which expect to be online reliably. + /// + /// Setting this to `true` may break backwards compatibility with LDK versions < 0.2. 
+ /// + /// Default value: `false` + #[cfg(test)] + pub enable_htlc_hold: bool, } impl Default for UserConfig { @@ -949,6 +961,8 @@ impl Default for UserConfig { accept_intercept_htlcs: false, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, + #[cfg(test)] + enable_htlc_hold: false, } } } From 1878efdbb0fb8a620f9ec970c89a8adaada352fa Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 19 Aug 2025 15:10:04 -0400 Subject: [PATCH 06/20] Add RevokeAndACK::release_htlc_message_paths As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages where the reply path terminates at their always-online channel counterparty that is holding the HTLC until the recipient comes online. That way when the recipient sends release_held_htlc, the sender's counterparty will receive that message. To accomplish this, the sender's always-online counterparty includes said reply path in the revoke_and_ack message corresponding to the held HTLC. Here we add support for this field, though we don't set it yet. We also had to tweak the ser macros for this because impl_writeable_msg had never had to write a Vec in a message TLV field before. 
--- lightning/src/ln/channel.rs | 1 + lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/htlc_reserve_unit_tests.rs | 2 ++ lightning/src/ln/msgs.rs | 14 +++++++++++++- lightning/src/util/ser_macros.rs | 4 ++-- 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2f7853e192b..10270ef900e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8980,6 +8980,7 @@ where next_per_commitment_point: self.holder_commitment_point.next_point(), #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 04aa9a75457..ab2d730ac6b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -6544,6 +6544,7 @@ pub fn test_counterparty_raa_skip_no_crash() { next_per_commitment_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); assert_eq!( diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 7f411a6fcb6..dc5d07c180e 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -936,6 +936,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); expect_and_process_pending_htlcs(&nodes[1], false); @@ -2381,6 +2382,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs 
index 5540007e88d..b4034391564 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -31,6 +31,7 @@ use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; +use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{ BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs, UnauthenticatedReceiveTlvs, }; @@ -888,6 +889,13 @@ pub struct RevokeAndACK { #[cfg(taproot)] /// Musig nonce the recipient should use in their next commitment signature message pub next_local_nonce: Option, + /// A list of `(htlc_id, blinded_path)`. The receiver of this message will use the blinded paths + /// as reply paths to [`HeldHtlcAvailable`] onion messages that they send to the often-offline + /// receiver of this HTLC. The `htlc_id` is used by the receiver of this message to identify which + /// held HTLC a given blinded path corresponds to. + /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + pub release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>, } /// An [`update_fee`] message to be sent to or received from a peer @@ -3178,7 +3186,9 @@ impl_writeable_msg!(RevokeAndACK, { channel_id, per_commitment_secret, next_per_commitment_point -}, {}); +}, { + (1, release_htlc_message_paths, optional_vec) +}); #[cfg(taproot)] impl_writeable_msg!(RevokeAndACK, { @@ -3186,6 +3196,7 @@ impl_writeable_msg!(RevokeAndACK, { per_commitment_secret, next_per_commitment_point }, { + (1, release_htlc_message_paths, optional_vec), (4, next_local_nonce, option) }); @@ -5830,6 +5841,7 @@ mod tests { next_per_commitment_point: pubkey_1, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; let encoded_value = raa.encode(); let target_value = 
>::from_hex("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap(); diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index ea7a3e8a2a2..647e7c77a6c 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -700,7 +700,7 @@ macro_rules! impl_writeable_msg { impl $crate::util::ser::Writeable for $st { fn write(&self, w: &mut W) -> Result<(), $crate::io::Error> { $( self.$field.write(w)?; )* - $crate::encode_tlv_stream!(w, {$(($type, self.$tlvfield.as_ref(), $fieldty)),*}); + $crate::encode_tlv_stream!(w, {$(($type, &self.$tlvfield, $fieldty)),*}); Ok(()) } } @@ -713,7 +713,7 @@ macro_rules! impl_writeable_msg { $crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*}); Ok(Self { $($field,)* - $($tlvfield),* + $($tlvfield: $crate::_init_tlv_based_struct_field!($tlvfield, $fieldty)),* }) } } From 324312e83ed5afb3a59980ea82abd6a06c69cc50 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 3 Sep 2025 13:38:05 -0400 Subject: [PATCH 07/20] Extract helper for failing HTLC intercepts Makes the next commit cleaner. --- lightning/src/ln/channelmanager.rs | 33 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 79f4519673b..594fd51a956 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10664,6 +10664,22 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; + let mut fail_intercepted_htlc = || { + let htlc_source = + HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); + let reason = HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::UnknownNextPeer, + ); + let failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: scid, + }; + failed_intercept_forwards.push(( + htlc_source, + payment_hash, + reason, + failure_type, + )); + }; match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); @@ -10713,22 +10729,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid ); - let htlc_source = HTLCSource::PreviousHopData( - pending_add.htlc_previous_hop_data(), - ); - let reason = HTLCFailReason::from_failure_code( - LocalHTLCFailureReason::UnknownNextPeer, - ); - let failure_type = - HTLCHandlingFailureType::InvalidForward { - requested_forward_scid: scid, - }; - failed_intercept_forwards.push(( - htlc_source, - payment_hash, - reason, - failure_type, - )); + fail_intercepted_htlc(); }, } } else { From 0213a9b8ea01f8a2b389f6949b232cf066779d0d Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Sat, 16 Aug 2025 00:53:41 -0400 Subject: [PATCH 08/20] Store held htlcs in pending_intercepted_htlcs As part of supporting sending payments as an often-offline sender, the sender's always-online channel counterparty needs to hold onto the sender's HTLC until they receive a release_held_htlc onion message from the often-offline recipient. Here we implement storing these held HTLCs in the existing ChannelManager::pending_intercepted_htlcs map. 
We want to move in the direction of obviating the need to persist the ChannelManager entirely, so it doesn't really make sense to add a whole new map for these HTLCs. --- lightning/src/ln/channelmanager.rs | 111 +++++++++++++++++++++++++---- lightning/src/ln/onion_payment.rs | 1 + 2 files changed, 99 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 594fd51a956..9d1ad3115ec 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -229,6 +229,9 @@ pub enum PendingHTLCRouting { blinded: Option, /// The absolute CLTV of the inbound HTLC incoming_cltv_expiry: Option, + /// Whether this HTLC should be held by our node until we receive a corresponding + /// [`ReleaseHeldHtlc`] onion message. + hold_htlc: Option<()>, }, /// An HTLC which should be forwarded on to another Trampoline node. TrampolineForward { @@ -371,6 +374,15 @@ impl PendingHTLCRouting { Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), } } + + /// Whether this HTLC should be held by our node until we receive a corresponding + /// [`ReleaseHeldHtlc`] onion message. + fn should_hold_htlc(&self) -> bool { + match self { + Self::Forward { hold_htlc: Some(()), .. } => true, + _ => false, + } + } } /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it @@ -641,6 +653,34 @@ impl Readable for PaymentId { #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct InterceptId(pub [u8; 32]); +impl InterceptId { + /// This intercept id corresponds to an HTLC that will be forwarded on + /// [`ChannelManager::forward_intercepted_htlc`]. + fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self { + Self(Sha256::hash(ss).to_byte_array()) + } + + /// This intercept id corresponds to an HTLC that will be forwarded on receipt of a + /// [`ReleaseHeldHtlc`] onion message.
+ fn from_htlc_id_and_chan_id( + htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Self { + let htlc_id_size = core::mem::size_of::(); + let chan_id_size = core::mem::size_of::(); + let cp_id_serialized = counterparty_node_id.serialize(); + + const RES_SIZE: usize = 8 + 32 + 33; + debug_assert_eq!(RES_SIZE, htlc_id_size + chan_id_size + cp_id_serialized.len()); + + let mut res = [0u8; RES_SIZE]; + res[..htlc_id_size].copy_from_slice(&htlc_id.to_be_bytes()); + res[htlc_id_size..htlc_id_size + chan_id_size].copy_from_slice(&channel_id.0); + res[htlc_id_size + chan_id_size..].copy_from_slice(&cp_id_serialized); + + Self(Sha256::hash(&res[..]).to_byte_array()) + } +} + impl Writeable for InterceptId { fn write(&self, w: &mut W) -> Result<(), io::Error> { self.0.write(w) @@ -2588,8 +2628,14 @@ pub struct ChannelManager< pub(super) forward_htlcs: Mutex>>, #[cfg(not(test))] forward_htlcs: Mutex>>, - /// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here - /// until the user tells us what we should do with them. + /// Storage for HTLCs that have been intercepted. + /// + /// These HTLCs fall into two categories: + /// 1. HTLCs that are bubbled up to the user and held until the invocation of + /// [`ChannelManager::forward_intercepted_htlc`] or [`ChannelManager::fail_intercepted_htlc`] + /// (or timeout) + /// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a + /// [`ReleaseHeldHtlc`] onion message from an often-offline recipient /// /// See `ChannelManager` struct-level documentation for lock order requirements. pending_intercepted_htlcs: Mutex>, @@ -6268,13 +6314,18 @@ where })?; let routing = match payment.forward_info.routing { - PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. 
} => { - PendingHTLCRouting::Forward { - onion_packet, - blinded, - incoming_cltv_expiry, - short_channel_id: next_hop_scid, - } + PendingHTLCRouting::Forward { + onion_packet, + blinded, + incoming_cltv_expiry, + hold_htlc, + .. + } => PendingHTLCRouting::Forward { + onion_packet, + blinded, + incoming_cltv_expiry, + hold_htlc, + short_channel_id: next_hop_scid, }, _ => unreachable!(), // Only `PendingHTLCRouting::Forward`s are intercepted }; @@ -10682,7 +10733,25 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same scid and htlc id"); + fail_intercepted_htlc(); + }, + } + } else { + entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + } }, hash_map::Entry::Vacant(entry) => { if !is_our_scid @@ -10692,9 +10761,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ scid, &self.chain_hash, ) { - let intercept_id = InterceptId( - Sha256::hash(&pending_add.forward_info.incoming_shared_secret) - .to_byte_array(), + let intercept_id = InterceptId::from_incoming_shared_secret( + &pending_add.forward_info.incoming_shared_secret, ); let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); @@ -10732,6 +10800,22 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ fail_intercepted_htlc(); }, } + } else if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same scid and htlc id"); + fail_intercepted_htlc(); + }, + } } else { entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); } @@ -14850,6 +14934,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, blinded, option), (2, short_channel_id, required), (3, incoming_cltv_expiry, option), + (5, hold_htlc, option), }, (1, Receive) => { (0, payment_data, required), diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index acaa0304f29..0934c6c812b 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -190,6 +190,7 @@ pub(super) fn create_fwd_pending_htlc_info( onion_packet: outgoing_packet, short_channel_id, incoming_cltv_expiry: Some(msg.cltv_expiry), + hold_htlc: msg.hold_htlc, blinded: intro_node_blinding_point.or(msg.blinding_point) .map(|bp| BlindedForward { inbound_blinding_point: bp, From 77719ad7aea01b33dcc49ac4538818e84537e8ee Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 29 Aug 2025 23:40:30 -0400 Subject: [PATCH 09/20] Support creating reply_path for HeldHtlcAvailable As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages such that the reply path to the message terminates at their always-online channel counterparty that is holding the HTLC. That way when the recipient responds with release_held_htlc, the sender's counterparty will receive that message. 
Here we add a method for creating said reply path, which will be used in the next commit. --- lightning/src/blinded_path/message.rs | 18 +++++++++++-- lightning/src/ln/channelmanager.rs | 12 +++++++++ lightning/src/offers/flow.rs | 39 +++++++++++++++++++++++++-- 3 files changed, 65 insertions(+), 4 deletions(-) diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 7d721cd1fdc..e291c83b66c 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -19,7 +19,7 @@ use crate::blinded_path::{BlindedHop, BlindedPath, Direction, IntroductionNode, use crate::crypto::streams::ChaChaPolyReadAdapter; use crate::io; use crate::io::Cursor; -use crate::ln::channelmanager::PaymentId; +use crate::ln::channelmanager::{InterceptId, PaymentId}; use crate::ln::msgs::DecodeError; use crate::ln::onion_utils; use crate::offers::nonce::Nonce; @@ -556,7 +556,7 @@ pub enum AsyncPaymentsContext { }, /// Context contained within the reply [`BlindedMessagePath`] we put in outbound /// [`HeldHtlcAvailable`] messages, provided back to us in corresponding [`ReleaseHeldHtlc`] - /// messages. + /// messages if we are an always-online sender paying an async recipient. /// /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc @@ -577,6 +577,17 @@ pub enum AsyncPaymentsContext { /// able to trivially ask if we're online forever. path_absolute_expiry: core::time::Duration, }, + /// Context contained within the reply [`BlindedMessagePath`] put in outbound + /// [`HeldHtlcAvailable`] messages, provided back to the async sender's always-online counterparty + /// in corresponding [`ReleaseHeldHtlc`] messages. 
+ /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + ReleaseHeldHtlc { + /// An identifier for the HTLC that should be released by us as the sender's always-online + /// channel counterparty to the often-offline recipient. + intercept_id: InterceptId, + }, } impl_writeable_tlv_based_enum!(MessageContext, @@ -632,6 +643,9 @@ impl_writeable_tlv_based_enum!(AsyncPaymentsContext, (2, invoice_slot, required), (4, path_absolute_expiry, required), }, + (6, ReleaseHeldHtlc) => { + (0, intercept_id, required), + }, ); /// Contains a simple nonce for use in a blinded path's context. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9d1ad3115ec..05aed928d93 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5450,6 +5450,18 @@ where res } + /// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to + /// create a path for the sender to use as the reply path when they send the recipient a + /// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be + /// received by our node. + fn path_for_release_held_htlc( + &self, htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> BlindedMessagePath { + let intercept_id = + InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id); + self.flow.path_for_release_held_htlc(intercept_id, &*self.entropy_source) + } + /// Signals that no further attempts for the given payment should occur. Useful if you have a /// pending outbound payment with retries remaining, but wish to stop retrying the payment before /// retries are exhausted.
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 8890431123e..1541d0e01c7 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -32,7 +32,7 @@ use crate::prelude::*; use crate::chain::BestBlock; use crate::ln::channel_state::ChannelDetails; -use crate::ln::channelmanager::{PaymentId, CLTV_FAR_FAR_AWAY}; +use crate::ln::channelmanager::{InterceptId, PaymentId, CLTV_FAR_FAR_AWAY}; use crate::ln::inbound_payment; use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache; use crate::offers::invoice::{ @@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{ StaticInvoicePersisted, }; use crate::onion_message::messenger::{ - Destination, MessageRouter, MessageSendInstructions, Responder, + Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH, }; use crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::OnionMessageContents; @@ -1154,6 +1154,41 @@ where Ok(()) } + /// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to + /// create a path for the sender to use as the reply path when they send the recipient a + /// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be + /// received by our node. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + pub fn path_for_release_held_htlc( + &self, intercept_id: InterceptId, entropy: ES, + ) -> BlindedMessagePath + where + ES::Target: EntropySource, + { + let peers = self.peers_cache.lock().unwrap().clone(); + let context = + MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id }); + if let Ok(mut paths) = self.create_blinded_paths(peers, context.clone()) { + if let Some(path) = paths.pop() { + return path; + } + } + + // If the `MessageRouter` fails on blinded path creation, fall back to creating a 1-hop blinded + // path.
+ let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1); + BlindedMessagePath::new_with_dummy_hops( + &[], + self.get_our_node_id(), + num_dummy_hops, + self.receive_auth_key, + context, + &*entropy, + &self.secp_ctx, + ) + } + /// Enqueues the created [`DNSSECQuery`] to be sent to the counterparty. /// /// # Peers From b5bd601c23d82c0a880b3ce831b4f4dc6ee01c42 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Sat, 30 Aug 2025 00:33:20 -0400 Subject: [PATCH 10/20] Include release_held_htlc blinded paths in RAA As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages such that the reply path to the message terminates at their always-online channel counterparty that is holding the HTLC. That way when the recipient responds with release_held_htlc, the sender's counterparty will receive that message. Here the counterparty starts including said reply paths in the revoke_and_ack message destined for the sender, so the sender can use these paths in subsequent held_htlc_available messages. We put the paths in the RAA to ensure the sender receives the blinded paths, because failure to deliver the paths means the HTLC will timeout/fail. 
--- lightning/src/ln/channel.rs | 74 +++++++++++++++++++++++------- lightning/src/ln/channelmanager.rs | 22 +++++---- 2 files changed, 72 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 10270ef900e..c869d513ddd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn}; #[cfg(splicing)] use bitcoin::{FeeRate, Sequence}; +use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; @@ -282,6 +283,29 @@ impl InboundHTLCState { _ => None, } } + + /// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`] + /// onion message. + /// + ///[`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + fn should_hold_htlc(&self) -> bool { + match self { + InboundHTLCState::RemoteAnnounced(res) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { + InboundHTLCResolution::Resolved { pending_htlc_status } => { + match pending_htlc_status { + PendingHTLCStatus::Forward(info) => info.routing.should_hold_htlc(), + _ => false, + } + }, + InboundHTLCResolution::Pending { update_add_htlc } => { + update_add_htlc.hold_htlc.is_some() + }, + }, + InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + } + } } struct InboundHTLCOutput { @@ -1602,12 +1626,12 @@ where } #[rustfmt::skip] - pub fn signer_maybe_unblocked( - &mut self, chain_hash: ChainHash, logger: &L, - ) -> Option where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP + ) -> Option where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::Funded(chan) => 
Some(chan.signer_maybe_unblocked(logger)), + ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)), ChannelPhase::UnfundedOutboundV1(chan) => { let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger); Some(SignerResumeUpdates { @@ -8707,13 +8731,14 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. #[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, - user_config: &UserConfig, best_block_height: u32 + user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); @@ -8770,7 +8795,7 @@ where } let mut raa = if self.context.monitor_pending_revoke_and_ack { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; let mut commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() @@ -8859,7 +8884,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. 
#[rustfmt::skip] - pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, logger: &L, path_for_release_htlc: CBP + ) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { if !self.holder_commitment_point.can_advance() { log_trace!(logger, "Attempting to update holder per-commitment point..."); self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); @@ -8887,7 +8914,7 @@ where } else { None }; let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack { log_trace!(logger, "Attempting to generate pending revoke and ack..."); - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; if self.context.resend_order == RAACommitmentOrder::CommitmentFirst @@ -8958,9 +8985,12 @@ where } } - fn get_last_revoke_and_ack(&mut self, logger: &L) -> Option + fn get_last_revoke_and_ack( + &mut self, path_for_release_htlc: CBP, logger: &L, + ) -> Option where L::Target: Logger, + CBP: Fn(u64) -> BlindedMessagePath, { debug_assert!( self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2 @@ -8973,6 +9003,14 @@ where .ok(); if let Some(per_commitment_secret) = per_commitment_secret { if self.holder_commitment_point.can_advance() { + let mut release_htlc_message_paths = Vec::new(); + for htlc in &self.context.pending_inbound_htlcs { + if htlc.state.should_hold_htlc() { + let path = path_for_release_htlc(htlc.htlc_id); + release_htlc_message_paths.push((htlc.htlc_id, path)); + } + } + self.context.signer_pending_revoke_and_ack = false; return Some(msgs::RevokeAndACK { channel_id: self.context.channel_id, @@ -8980,7 +9018,7 @@ where next_per_commitment_point: self.holder_commitment_point.next_point(), #[cfg(taproot)] next_local_nonce: None, - release_htlc_message_paths: Vec::new(), + release_htlc_message_paths, 
}); } } @@ -9128,13 +9166,15 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. #[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, + path_for_release_htlc: CBP, ) -> Result where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it @@ -9235,7 +9275,7 @@ where self.context.monitor_pending_revoke_and_ack = true; None } else { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -16490,6 +16530,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Receive funding_signed, but the channel will be configured to hold sending channel_ready and @@ -16504,6 +16545,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, // as the funding transaction depends on all channels in the batch becoming ready. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 05aed928d93..59252eb53fb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -377,7 +377,7 @@ impl PendingHTLCRouting { /// Whether this HTLC should be held by our node until we receive a corresponding /// [`ReleaseHeldHtlc`] onion message. 
- fn should_hold_htlc(&self) -> bool { + pub(super) fn should_hold_htlc(&self) -> bool { match self { Self::Forward { hold_htlc: Some(()), .. } => true, _ => false, @@ -3430,18 +3430,20 @@ macro_rules! emit_initial_channel_ready_event { /// set for this channel is empty! macro_rules! handle_monitor_update_completion { ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { + let channel_id = $chan.context.channel_id(); + let counterparty_node_id = $chan.context.get_counterparty_node_id(); #[cfg(debug_assertions)] { let in_flight_updates = - $peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id()); + $peer_state.in_flight_monitor_updates.get(&channel_id); assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); assert_eq!($chan.blocked_monitor_updates_pending(), 0); } let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); let mut updates = $chan.monitor_updating_restored(&&logger, &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(), - $self.best_block.read().unwrap().height); - let counterparty_node_id = $chan.context.get_counterparty_node_id(); + $self.best_block.read().unwrap().height, + |htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id)); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a @@ -3457,7 +3459,7 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); + .remove(&channel_id).unwrap_or(Vec::new()); let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, @@ -3468,7 +3470,6 @@ macro_rules! 
handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.context.channel_id(); let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -11145,6 +11146,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.chain_hash, &self.config.read().unwrap(), &*self.best_block.read().unwrap(), + |htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id) ); let responses = try_channel_entry!(self, peer_state, res, chan_entry); let mut channel_update = None; @@ -11608,9 +11610,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Returns whether we should remove this channel as it's just been closed. let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Option { + let channel_id = chan.context().channel_id(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) { + if let Some(msgs) = chan.signer_maybe_unblocked( + self.chain_hash, &&logger, + |htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id) + ) { if let Some(msg) = msgs.open_channel { pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, @@ -11631,7 +11637,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { node_id, - channel_id: chan.context().channel_id(), + channel_id, updates, }); let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { From 110bfab9296dc862d041f7fd7f739ba6c5290e2c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 19 Aug 2025 17:27:53 -0400 Subject: [PATCH 11/20] Release held htlcs on release_held_htlc As part of supporting sending payments as an often-offline sender, the sender's always-online channel counterparty needs to hold onto the sender's HTLC until they receive a release_held_htlc onion message from the often-offline recipient. Here we implement forwarding these held HTLCs upon receipt of the release message from the recipient. --- lightning/src/ln/channelmanager.rs | 52 +++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 59252eb53fb..02dc83435d0 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14726,18 +14726,48 @@ where } fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { - let payment_id = match context { - AsyncPaymentsContext::OutboundPayment { payment_id } => payment_id, + match context { + AsyncPaymentsContext::OutboundPayment { payment_id } => { + if let Err(e) = self.send_payment_for_static_invoice(payment_id) { + log_trace!( + self.logger, + "Failed to release held HTLC with payment id {}: {:?}", + payment_id, + e + ); + } + }, + AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id } => { + let mut htlc = { + let mut pending_intercept_htlcs = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercept_htlcs.remove(&intercept_id) { + Some(htlc) => htlc, + None => return, + } + }; + match htlc.forward_info.routing { + PendingHTLCRouting::Forward { 
ref mut hold_htlc, .. } => { + debug_assert!(hold_htlc.is_some()); + *hold_htlc = None; + }, + _ => { + debug_assert!(false, "HTLC intercepts can only be forwards"); + return; + }, + } + let mut per_source_pending_forward = [( + htlc.prev_short_channel_id, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + PersistenceNotifierGuard::notify_on_drop(self); + }, _ => return, - }; - - if let Err(e) = self.send_payment_for_static_invoice(payment_id) { - log_trace!( - self.logger, - "Failed to release held HTLC with payment id {}: {:?}", - payment_id, - e - ); } } From d3d5116b18adca527b44e6c88d87098d5f983847 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 21 Aug 2025 14:19:01 -0400 Subject: [PATCH 12/20] Add init_features to list_channels filter callback Useful to filter for channel peers that support a specific feature, in this case the hold_htlc feature, in upcoming commits. --- lightning/src/ln/channelmanager.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 02dc83435d0..14d1fc4e85e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4014,7 +4014,9 @@ where Ok(temporary_channel_id) } - fn list_funded_channels_with_filter)) -> bool + Copy>( + fn list_funded_channels_with_filter< + Fn: FnMut(&(&InitFeatures, &ChannelId, &Channel)) -> bool + Copy, + >( &self, f: Fn, ) -> Vec { // Allocate our best estimate of the number of channels we have in the `res` @@ -4030,9 +4032,13 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; // Only `Channels` in the `Channel::Funded` phase can be considered funded. 
- let filtered_chan_by_id = - peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f); - res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| { + let filtered_chan_by_id = peer_state + .channel_by_id + .iter() + .map(|(cid, c)| (&peer_state.latest_features, cid, c)) + .filter(|(_, _, chan)| chan.is_funded()) + .filter(f); + res.extend(filtered_chan_by_id.map(|(_, _channel_id, channel)| { ChannelDetails::from_channel( channel, best_block_height, @@ -4084,7 +4090,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context().is_live()) + self.list_funded_channels_with_filter(|&(_, _, ref channel)| channel.context().is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. From c9b77bb961a17cb0fe4036c6d5904da48e44c46e Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 18 Aug 2025 17:37:43 -0400 Subject: [PATCH 13/20] Add HTLCSource::OutboundRoute::hold_htlc As part of supporting sending payments as an often-offline sender, the sender needs to be able to set a flag in their update_add_htlc message indicating that the HTLC should be held until receipt of a release_held_htlc onion message from the often-offline payment recipient. We don't yet ever set this flag, but lay the groundwork by including the field in the HTLCSource::OutboundRoute enum variant. 
See-also --- fuzz/src/process_onion_failure.rs | 1 + lightning/src/ln/channel.rs | 2 ++ lightning/src/ln/channelmanager.rs | 13 +++++++++++++ lightning/src/ln/onion_utils.rs | 4 ++++ 4 files changed, 20 insertions(+) diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 1bc9900718a..2f038000ece 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -120,6 +120,7 @@ fn do_test(data: &[u8], out: Out) { first_hop_htlc_msat: 0, payment_id, bolt12_invoice: None, + hold_htlc: None, }; let failure_len = get_u16!(); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c869d513ddd..6a856656b71 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -15098,6 +15098,7 @@ mod tests { first_hop_htlc_msat: 548, payment_id: PaymentId([42; 32]), bolt12_invoice: None, + hold_htlc: None, }, skimmed_fee_msat: None, blinding_point: None, @@ -15545,6 +15546,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([42; 32]), bolt12_invoice: None, + hold_htlc: None, }; let dummy_outbound_output = OutboundHTLCOutput { htlc_id: 0, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 14d1fc4e85e..d56d9b78133 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -748,6 +748,10 @@ mod fuzzy_channelmanager { /// we can provide proof-of-payment details in payment claim events even after a restart /// with a stale ChannelManager state. bolt12_invoice: Option, + /// Whether we want our next-hop channel peer to hold onto this HTLC until they receive an + /// onion message from the often-offline recipient indicating that the recipient is online and + /// ready to receive the HTLC. 
+ hold_htlc: Option<()>, }, } @@ -791,6 +795,7 @@ impl core::hash::Hash for HTLCSource { payment_id, first_hop_htlc_msat, bolt12_invoice, + hold_htlc, } => { 1u8.hash(hasher); path.hash(hasher); @@ -798,6 +803,7 @@ impl core::hash::Hash for HTLCSource { payment_id.hash(hasher); first_hop_htlc_msat.hash(hasher); bolt12_invoice.hash(hasher); + hold_htlc.hash(hasher); }, } } @@ -811,6 +817,7 @@ impl HTLCSource { first_hop_htlc_msat: 0, payment_id: PaymentId([2; 32]), bolt12_invoice: None, + hold_htlc: None, } } @@ -5019,6 +5026,7 @@ where first_hop_htlc_msat: htlc_msat, payment_id, bolt12_invoice: bolt12_invoice.cloned(), + hold_htlc: None, }; let send_res = chan.send_htlc_and_commit( htlc_msat, @@ -15212,6 +15220,7 @@ impl Readable for HTLCSource { let mut payment_params: Option = None; let mut blinded_tail: Option = None; let mut bolt12_invoice: Option = None; + let mut hold_htlc: Option<()> = None; read_tlv_fields!(reader, { (0, session_priv, required), (1, payment_id, option), @@ -15220,6 +15229,7 @@ impl Readable for HTLCSource { (5, payment_params, (option: ReadableArgs, 0)), (6, blinded_tail, option), (7, bolt12_invoice, option), + (9, hold_htlc, option), }); if payment_id.is_none() { // For backwards compat, if there was no payment_id written, use the session_priv bytes @@ -15243,6 +15253,7 @@ impl Readable for HTLCSource { path, payment_id: payment_id.unwrap(), bolt12_invoice, + hold_htlc, }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -15260,6 +15271,7 @@ impl Writeable for HTLCSource { ref path, payment_id, bolt12_invoice, + hold_htlc, } => { 0u8.write(writer)?; let payment_id_opt = Some(payment_id); @@ -15272,6 +15284,7 @@ impl Writeable for HTLCSource { (5, None::, option), // payment_params in LDK versions prior to 0.0.115 (6, path.blinded_tail, option), (7, bolt12_invoice, option), + (9, hold_htlc, option), }); }, HTLCSource::PreviousHopData(ref field) => { diff --git a/lightning/src/ln/onion_utils.rs 
b/lightning/src/ln/onion_utils.rs index d45860b0e26..ac008f29137 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -3413,6 +3413,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; process_onion_failure(&ctx_full, &logger, &htlc_source, onion_error) @@ -3603,6 +3604,7 @@ mod tests { first_hop_htlc_msat: dummy_amt_msat, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; { @@ -3791,6 +3793,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; // Iterate over all possible failure positions and check that the cases that can be attributed are. @@ -3900,6 +3903,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; let decrypted_failure = process_onion_failure(&ctx_full, &logger, &htlc_source, packet); From cd041ed2cfd48689cb3877e2f948787783aa9c85 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 21 Aug 2025 14:49:56 -0400 Subject: [PATCH 14/20] Add hold_htlcs param to pay_route_internal As part of supporting sending payments as an often-offline sender, the sender needs to be able to set a flag in their update_add_htlc message indicating that the HTLC should be held until receipt of a release_held_htlc onion message from the often-offline payment recipient. We don't yet ever set this flag, but lay the groundwork by including the parameter in the pay_route method. 
See-also --- lightning/src/ln/outbound_payment.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index a66c21ac4ef..34fa87d95ff 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -1089,8 +1089,8 @@ impl OutboundPayments { let result = self.pay_route_internal( &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, Some(&bolt12_invoice), payment_id, - Some(route_params.final_value_msat), &onion_session_privs, node_signer, best_block_height, - &send_payment_along_path + Some(route_params.final_value_msat), &onion_session_privs, false, node_signer, + best_block_height, &send_payment_along_path ); log_info!( logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, @@ -1486,7 +1486,7 @@ impl OutboundPayments { })?; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, - keysend_preimage, None, None, payment_id, None, &onion_session_privs, node_signer, + keysend_preimage, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); @@ -1649,8 +1649,8 @@ impl OutboundPayments { } }; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, - invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), &onion_session_privs, node_signer, - best_block_height, &send_payment_along_path); + invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), + &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { self.handle_pay_route_err( @@ -1814,8 +1814,8 @@ impl OutboundPayments { let recipient_onion_fields 
= RecipientOnionFields::spontaneous_empty(); match self.pay_route_internal(&route, payment_hash, &recipient_onion_fields, - None, None, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, - &send_payment_along_path + None, None, None, payment_id, None, &onion_session_privs, false, node_signer, + best_block_height, &send_payment_along_path ) { Ok(()) => Ok((payment_hash, payment_id)), Err(e) => { @@ -2063,7 +2063,7 @@ impl OutboundPayments { &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, bolt12_invoice: Option<&PaidBolt12Invoice>, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, - node_signer: &NS, best_block_height: u32, send_payment_along_path: &F + hold_htlcs_at_next_hop: bool, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where NS::Target: NodeSigner, @@ -2186,7 +2186,7 @@ impl OutboundPayments { { self.pay_route_internal(route, payment_hash, &recipient_onion, keysend_preimage, None, None, payment_id, recv_value_msat, &onion_session_privs, - node_signer, best_block_height, &send_payment_along_path) + false, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } From 16c262ddb7701dd2decde752034db759b18dc965 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 21 Aug 2025 15:08:33 -0400 Subject: [PATCH 15/20] Add hold_htlcs field in StaticInvReceived outbounds As part of supporting sending payments as an often-offline sender, the sender needs to be able to set a flag in their update_add_htlc message indicating that the HTLC should be held until receipt of a release_held_htlc onion message from the often-offline payment recipient. 
We don't yet ever set this flag, but lay the groundwork by including the field in the outbound payment variant for static invoices. We also add a helper method to gather channels for nodes that advertise support for the hold_htlc feature, which will be used in the next commit. See-also --- lightning/src/ln/channelmanager.rs | 39 ++++++++++++++++++++++++++++ lightning/src/ln/outbound_payment.rs | 25 ++++++++++++++++-- lightning/src/util/config.rs | 14 ++++++++++ 3 files changed, 76 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d56d9b78133..bbdd831c242 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -5385,12 +5385,14 @@ where &self, invoice: &StaticInvoice, payment_id: PaymentId, ) -> Result<(), Bolt12PaymentError> { let mut res = Ok(()); + let hold_htlc_channels_res = self.hold_htlc_channels(); PersistenceNotifierGuard::optionally_notify(self, || { let best_block_height = self.best_block.read().unwrap().height; let features = self.bolt12_invoice_features(); let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received( invoice, payment_id, + hold_htlc_channels_res.is_ok(), features, best_block_height, self.duration_since_epoch(), @@ -5430,6 +5432,43 @@ where res } + /// Returns a list of channels where our counterparty supports + /// [`InitFeatures::supports_htlc_hold`], or an error if there are none or we detect that we are + /// an announced node. Useful for sending async payments to [`StaticInvoice`]s. 
+ fn hold_htlc_channels(&self) -> Result, ()> { + let should_send_async = { + let cfg = self.config.read().unwrap(); + cfg.hold_outbound_htlcs_at_next_hop + && !cfg.channel_handshake_config.announce_for_forwarding + && cfg.channel_handshake_limits.force_announced_channel_preference + }; + if !should_send_async { + return Err(()); + } + + let any_announced_channels = AtomicBool::new(false); + let hold_htlc_channels = + self.list_funded_channels_with_filter(|&(init_features, _, ref channel)| { + // If we have an announced channel, we are a node that is expected to be always-online and + // shouldn't be relying on channel counterparties to hold onto our HTLCs for us while + // waiting for the payment recipient to come online. + if channel.context().should_announce() { + any_announced_channels.store(true, Ordering::Relaxed); + } + if any_announced_channels.load(Ordering::Relaxed) { + return false; + } + + init_features.supports_htlc_hold() && channel.context().is_live() + }); + + if any_announced_channels.load(Ordering::Relaxed) || hold_htlc_channels.is_empty() { + Err(()) + } else { + Ok(hold_htlc_channels) + } + } + fn send_payment_for_static_invoice( &self, payment_id: PaymentId, ) -> Result<(), Bolt12PaymentError> { diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 34fa87d95ff..6798ef505da 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -103,6 +103,10 @@ pub(crate) enum PendingOutboundPayment { route_params: RouteParameters, invoice_request: InvoiceRequest, static_invoice: StaticInvoice, + // Whether we should pay the static invoice asynchronously, i.e. by setting + // [`UpdateAddHTLC::hold_htlc`] so our channel counterparty(s) hold the HTLC(s) for us until the + // recipient comes online, allowing us to go offline after locking in the HTLC(s). 
+ hold_htlcs_at_next_hop: bool, // The deadline as duration since the Unix epoch for the async recipient to come online, // after which we'll fail the payment. // @@ -1107,8 +1111,9 @@ impl OutboundPayments { } pub(super) fn static_invoice_received( - &self, invoice: &StaticInvoice, payment_id: PaymentId, features: Bolt12InvoiceFeatures, - best_block_height: u32, duration_since_epoch: Duration, entropy_source: ES, + &self, invoice: &StaticInvoice, payment_id: PaymentId, hold_htlcs_at_next_hop: bool, + features: Bolt12InvoiceFeatures, best_block_height: u32, duration_since_epoch: Duration, + entropy_source: ES, pending_events: &Mutex)>>, ) -> Result<(), Bolt12PaymentError> where @@ -1192,6 +1197,13 @@ impl OutboundPayments { RetryableSendFailure::OnionPacketSizeExceeded, )); } + + // If we expect the HTLCs for this payment to be held at our next-hop counterparty, don't + // retry the payment. In future iterations of this feature, we will send this payment via + // trampoline and the counterparty will retry on our behalf. + if hold_htlcs_at_next_hop { + *retry_strategy = Retry::Attempts(0); + } let absolute_expiry = duration_since_epoch.saturating_add(ASYNC_PAYMENT_TIMEOUT_RELATIVE_EXPIRY); @@ -1200,6 +1212,7 @@ impl OutboundPayments { keysend_preimage, retry_strategy: *retry_strategy, route_params, + hold_htlcs_at_next_hop, invoice_request: retryable_invoice_request .take() .ok_or(Bolt12PaymentError::UnexpectedInvoice)? @@ -2759,6 +2772,12 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, // HTLCs are in-flight. (9, StaticInvoiceReceived) => { (0, payment_hash, required), + // Added in 0.2. If this field is set when this variant is created, the HTLCs are sent + // immediately after and the pending outbound is also immediately transitioned to Retryable. 
+ // However, if we crash and then downgrade before the transition to Retryable, this payment will + // sit in outbounds until it either times out in `remove_stale_payments` or is manually + // abandoned. + (1, hold_htlcs_at_next_hop, required), (2, keysend_preimage, required), (4, retry_strategy, required), (6, route_params, required), @@ -3418,6 +3437,7 @@ mod tests { invoice_request: dummy_invoice_request(), static_invoice: dummy_static_invoice(), expiry_time: Duration::from_secs(absolute_expiry + 2), + hold_htlcs_at_next_hop: false }; outbounds.insert(payment_id, outbound); core::mem::drop(outbounds); @@ -3468,6 +3488,7 @@ mod tests { invoice_request: dummy_invoice_request(), static_invoice: dummy_static_invoice(), expiry_time: now(), + hold_htlcs_at_next_hop: false, }; outbounds.insert(payment_id, outbound); core::mem::drop(outbounds); diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index f0c0330e14d..b997afb4824 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -947,6 +947,18 @@ pub struct UserConfig { /// Default value: `false` #[cfg(test)] pub enable_htlc_hold: bool, + /// If this is set to true, then if we as an often-offline payer receive a [`StaticInvoice`] to + /// pay, we will attempt to hold the corresponding outbound HTLCs with our next-hop channel + /// counterparty(s) that support the `htlc_hold` feature. This allows our node to go offline once + /// the HTLCs are locked in even though the recipient may not yet be online to receive them. + /// + /// This option only applies if we are a private node, and will be ignored if we are an announced + /// node that is expected to be online at all times. 
+ /// + /// Default value: `true` + /// + /// [`StaticInvoice`]: crate::offers::static_invoice::StaticInvoice + pub hold_outbound_htlcs_at_next_hop: bool, } impl Default for UserConfig { @@ -963,6 +975,7 @@ impl Default for UserConfig { enable_dual_funded_channels: false, #[cfg(test)] enable_htlc_hold: false, + hold_outbound_htlcs_at_next_hop: true, } } } @@ -983,6 +996,7 @@ impl Readable for UserConfig { accept_intercept_htlcs: Readable::read(reader)?, manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, + hold_outbound_htlcs_at_next_hop: Readable::read(reader)?, }) } } From 0bebd4908d481cd6e413232f29ff5b6eba7f1ee1 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 21 Aug 2025 15:12:41 -0400 Subject: [PATCH 16/20] Set UpdateAddHTLC::hold_htlc for offline payees As part of supporting sending payments as an often-offline sender, the sender needs to be able to set a flag in their update_add_htlc message indicating that the HTLC should be held until receipt of a release_held_htlc onion message from the often-offline payment recipient. The prior commits laid groundwork to finally set the flag here in this commit. 
See-also --- lightning/src/ln/channel.rs | 2 +- lightning/src/ln/channelmanager.rs | 95 +++++++++++++++++++--------- lightning/src/ln/outbound_payment.rs | 20 +++--- 3 files changed, 79 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 6a856656b71..96b756b936a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -9066,7 +9066,7 @@ where onion_routing_packet: (**onion_packet).clone(), skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, - hold_htlc: None, // Will be set by the async sender when support is added + hold_htlc: htlc.source.hold_htlc_at_next_hop().then(|| ()), }); } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bbdd831c242..6c57e6bfd7b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -842,6 +842,13 @@ impl HTLCSource { _ => None, } } + + pub(crate) fn hold_htlc_at_next_hop(&self) -> bool { + match self { + Self::OutboundRoute { hold_htlc, .. } => hold_htlc.is_some(), + _ => false, + } + } } /// This enum is used to specify which error data to send to peers when failing back an HTLC @@ -4930,6 +4937,7 @@ where invoice_request: None, bolt12_invoice: None, session_priv_bytes, + hold_htlc_at_next_hop: false, }) } @@ -4945,6 +4953,7 @@ where invoice_request, bolt12_invoice, session_priv_bytes, + hold_htlc_at_next_hop, } = args; // The top-level caller should hold the total_consistency_lock read lock. 
debug_assert!(self.total_consistency_lock.try_write().is_err()); @@ -5026,7 +5035,7 @@ where first_hop_htlc_msat: htlc_msat, payment_id, bolt12_invoice: bolt12_invoice.cloned(), - hold_htlc: None, + hold_htlc: hold_htlc_at_next_hop.then(|| ()), }; let send_res = chan.send_htlc_and_commit( htlc_msat, @@ -5412,19 +5421,35 @@ where }, }; - let enqueue_held_htlc_available_res = self.flow.enqueue_held_htlc_available( - invoice, - payment_id, - self.get_peers_for_blinded_path(), - ); - if enqueue_held_htlc_available_res.is_err() { - self.abandon_payment_with_reason( + // If the call to `Self::hold_htlc_channels` succeeded, then we are a private node and can + // hold the HTLCs for this payment at our next-hop channel counterparty until the recipient + // comes online. This allows us to go offline after locking in the HTLCs. + if let Ok(channels) = hold_htlc_channels_res { + if let Err(e) = + self.send_payment_for_static_invoice_no_persist(payment_id, channels) + { + log_trace!( + self.logger, + "Failed to send held HTLC with payment id {}: {:?}", + payment_id, + e + ); + } + } else { + let enqueue_held_htlc_available_res = self.flow.enqueue_held_htlc_available( + invoice, payment_id, - PaymentFailureReason::BlindedPathCreationFailed, + self.get_peers_for_blinded_path(), ); - res = Err(Bolt12PaymentError::BlindedPathCreationFailed); - return NotifyOption::DoPersist; - }; + if enqueue_held_htlc_available_res.is_err() { + self.abandon_payment_with_reason( + payment_id, + PaymentFailureReason::BlindedPathCreationFailed, + ); + res = Err(Bolt12PaymentError::BlindedPathCreationFailed); + return NotifyOption::DoPersist; + }; + } NotifyOption::DoPersist }); @@ -5469,26 +5494,15 @@ where } } + /// If we want the HTLCs for this payment to be held at the next-hop channel counterparty, use + /// [`Self::hold_htlc_channels`] and pass the resulting channels in here. 
fn send_payment_for_static_invoice( - &self, payment_id: PaymentId, + &self, payment_id: PaymentId, first_hops: Vec, ) -> Result<(), Bolt12PaymentError> { - let best_block_height = self.best_block.read().unwrap().height; let mut res = Ok(()); PersistenceNotifierGuard::optionally_notify(self, || { - let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice( - payment_id, - &self.router, - self.list_usable_channels(), - || self.compute_inflight_htlcs(), - &self.entropy_source, - &self.node_signer, - &self, - &self.secp_ctx, - best_block_height, - &self.logger, - &self.pending_events, - |args| self.send_payment_along_path(args), - ); + let outbound_pmts_res = + self.send_payment_for_static_invoice_no_persist(payment_id, first_hops); match outbound_pmts_res { Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => { @@ -5504,6 +5518,27 @@ where res } + /// Useful if the caller is already triggering a persist of the `ChannelManager`. 
+ fn send_payment_for_static_invoice_no_persist( + &self, payment_id: PaymentId, first_hops: Vec, + ) -> Result<(), Bolt12PaymentError> { + let best_block_height = self.best_block.read().unwrap().height; + self.pending_outbound_payments.send_payment_for_static_invoice( + payment_id, + &self.router, + first_hops, + || self.compute_inflight_htlcs(), + &self.entropy_source, + &self.node_signer, + &self, + &self.secp_ctx, + best_block_height, + &self.logger, + &self.pending_events, + |args| self.send_payment_along_path(args), + ) + } + /// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to /// create a path for the sender to use as the reply path when they send the recipient a /// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be @@ -14781,7 +14816,9 @@ where fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { match context { AsyncPaymentsContext::OutboundPayment { payment_id } => { - if let Err(e) = self.send_payment_for_static_invoice(payment_id) { + if let Err(e) = + self.send_payment_for_static_invoice(payment_id, self.list_usable_channels()) + { log_trace!( self.logger, "Failed to release held HTLC with payment id {}: {:?}", diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 6798ef505da..9a828be0197 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -832,6 +832,7 @@ pub(super) struct SendAlongPathArgs<'a> { pub invoice_request: Option<&'a InvoiceRequest>, pub bolt12_invoice: Option<&'a PaidBolt12Invoice>, pub session_priv_bytes: [u8; 32], + pub hold_htlc_at_next_hop: bool, } pub(super) struct OutboundPayments { @@ -1064,7 +1065,7 @@ impl OutboundPayments { let payment_params = Some(route_params.payment_params.clone()); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - let onion_session_privs = match outbounds.entry(payment_id) { + let 
(onion_session_privs, hold_htlcs_at_next_hop) = match outbounds.entry(payment_id) { hash_map::Entry::Occupied(entry) => match entry.get() { PendingOutboundPayment::InvoiceReceived { .. } => { let (retryable_payment, onion_session_privs) = Self::create_pending_payment( @@ -1072,18 +1073,21 @@ impl OutboundPayments { Some(retry_strategy), payment_params, entropy_source, best_block_height, ); *entry.into_mut() = retryable_payment; - onion_session_privs + (onion_session_privs, false) }, PendingOutboundPayment::StaticInvoiceReceived { .. } => { - let invreq = if let PendingOutboundPayment::StaticInvoiceReceived { invoice_request, .. } = entry.remove() { - invoice_request - } else { unreachable!() }; + let (invreq, hold_htlcs_at_next_hop) = + if let PendingOutboundPayment::StaticInvoiceReceived { + invoice_request, hold_htlcs_at_next_hop, .. + } = entry.remove() { + (invoice_request, hold_htlcs_at_next_hop) + } else { unreachable!() }; let (retryable_payment, onion_session_privs) = Self::create_pending_payment( payment_hash, recipient_onion.clone(), keysend_preimage, Some(invreq), Some(bolt12_invoice.clone()), &route, Some(retry_strategy), payment_params, entropy_source, best_block_height ); outbounds.insert(payment_id, retryable_payment); - onion_session_privs + (onion_session_privs, hold_htlcs_at_next_hop) }, _ => return Err(Bolt12PaymentError::DuplicateInvoice), }, @@ -1093,7 +1097,7 @@ impl OutboundPayments { let result = self.pay_route_internal( &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, Some(&bolt12_invoice), payment_id, - Some(route_params.final_value_msat), &onion_session_privs, false, node_signer, + Some(route_params.final_value_msat), &onion_session_privs, hold_htlcs_at_next_hop, node_signer, best_block_height, &send_payment_along_path ); log_info!( @@ -2130,7 +2134,7 @@ impl OutboundPayments { let path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, 
cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, - bolt12_invoice, + bolt12_invoice, hold_htlc_at_next_hop: hold_htlcs_at_next_hop, session_priv_bytes: *session_priv_bytes }); results.push(path_res); From 5ea87248c2f2d054d048dad7e363e845489f5157 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 29 Aug 2025 15:50:18 -0400 Subject: [PATCH 17/20] Support held_htlc_available counterparty reply path As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages such that the reply path to the message terminates at their always-online channel counterparty that is holding the HTLC. That way when the recipient responds with release_held_htlc, the sender's counterparty will receive that message. Here we lay some groundwork for using a counterparty-created reply path when sending held_htlc_available as an async sender in the next commit. --- lightning/src/ln/channelmanager.rs | 11 +++++---- lightning/src/offers/flow.rs | 39 ++++++++++++++++++++++++------ 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6c57e6bfd7b..bea3e3b118f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -92,7 +92,7 @@ use crate::ln::outbound_payment::{ }; use crate::ln::types::ChannelId; use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache; -use crate::offers::flow::{InvreqResponseInstructions, OffersMessageFlow}; +use crate::offers::flow::{HeldHtlcReplyPath, InvreqResponseInstructions, OffersMessageFlow}; use crate::offers::invoice::{ Bolt12Invoice, DerivedSigningPubkey, InvoiceBuilder, DEFAULT_RELATIVE_EXPIRY, }; @@ -5436,11 +5436,12 @@ where ); } } else { - let enqueue_held_htlc_available_res = self.flow.enqueue_held_htlc_available( - invoice, + let reply_path = HeldHtlcReplyPath::ToUs { payment_id, - self.get_peers_for_blinded_path(), - ); + peers: 
self.get_peers_for_blinded_path(), + }; + let enqueue_held_htlc_available_res = + self.flow.enqueue_held_htlc_available(invoice, reply_path); if enqueue_held_htlc_available_res.is_err() { self.abandon_payment_with_reason( payment_id, diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 1541d0e01c7..7129726d6aa 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -412,6 +412,26 @@ pub enum InvreqResponseInstructions { }, } +/// Parameters for the reply path to a [`HeldHtlcAvailable`] onion message. +pub enum HeldHtlcReplyPath { + /// The reply path to the [`HeldHtlcAvailable`] message should terminate at our node. + ToUs { + /// The id of the payment. + payment_id: PaymentId, + /// The peers to use when creating this reply path. + peers: Vec, + }, + /// The reply path to the [`HeldHtlcAvailable`] message should terminate at our next-hop channel + /// counterparty, as they are holding our HTLC until they receive the corresponding + /// [`ReleaseHeldHtlc`] message. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + ToCounterparty { + /// The blinded path provided to us by our counterparty. 
+ path: BlindedMessagePath, + }, +} + impl OffersMessageFlow where MR::Target: MessageRouter, @@ -1131,14 +1151,19 @@ where /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc /// [`supports_onion_messages`]: crate::types::features::Features::supports_onion_messages pub fn enqueue_held_htlc_available( - &self, invoice: &StaticInvoice, payment_id: PaymentId, peers: Vec, + &self, invoice: &StaticInvoice, reply_path_params: HeldHtlcReplyPath, ) -> Result<(), Bolt12SemanticError> { - let context = - MessageContext::AsyncPayments(AsyncPaymentsContext::OutboundPayment { payment_id }); - - let reply_paths = self - .create_blinded_paths(peers, context) - .map_err(|_| Bolt12SemanticError::MissingPaths)?; + let reply_paths = match reply_path_params { + HeldHtlcReplyPath::ToUs { payment_id, peers } => { + let context = + MessageContext::AsyncPayments(AsyncPaymentsContext::OutboundPayment { + payment_id, + }); + self.create_blinded_paths(peers, context) + .map_err(|_| Bolt12SemanticError::MissingPaths)? + }, + HeldHtlcReplyPath::ToCounterparty { path } => vec![path], + }; let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap(); From b37d04732f029b798a697b13838904180e4379fe Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 29 Aug 2025 15:57:21 -0400 Subject: [PATCH 18/20] Send held_htlc_available with counterparty reply path As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages such that the reply path to the message terminates at their always-online channel counterparty that is holding the HTLC. That way when the recipient responds with release_held_htlc, the sender's counterparty will receive that message. After laying groundwork over some past commits, here we as an async sender send held_htlc_available messages using reply paths created by our always-online channel counterparty. 
--- lightning/src/ln/channel.rs | 37 +++++++++++++++++++++++++++--- lightning/src/ln/channelmanager.rs | 20 +++++++++++++--- 2 files changed, 51 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 96b756b936a..fc8dacc8596 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -79,6 +79,7 @@ use crate::ln::script::{self, ShutdownScript}; use crate::ln::types::ChannelId; #[cfg(splicing)] use crate::ln::LN_MAX_MSG_LEN; +use crate::offers::static_invoice::StaticInvoice; use crate::routing::gossip::NodeId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::tx_builder::{HTLCAmountDirection, NextCommitmentStats, SpecTxBuilder, TxBuilder}; @@ -7992,10 +7993,25 @@ where /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. + /// + /// The static invoices will be used by us as an async sender to enqueue [`HeldHtlcAvailable`] + /// onion messages for the often-offline recipient, and the blinded reply paths the invoices are + /// paired with were created by our channel counterparty and will be used as reply paths for + /// corresponding [`ReleaseHeldHtlc`] messages. 
+ /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc pub fn revoke_and_ack( &mut self, msg: &msgs::RevokeAndACK, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, hold_mon_update: bool, - ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option), ChannelError> + ) -> Result< + ( + Vec<(HTLCSource, PaymentHash)>, + Vec<(StaticInvoice, BlindedMessagePath)>, + Option, + ), + ChannelError, + > where F::Target: FeeEstimator, L::Target: Logger, @@ -8110,6 +8126,7 @@ where let mut finalized_claimed_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); let mut update_fail_malformed_htlcs = Vec::new(); + let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -8225,6 +8242,20 @@ where } } for htlc in pending_outbound_htlcs.iter_mut() { + for (htlc_id, blinded_path) in &msg.release_htlc_message_paths { + if htlc.htlc_id != *htlc_id { + continue; + } + let static_invoice = match htlc.source.static_invoice() { + Some(inv) => inv, + None => { + // This is reachable but it means the counterparty is buggy and included a release + // path for an HTLC that we didn't originally flag as a hold_htlc. 
+ continue; + }, + }; + static_invoices.push((static_invoice, blinded_path.clone())); + } if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { log_trace!( logger, @@ -8292,9 +8323,9 @@ where self.context .blocked_monitor_updates .push(PendingChannelMonitorUpdate { update: monitor_update }); - return Ok(($htlcs_to_fail, None)); + return Ok(($htlcs_to_fail, static_invoices, None)); } else { - return Ok(($htlcs_to_fail, Some(monitor_update))); + return Ok(($htlcs_to_fail, static_invoices, Some(monitor_update))); } }; } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bea3e3b118f..ee831db494f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -849,6 +849,16 @@ impl HTLCSource { _ => false, } } + + pub(crate) fn static_invoice(&self) -> Option { + match self { + Self::OutboundRoute { + bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(inv)), + .. + } => Some(inv.clone()), + _ => None, + } + } } /// This enum is used to specify which error data to send to peers when failing back an HTLC @@ -10989,7 +10999,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { - let htlcs_to_fail = { + let (htlcs_to_fail, static_invoices) = { let per_peer_state = self.per_peer_state.read().unwrap(); let mut peer_state_lock = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -11005,7 +11015,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let mon_update_blocked = self.raa_monitor_updates_held( &peer_state.actions_blocking_raa_monitor_updates, msg.channel_id, *counterparty_node_id); - let (htlcs_to_fail, monitor_update_opt) = try_channel_entry!(self, peer_state, + let (htlcs_to_fail, static_invoices, monitor_update_opt) = try_channel_entry!(self, peer_state, chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_entry); if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt @@ -11013,7 +11023,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } - htlcs_to_fail + (htlcs_to_fail, static_invoices) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a revoke_and_ack message for an unfunded channel!".into())), chan_entry); @@ -11023,6 +11033,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); + for (static_invoice, reply_path) in static_invoices { + let res = self.flow.enqueue_held_htlc_available(&static_invoice, HeldHtlcReplyPath::ToCounterparty { path: reply_path }); + debug_assert!(res.is_ok(), "enqueue_held_htlc_available can only fail for non-async senders"); + } Ok(()) } From a271381075a525c31322db6da5f2d0febf537b10 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 29 Aug 2025 15:57:38 -0400 Subject: [PATCH 19/20] WIP Test async send --- lightning/src/ln/async_payments_tests.rs | 218 ++++++++++++++++++++-- lightning/src/ln/functional_test_utils.rs | 1 + 2 files changed, 208 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index e617f6fbf1f..ed7c294956d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. 
-use crate::blinded_path::message::{MessageContext, OffersContext}; +use crate::blinded_path::message::{MessageContext, NextMessageHop, OffersContext}; use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; @@ -54,11 +54,12 @@ use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use crate::util::config::UserConfig; use crate::util::ser::Writeable; use bitcoin::constants::ChainHash; use bitcoin::network::Network; use bitcoin::secp256k1; -use bitcoin::secp256k1::Secp256k1; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; use core::convert::Infallible; use core::time::Duration; @@ -331,32 +332,114 @@ fn expect_offer_paths_requests(recipient: &Node, next_hop_nodes: &[&Node]) { // We want to check that the async recipient has enqueued at least one `OfferPathsRequest` and no // other message types. Check this by iterating through all their outbound onion messages, peeling // multiple times if the messages are forwarded through other nodes. 
- let per_msg_recipient_msgs = recipient.onion_messenger.release_pending_msgs(); + let offer_paths_reqs = extract_expected_om(recipient, next_hop_nodes, |peeled_onion| { + matches!( + peeled_onion, + PeeledOnion::AsyncPayments(AsyncPaymentsMessage::OfferPathsRequest(_), _, _) + ) + }); + assert!(!offer_paths_reqs.is_empty()); +} + +fn extract_invoice_request_om<'a>( + payer: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage) { + extract_expected_om(payer, next_hop_nodes, |peeled_onion| { + matches!(peeled_onion, &PeeledOnion::Offers(OffersMessage::InvoiceRequest(_), _, _)) + }) + .pop() + .unwrap() +} + +fn extract_static_invoice_om<'a>( + invoice_server: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage, StaticInvoice) { + let mut static_invoice = None; + let (peer_id, om) = extract_expected_om(invoice_server, next_hop_nodes, |peeled_onion| { + if let &PeeledOnion::Offers(OffersMessage::StaticInvoice(inv), _, _) = &peeled_onion { + static_invoice = Some(inv.clone()); + true + } else { + false + } + }) + .pop() + .unwrap(); + (peer_id, om, static_invoice.unwrap()) +} + +fn extract_held_htlc_available_om<'a>( + payer: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage) { + extract_expected_om(payer, next_hop_nodes, |peeled_onion| { + matches!( + peeled_onion, + &PeeledOnion::AsyncPayments(AsyncPaymentsMessage::HeldHtlcAvailable(_), _, _) + ) + }) + .pop() + .unwrap() +} + +fn extract_release_htlc_om<'a>( + recipient: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage) { + extract_expected_om(recipient, next_hop_nodes, |peeled_onion| { + matches!( + peeled_onion, + &PeeledOnion::AsyncPayments(AsyncPaymentsMessage::ReleaseHeldHtlc(_), _, _) + ) + }) + .pop() + .unwrap() +} + +fn extract_expected_om( + msg_sender: &Node, next_hop_nodes: &[&Node], mut expected_msg_type: F, +) -> Vec<(PublicKey, msgs::OnionMessage)> +where + F: FnMut(&PeeledOnion) -> bool, +{ + let 
per_msg_recipient_msgs = msg_sender.onion_messenger.release_pending_msgs(); let mut pk_to_msg = Vec::new(); for (pk, msgs) in per_msg_recipient_msgs { for msg in msgs { pk_to_msg.push((pk, msg)); } } - let mut num_offer_paths_reqs: u8 = 0; + let mut msgs = Vec::new(); while let Some((pk, msg)) = pk_to_msg.pop() { let node = next_hop_nodes.iter().find(|node| node.node.get_our_node_id() == pk).unwrap(); let peeled_msg = node.onion_messenger.peel_onion_message(&msg).unwrap(); match peeled_msg { - PeeledOnion::AsyncPayments(AsyncPaymentsMessage::OfferPathsRequest(_), _, _) => { - num_offer_paths_reqs += 1; - }, PeeledOnion::Forward(next_hop, msg) => { let next_pk = match next_hop { - crate::blinded_path::message::NextMessageHop::NodeId(pk) => pk, - _ => panic!(), + NextMessageHop::NodeId(pk) => pk, + NextMessageHop::ShortChannelId(scid) => { + let mut next_pk = None; + for node in next_hop_nodes { + if node.node.get_our_node_id() == pk { + continue; + } + for channel in node.node.list_channels() { + if channel.short_channel_id.unwrap() == scid + || channel.inbound_scid_alias.unwrap_or(0) == scid + { + next_pk = Some(node.node.get_our_node_id()); + } + } + } + next_pk.unwrap() + }, }; pk_to_msg.push((next_pk, msg)); }, - _ => panic!("Unexpected message"), + peeled_onion if expected_msg_type(&peeled_onion) => msgs.push((pk, msg)), + peeled_onion => panic!("Unexpected message: {:#?}", peeled_onion), } } - assert!(num_offer_paths_reqs > 0); + assert!(!msgs.is_empty()); + msgs } fn advance_time_by(duration: Duration, node: &Node) { @@ -365,6 +448,14 @@ fn advance_time_by(duration: Duration, node: &Node) { connect_block(node, &block); } +fn often_offline_node_cfg() -> UserConfig { + let mut cfg = test_default_channel_config(); + cfg.channel_handshake_config.announce_for_forwarding = false; + cfg.channel_handshake_limits.force_announced_channel_preference = true; + cfg.hold_outbound_htlcs_at_next_hop = true; + cfg +} + #[test] fn invalid_keysend_payment_secret() { let 
chanmon_cfgs = create_chanmon_cfgs(3); @@ -2206,3 +2297,108 @@ fn invoice_server_is_not_channel_peer() { let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); assert_eq!(res.0, Some(PaidBolt12Invoice::StaticInvoice(invoice))); } + +#[test] +fn simple_async_sender() { + // Test the basic case of an async sender paying an async recipient. + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg()); + let mut invoice_server_cfg = test_default_channel_config(); + invoice_server_cfg.accept_forwards_to_priv_channels = true; + let node_chanmgrs = create_node_chanmgrs( + 4, + &node_cfgs, + &[Some(sender_cfg), None, Some(invoice_server_cfg), Some(recipient_cfg)], + ); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0); + // Make sure all nodes are at the same block height + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); + connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1); + let sender = &nodes[0]; + let sender_lsp = &nodes[1]; + let invoice_server = &nodes[2]; + let recipient = &nodes[3]; + + let recipient_id = vec![42; 32]; + let inv_server_paths = + invoice_server.node.blinded_paths_for_async_recipient(recipient_id.clone(), None).unwrap(); + recipient.node.set_paths_to_static_invoice_server(inv_server_paths).unwrap(); + 
expect_offer_paths_requests(recipient, &[sender, sender_lsp, invoice_server]); + let invoice = + pass_static_invoice_server_messages(invoice_server, recipient, recipient_id.clone()) + .invoice; + + let offer = recipient.node.get_async_receive_offer().unwrap(); + let amt_msat = 5000; + let payment_id = PaymentId([1; 32]); + let params = RouteParametersConfig::default(); + sender + .node + .pay_for_offer(&offer, None, Some(amt_msat), None, payment_id, Retry::Attempts(0), params) + .unwrap(); + + // Forward invreq to server, pass static invoice back, check that htlc was locked in/monitor was + // added + let (peer_id, invreq_om) = extract_invoice_request_om(sender, &[sender_lsp, invoice_server]); + invoice_server.onion_messenger.handle_onion_message(peer_id, &invreq_om); + + let mut events = invoice_server.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let reply_path = match events.pop().unwrap() { + Event::StaticInvoiceRequested { recipient_id: ev_id, invoice_slot: _, reply_path } => { + assert_eq!(recipient_id, ev_id); + reply_path + }, + _ => panic!(), + }; + + invoice_server.node.send_static_invoice(invoice, reply_path).unwrap(); + let (peer_node_id, static_invoice_om, static_invoice) = + extract_static_invoice_om(invoice_server, &[sender_lsp, sender, recipient]); + + // The sender should lock in the held HTLC with their LSP right after receiving the static invoice. 
+ sender.onion_messenger.handle_onion_message(peer_node_id, &static_invoice_om); + check_added_monitors(sender, 1); + let commitment_update = get_htlc_update_msgs!(sender, sender_lsp.node.get_our_node_id()); + let update_add = commitment_update.update_add_htlcs[0].clone(); + let payment_hash = update_add.payment_hash; + assert!(update_add.hold_htlc.is_some()); + sender_lsp.node.handle_update_add_htlc(sender.node.get_our_node_id(), &update_add); + commitment_signed_dance!(sender_lsp, sender, &commitment_update.commitment_signed, false, true); + + // Ensure that after the held HTLC is locked in, the sender's lsp does not forward it immediately. + sender_lsp.node.process_pending_htlc_forwards(); + assert!(sender_lsp.node.get_and_clear_pending_msg_events().is_empty()); + + let (peer_id, held_htlc_om) = + extract_held_htlc_available_om(sender, &[sender_lsp, invoice_server, recipient]); + recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om); + let (peer_id, release_htlc_om) = + extract_release_htlc_om(recipient, &[sender, sender_lsp, invoice_server]); + sender_lsp.onion_messenger.handle_onion_message(peer_id, &release_htlc_om); + + // After the sender's LSP receives release_held_htlc from the recipient, the payment can complete + sender_lsp.node.process_pending_htlc_forwards(); + let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); + check_added_monitors!(sender_lsp, 1); + + let path: &[&Node] = &[invoice_server, recipient]; + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let claimable_ev = do_pass_along_path(args).unwrap(); + + let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; + let keysend_preimage = extract_payment_preimage(&claimable_ev); + let (res, _) = + claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); + 
assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(static_invoice))); +} diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 5b0c37a4731..4d1683b44d3 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4261,6 +4261,7 @@ pub fn test_default_channel_config() -> UserConfig { // feerate of 253). default_config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(50_000_000 / 253); + default_config.enable_htlc_hold = true; default_config } From e1fa5eb9c78f0ead2205c3515d2998ec33cf3868 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 4 Sep 2025 21:33:05 -0400 Subject: [PATCH 20/20] Conditionally advertise htlc_hold feature Now that we support the feature of sending payments as an often-offline sender to an often-offline recipient, including the sender's LSP-side, we can start conditionally advertising the feature bit to other nodes on the network. --- lightning/src/ln/channelmanager.rs | 1 - lightning/src/util/config.rs | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ee831db494f..cedc919b810 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -15058,7 +15058,6 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { // If we are configured to be an announced node, we are expected to be always-online and can // advertise the htlc_hold feature. - #[cfg(test)] if config.enable_htlc_hold { features.set_htlc_hold_optional(); } diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index b997afb4824..f7c266af9a6 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -945,7 +945,6 @@ pub struct UserConfig { /// Setting this to `true` may break backwards compatibility with LDK versions < 0.2. 
/// /// Default value: `false` - #[cfg(test)] pub enable_htlc_hold: bool, /// If this is set to true, then if we as an often-offline payer receive a [`StaticInvoice`] to /// pay, we will attempt to hold the corresponding outbound HTLCs with our next-hop channel @@ -973,7 +972,6 @@ impl Default for UserConfig { accept_intercept_htlcs: false, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, - #[cfg(test)] enable_htlc_hold: false, hold_outbound_htlcs_at_next_hop: true, } @@ -997,6 +995,7 @@ impl Readable for UserConfig { manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, hold_outbound_htlcs_at_next_hop: Readable::read(reader)?, + enable_htlc_hold: Readable::read(reader)?, }) } }