From c41fe2fadf270fa2de9089ca7cc27a8284298394 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Fri, 29 Aug 2025 17:37:45 -0400
Subject: [PATCH 01/11] Cache peers in OffersMessageFlow

In upcoming commits, we'll be creating blinded paths during the process of
creating a revoke_and_ack message within the Channel struct. These paths
will be included in said RAA to be used as reply paths for often-offline
senders' held_htlc_available messages.

Because we hold the per-peer lock corresponding to the Channel while
creating this RAA, we can't use our typical approach of calling
ChannelManager::get_peers_for_blinded_path to create these blinded paths.
The get_peers_for_blinded_path method takes each peer's lock in turn to
check for usable channels/onion message feature support, and it's not
permitted to hold multiple peer state locks at the same time due to the
potential for deadlocks (see the debug_sync module).

To avoid taking other peer state locks while holding a particular Channel's
peer state lock, here we cache the set of peers in the OffersMessageFlow,
which is the struct that ultimately creates the blinded paths for the RAA.
---
 lightning/src/ln/channelmanager.rs |  9 +++++
 lightning/src/offers/flow.rs       | 64 ++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 0397116f96d..78137dac033 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -12983,6 +12983,8 @@ where
 		for (err, counterparty_node_id) in failed_channels.drain(..) {
 			let _ = handle_error!(self, err, counterparty_node_id);
 		}
+
+		let _ = self.flow.peer_disconnected(counterparty_node_id);
 	}
 
 	#[rustfmt::skip]
@@ -12993,6 +12995,7 @@ where
 			return Err(());
 		}
 
+		let mut peer_has_announced_channel = false;
 		let mut res = Ok(());
 		PersistenceNotifierGuard::optionally_notify(self, || {
@@ -13080,6 +13083,9 @@ where
 					}),
 					ReconnectionMsg::None => {},
 				}
+				if chan.context().should_announce() {
+					peer_has_announced_channel = true;
+				}
 			}
 		}
 
@@ -13092,6 +13098,9 @@ where
 		// until we have some peer connection(s) to receive onion messages over, so as a minor optimization
 		// refresh the cache when a peer connects.
 		self.check_refresh_async_receive_offer_cache(false);
+		let _ = self.flow.peer_connected(
+			counterparty_node_id, &init_msg.features, peer_has_announced_channel
+		);
 		res
 	}
 
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs
index 38f472b2f5b..ea0cd347455 100644
--- a/lightning/src/offers/flow.rs
+++ b/lightning/src/offers/flow.rs
@@ -61,6 +61,7 @@ use crate::sign::{EntropySource, NodeSigner, ReceiveAuthKey};
 use crate::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder};
 
 use crate::sync::{Mutex, RwLock};
+use crate::types::features::InitFeatures;
 use crate::types::payment::{PaymentHash, PaymentSecret};
 use crate::util::ser::Writeable;
 
@@ -98,6 +99,7 @@ where
 	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
 
 	async_receive_offer_cache: Mutex<AsyncReceiveOfferCache>,
+	peers_cache: Mutex<Vec<(MessageForwardNode, bool)>>,
 
 	#[cfg(feature = "dnssec")]
 	pub(crate) hrn_resolver: OMNameResolver,
@@ -130,6 +132,7 @@ where
 
 			pending_offers_messages: Mutex::new(Vec::new()),
 			pending_async_payments_messages: Mutex::new(Vec::new()),
+			peers_cache: Mutex::new(Vec::with_capacity(MAX_CACHED_PEERS)),
 
 			#[cfg(feature = "dnssec")]
 			hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height),
@@ -260,6 +263,10 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2
 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration =
 	DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY;
 
+/// The maximum number of peers that the [`OffersMessageFlow`] will hold in its peer cache.
+/// See [`OffersMessageFlow::peer_connected`] and [`OffersMessageFlow::peer_disconnected`].
+pub const MAX_CACHED_PEERS: usize = 100;
+
 impl<MR: Deref> OffersMessageFlow<MR>
 where
 	MR::Target: MessageRouter,
@@ -1627,4 +1634,61 @@ where
 	pub fn writeable_async_receive_offer_cache(&self) -> Vec<u8> {
 		self.async_receive_offer_cache.encode()
 	}
+
+	/// Indicates that a peer was connected to our node. Useful for the [`OffersMessageFlow`] to keep
+	/// track of which peers are connected, which allows for methods that can create blinded paths
+	/// without requiring a fresh set of [`MessageForwardNode`]s to be passed in.
+	///
+	/// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline
+	/// senders.
+	///
+	/// Errors if the peer does not support onion messages or we have more than [`MAX_CACHED_PEERS`]
+	/// connected already.
+	pub fn peer_connected(
+		&self, peer_node_id: PublicKey, features: &InitFeatures, announced_channel: bool,
+	) -> Result<(), ()> {
+		if !features.supports_onion_messages() {
+			return Err(());
+		}
+
+		let mut peers_cache = self.peers_cache.lock().unwrap();
+		let existing_peer = peers_cache.iter_mut().find(|(peer, _)| peer.node_id == peer_node_id);
+		if let Some((_, is_announced)) = existing_peer {
+			*is_announced = announced_channel;
+			return Ok(());
+		}
+
+		// Cache this peer if we have space or if they are a public node and can replace an existing
+		// peer that is potentially private, since private peers are not useful as introduction nodes
+		// for blinded paths.
+		let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None };
+		match peers_cache.len() {
+			num_peers if num_peers >= MAX_CACHED_PEERS => {
+				if !announced_channel {
+					return Err(());
+				}
+				match peers_cache.iter().position(|(_, announced)| !announced) {
+					Some(idx) => peers_cache[idx] = (peer, announced_channel),
+					None => return Err(()),
+				}
+			},
+			_ => peers_cache.push((peer, announced_channel)),
+		}
+
+		Ok(())
+	}
+
+	/// Indicates that a peer was disconnected from our node. See [`Self::peer_connected`].
+	///
+	/// Errors if the peer is unknown.
+	pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> {
+		let mut peers_cache = self.peers_cache.lock().unwrap();
+		let peer_idx = match peers_cache.iter().position(|(peer, _)| peer.node_id == peer_node_id) {
+			Some(idx) => idx,
+			None => return Err(()),
+		};
+		peers_cache.swap_remove(peer_idx);
+
+		Ok(())
+	}
 }

From bbe17ed0f3ac1e60af1a0af24535bc9867d72518 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Thu, 4 Sep 2025 16:37:18 -0400
Subject: [PATCH 02/11] f remove limits on peer cache

---
 lightning/src/ln/channelmanager.rs |  8 +------
 lightning/src/offers/flow.rs       | 37 +++++------------------------
 2 files changed, 7 insertions(+), 38 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 78137dac033..f927a669c23 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -12995,7 +12995,6 @@ where
 			return Err(());
 		}
 
-		let mut peer_has_announced_channel = false;
 		let mut res = Ok(());
 		PersistenceNotifierGuard::optionally_notify(self, || {
@@ -13083,9 +13082,6 @@ where
 					}),
 					ReconnectionMsg::None => {},
 				}
-				if chan.context().should_announce() {
-					peer_has_announced_channel = true;
-				}
 			}
 		}
 
@@ -13098,9 +13094,7 @@ where
 		// until we have some peer connection(s) to receive onion messages over, so as a minor optimization
 		// refresh the cache when a peer connects.
 		self.check_refresh_async_receive_offer_cache(false);
-		let _ = self.flow.peer_connected(
-			counterparty_node_id, &init_msg.features, peer_has_announced_channel
-		);
+		let _ = self.flow.peer_connected(counterparty_node_id, &init_msg.features);
 		res
 	}
 
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs
index ea0cd347455..8890431123e 100644
--- a/lightning/src/offers/flow.rs
+++ b/lightning/src/offers/flow.rs
@@ -99,7 +99,7 @@ where
 	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
 
 	async_receive_offer_cache: Mutex<AsyncReceiveOfferCache>,
-	peers_cache: Mutex<Vec<(MessageForwardNode, bool)>>,
+	peers_cache: Mutex<Vec<MessageForwardNode>>,
 
 	#[cfg(feature = "dnssec")]
 	pub(crate) hrn_resolver: OMNameResolver,
@@ -132,7 +132,7 @@ where
 
 			pending_offers_messages: Mutex::new(Vec::new()),
 			pending_async_payments_messages: Mutex::new(Vec::new()),
-			peers_cache: Mutex::new(Vec::with_capacity(MAX_CACHED_PEERS)),
+			peers_cache: Mutex::new(Vec::new()),
 
 			#[cfg(feature = "dnssec")]
 			hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height),
@@ -263,10 +263,6 @@ const DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration = Duration::from_secs(365 * 2
 pub(crate) const TEST_DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY: Duration =
 	DEFAULT_ASYNC_RECEIVE_OFFER_EXPIRY;
 
-/// The maximum number of peers that the [`OffersMessageFlow`] will hold in its peer cache.
-/// See [`OffersMessageFlow::peer_connected`] and [`OffersMessageFlow::peer_disconnected`].
-pub const MAX_CACHED_PEERS: usize = 100;
-
 impl<MR: Deref> OffersMessageFlow<MR>
 where
 	MR::Target: MessageRouter,
@@ -1642,38 +1638,17 @@ where
 	/// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline
 	/// senders.
 	///
-	/// Errors if the peer does not support onion messages or we have more than [`MAX_CACHED_PEERS`]
-	/// connected already.
+	/// Errors if the peer does not support onion messages.
 	pub fn peer_connected(
-		&self, peer_node_id: PublicKey, features: &InitFeatures, announced_channel: bool,
+		&self, peer_node_id: PublicKey, features: &InitFeatures,
 	) -> Result<(), ()> {
 		if !features.supports_onion_messages() {
 			return Err(());
 		}
 
 		let mut peers_cache = self.peers_cache.lock().unwrap();
-		let existing_peer = peers_cache.iter_mut().find(|(peer, _)| peer.node_id == peer_node_id);
-		if let Some((_, is_announced)) = existing_peer {
-			*is_announced = announced_channel;
-			return Ok(());
-		}
-
-		// Cache this peer if we have space or if they are a public node and can replace an existing
-		// peer that is potentially private, since private peers are not useful as introduction nodes
-		// for blinded paths.
 		let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None };
-		match peers_cache.len() {
-			num_peers if num_peers >= MAX_CACHED_PEERS => {
-				if !announced_channel {
-					return Err(());
-				}
-				match peers_cache.iter().position(|(_, announced)| !announced) {
-					Some(idx) => peers_cache[idx] = (peer, announced_channel),
-					None => return Err(()),
-				}
-			},
-			_ => peers_cache.push((peer, announced_channel)),
-		}
+		peers_cache.push(peer);
 
 		Ok(())
 	}
@@ -1683,7 +1658,7 @@ where
 	/// Errors if the peer is unknown.
 	pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> {
 		let mut peers_cache = self.peers_cache.lock().unwrap();
-		let peer_idx = match peers_cache.iter().position(|(peer, _)| peer.node_id == peer_node_id) {
+		let peer_idx = match peers_cache.iter().position(|peer| peer.node_id == peer_node_id) {
 			Some(idx) => idx,
 			None => return Err(()),
 		};

From b8258aae174918dbfc74e90a5a121cc863d3618c Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Fri, 15 Aug 2025 14:57:14 -0400
Subject: [PATCH 03/11] Add UpdateAddHTLC::hold_htlc

As part of supporting sending payments as an often-offline sender, the
sender needs to be able to set a flag in their update_add_htlc message
indicating that the HTLC should be held until receipt of a
release_held_htlc onion message from the often-offline payment recipient.

We don't yet set this flag, but we lay the groundwork by including the
field in the update_add struct.
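
For intuition on what this field costs on the wire, here is a minimal
standalone sketch of the TLV encoding involved. The BigSize writer below is
hand-rolled for illustration only; the real serialization goes through
impl_writeable_msg! and LDK's TLV stream macros:

    // Sketch: how an `Option<()>` flag maps onto a BOLT TLV record. `None`
    // writes nothing; `Some(())` writes a zero-length record whose mere
    // presence is the flag.
    fn write_bigsize(v: u64, out: &mut Vec<u8>) {
        match v {
            0..=0xfc => out.push(v as u8),
            0xfd..=0xffff => {
                out.push(0xfd);
                out.extend_from_slice(&(v as u16).to_be_bytes());
            },
            0x1_0000..=0xffff_ffff => {
                out.push(0xfe);
                out.extend_from_slice(&(v as u32).to_be_bytes());
            },
            _ => {
                out.push(0xff);
                out.extend_from_slice(&v.to_be_bytes());
            },
        }
    }

    fn write_hold_htlc_tlv(hold_htlc: Option<()>, out: &mut Vec<u8>) {
        if hold_htlc.is_none() {
            return; // absent flag => no TLV record at all
        }
        write_bigsize(75_537, out); // odd type, so non-understanding peers ignore it
        write_bigsize(0, out); // zero-length value
    }

    fn main() {
        let mut bytes = Vec::new();
        write_hold_htlc_tlv(Some(()), &mut bytes);
        // 75537 = 0x12711, BigSize-encoded as fe 00 01 27 11, then length 00.
        assert_eq!(bytes, vec![0xfe, 0x00, 0x01, 0x27, 0x11, 0x00]);
    }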
See-also --- lightning/src/ln/blinded_payment_tests.rs | 1 + lightning/src/ln/channel.rs | 1 + lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/htlc_reserve_unit_tests.rs | 5 +++++ lightning/src/ln/msgs.rs | 11 ++++++++++- lightning/src/ln/onion_payment.rs | 1 + lightning/src/ln/payment_tests.rs | 1 + 7 files changed, 20 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a8e7af23984..25fa5e71e5d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -1522,6 +1522,7 @@ fn update_add_msg( onion_routing_packet, skimmed_fee_msat: None, blinding_point, + hold_htlc: None, } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 198569bd77e..2f7853e192b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -9027,6 +9027,7 @@ where onion_routing_packet: (**onion_packet).clone(), skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, + hold_htlc: None, // Will be set by the async sender when support is added }); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5e78049664c..04aa9a75457 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2288,6 +2288,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index f90b8b880bc..7f411a6fcb6 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -835,6 +835,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1072,6 +1073,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &msg); @@ -1255,6 +1257,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1637,6 +1640,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { onion_routing_packet: onion_packet.clone(), skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; for i in 0..50 { @@ -2242,6 +2246,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 6d025293b9e..5540007e88d 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -765,6 +765,11 @@ pub struct UpdateAddHTLC { /// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion /// routing packet and the recipient-provided encrypted payload within. 
pub blinding_point: Option, + /// Set to `Some` if the sender wants the receiver of this message to hold onto this HTLC until + /// receipt of a [`ReleaseHeldHtlc`] onion message from the payment recipient. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + pub hold_htlc: Option<()>, } /// An onion message to be sent to or received from a peer. @@ -3268,7 +3273,10 @@ impl_writeable_msg!(UpdateAddHTLC, { onion_routing_packet, }, { (0, blinding_point, option), - (65537, skimmed_fee_msat, option) + (65537, skimmed_fee_msat, option), + // TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message + // and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989. + (75537, hold_htlc, option), }); impl LengthReadable for OnionMessage { @@ -5701,6 +5709,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; let encoded_value = update_add_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101
0101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 79952faca9a..acaa0304f29 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -753,6 +753,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0af0463134e..3dab164e619 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -5017,6 +5017,7 @@ fn peel_payment_onion_custom_tlvs() { skimmed_fee_msat: None, onion_routing_packet, blinding_point: None, + hold_htlc: None, }; let peeled_onion = crate::ln::onion_payment::peel_payment_onion( &update_add, From f74ce6574756dc98c629aa70cfc572a4fe75a7c2 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 15 Aug 2025 12:16:31 -0400 Subject: [PATCH 04/11] Add feature bit for HtlcHold (52/53) As part of supporting sending payments as an often-offline sender, the often-offline sender's channel counterparty needs to advertise a feature bit indicating that they support holding onto the sender's HTLC until they receive a release_held_htlc onion message from the recipient indicating that they are online and ready to receive the payment. See-also We don't yet advertise support of this feature, so here we also disconnect from peers that try to send us hold HTLCs. --- lightning-types/src/features.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..521aefc2184 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `HtlcHold` - requires/supports holding HTLCs and forwarding on receipt of an onion message +//! (see [BOLT-2](https://github.com/lightning/bolts/pull/989/files) for more information). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -161,7 +163,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf, + ZeroConf | HtlcHold, // Byte 7 Trampoline | SimpleClose, ] @@ -182,7 +184,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf | Keysend, + ZeroConf | HtlcHold | Keysend, // Byte 7 Trampoline | SimpleClose, // Byte 8 - 31 @@ -640,6 +642,17 @@ mod sealed { define_feature!(51, ZeroConf, [InitContext, NodeContext, ChannelTypeContext], "Feature flags for accepting channels with zero confirmations. 
Called `option_zeroconf` in the BOLTs",
 		set_zero_conf_optional, set_zero_conf_required, supports_zero_conf, requires_zero_conf);
+	define_feature!(
+		53,
+		HtlcHold,
+		[InitContext, NodeContext],
+		"Feature flags for holding HTLCs and forwarding on receipt of an onion message",
+		set_htlc_hold_optional,
+		set_htlc_hold_required,
+		clear_htlc_hold,
+		supports_htlc_hold,
+		requires_htlc_hold
+	);
 	define_feature!(
 		55,
 		Keysend,

From 1653cf22488698f32b3b35a44bcb14c223732609 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Wed, 3 Sep 2025 17:09:21 -0400
Subject: [PATCH 05/11] Add enable_htlc_hold cfg flag + fail hold htlcs

As part of supporting sending payments as an often-offline sender, the
often-offline sender's channel counterparty needs to advertise a feature
bit indicating that they support holding onto the sender's HTLC until they
receive a release_held_htlc onion message from the recipient indicating
that they are online and ready to receive the payment.

Here we add a config flag to turn on advertising this feature, and fail
back hold HTLCs if this config flag is not set.

See-also
---
 lightning/src/ln/channelmanager.rs | 33 ++++++++++++++++++++++++++++++
 lightning/src/util/config.rs       | 14 +++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f927a669c23..79f4519673b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6406,6 +6406,32 @@ where
 			});
 			let shared_secret = next_hop.shared_secret().secret_bytes();
 
+			// Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature
+			// support.
+			//
+			// If we wanted to pretend to be a node that didn't understand the feature at all here, the
+			// correct behavior would've been to disconnect the sender when we first received the
+			// update_add message. However, this would make the `UserConfig::enable_htlc_hold` option
+			// unsafe -- if our node switched the config option from on to off just after the sender
+			// enqueued their update_add + CS, the sender would continue retransmitting those messages
+			// and we would keep disconnecting them until the HTLC timed out.
+			if update_add_htlc.hold_htlc.is_some()
+				&& !BaseMessageHandler::provided_node_features(self).supports_htlc_hold()
+			{
+				let reason = LocalHTLCFailureReason::TemporaryNodeFailure;
+				let htlc_fail = self.htlc_failure_from_update_add_err(
+					&update_add_htlc,
+					&incoming_counterparty_node_id,
+					reason,
+					is_intro_node_blinded_forward,
+					&shared_secret,
+				);
+				let failure_type =
+					get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
+				htlc_fails.push((htlc_fail, failure_type, reason.into()));
+				continue;
+			}
+
 			// Process the HTLC on the incoming channel.
 			match self.do_funded_channel_callback(
 				incoming_scid,
@@ -14792,6 +14818,13 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 		features.set_anchor_zero_fee_commitments_optional();
 	}
 
+	// Nodes advertising the htlc_hold feature are expected to be always-online, so only
+	// advertise it if we are configured to hold HTLCs for often-offline senders.
+ #[cfg(test)] + if config.enable_htlc_hold { + features.set_htlc_hold_optional(); + } + features } diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index a0c74673799..f0c0330e14d 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -935,6 +935,18 @@ pub struct UserConfig { /// /// Default value: `false` pub enable_dual_funded_channels: bool, + /// LDK supports a feature for always-online nodes such that these nodes can hold onto an HTLC + /// from an often-offline channel peer until the often-offline payment recipient sends an onion + /// message telling the always-online node to release the HTLC. If this is set to `true`, our node + /// will carry out this feature for channel peers that request it. + /// + /// This should only be set to `true` for nodes which expect to be online reliably. + /// + /// Setting this to `true` may break backwards compatibility with LDK versions < 0.2. + /// + /// Default value: `false` + #[cfg(test)] + pub enable_htlc_hold: bool, } impl Default for UserConfig { @@ -949,6 +961,8 @@ impl Default for UserConfig { accept_intercept_htlcs: false, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, + #[cfg(test)] + enable_htlc_hold: false, } } } From 1878efdbb0fb8a620f9ec970c89a8adaada352fa Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 19 Aug 2025 15:10:04 -0400 Subject: [PATCH 06/11] Add RevokeAndACK::release_htlc_message_paths As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages where the reply path terminates at their always-online channel counterparty that is holding the HTLC until the recipient comes online. That way when the recipient sends release_held_htlc, the sender's counterparty will receive that message. To accomplish this, the sender's always-online counterparty includes said reply path in the revoke_and_ack message corresponding to the held HTLC. Here we add support for this field, though we don't set it yet. We also had to tweak the ser macros for this because impl_writeable_msg had never had to write a Vec in a message TLV field before. 
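
To make review easier, here is the optional_vec round-trip behavior the new
field relies on, sketched as bare functions (simplified; the real macros
operate on a TLV stream rather than returning an Option):

    // An empty Vec writes no TLV record at all, and an absent record reads
    // back as an empty Vec -- this is what `_init_tlv_based_struct_field!`
    // resolves to for `optional_vec` fields on the decode side.
    fn write_optional_vec<T: Clone>(field: &[T]) -> Option<Vec<T>> {
        if field.is_empty() {
            None
        } else {
            Some(field.to_vec())
        }
    }

    fn read_optional_vec<T>(record: Option<Vec<T>>) -> Vec<T> {
        record.unwrap_or_default()
    }

    fn main() {
        let empty: Vec<(u64, Vec<u8>)> = Vec::new();
        assert!(write_optional_vec(&empty).is_none());
        assert!(read_optional_vec::<(u64, Vec<u8>)>(None).is_empty());
    }

In particular, an old RevokeAndACK without the TLV decodes to an empty
release_htlc_message_paths, so the new field is backwards compatible.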
--- lightning/src/ln/channel.rs | 1 + lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/htlc_reserve_unit_tests.rs | 2 ++ lightning/src/ln/msgs.rs | 14 +++++++++++++- lightning/src/util/ser_macros.rs | 4 ++-- 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2f7853e192b..10270ef900e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8980,6 +8980,7 @@ where next_per_commitment_point: self.holder_commitment_point.next_point(), #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 04aa9a75457..ab2d730ac6b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -6544,6 +6544,7 @@ pub fn test_counterparty_raa_skip_no_crash() { next_per_commitment_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); assert_eq!( diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 7f411a6fcb6..dc5d07c180e 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -936,6 +936,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); expect_and_process_pending_htlcs(&nodes[1], false); @@ -2381,6 +2382,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 5540007e88d..b4034391564 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -31,6 +31,7 @@ use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; +use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{ BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs, UnauthenticatedReceiveTlvs, }; @@ -888,6 +889,13 @@ pub struct RevokeAndACK { #[cfg(taproot)] /// Musig nonce the recipient should use in their next commitment signature message pub next_local_nonce: Option, + /// A list of `(htlc_id, blinded_path)`. The receiver of this message will use the blinded paths + /// as reply paths to [`HeldHtlcAvailable`] onion messages that they send to the often-offline + /// receiver of this HTLC. The `htlc_id` is used by the receiver of this message to identify which + /// held HTLC a given blinded path corresponds to. 
+ /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + pub release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>, } /// An [`update_fee`] message to be sent to or received from a peer @@ -3178,7 +3186,9 @@ impl_writeable_msg!(RevokeAndACK, { channel_id, per_commitment_secret, next_per_commitment_point -}, {}); +}, { + (1, release_htlc_message_paths, optional_vec) +}); #[cfg(taproot)] impl_writeable_msg!(RevokeAndACK, { @@ -3186,6 +3196,7 @@ impl_writeable_msg!(RevokeAndACK, { per_commitment_secret, next_per_commitment_point }, { + (1, release_htlc_message_paths, optional_vec), (4, next_local_nonce, option) }); @@ -5830,6 +5841,7 @@ mod tests { next_per_commitment_point: pubkey_1, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; let encoded_value = raa.encode(); let target_value = >::from_hex("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap(); diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index ea7a3e8a2a2..647e7c77a6c 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -700,7 +700,7 @@ macro_rules! impl_writeable_msg { impl $crate::util::ser::Writeable for $st { fn write(&self, w: &mut W) -> Result<(), $crate::io::Error> { $( self.$field.write(w)?; )* - $crate::encode_tlv_stream!(w, {$(($type, self.$tlvfield.as_ref(), $fieldty)),*}); + $crate::encode_tlv_stream!(w, {$(($type, &self.$tlvfield, $fieldty)),*}); Ok(()) } } @@ -713,7 +713,7 @@ macro_rules! impl_writeable_msg { $crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*}); Ok(Self { $($field,)* - $($tlvfield),* + $($tlvfield: $crate::_init_tlv_based_struct_field!($tlvfield, $fieldty)),* }) } } From 324312e83ed5afb3a59980ea82abd6a06c69cc50 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 3 Sep 2025 13:38:05 -0400 Subject: [PATCH 07/11] Extract helper for failing HTLC intercepts Makes the next commit cleaner. --- lightning/src/ln/channelmanager.rs | 33 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 79f4519673b..594fd51a956 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -10664,6 +10664,22 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; + let mut fail_intercepted_htlc = || { + let htlc_source = + HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); + let reason = HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::UnknownNextPeer, + ); + let failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: scid, + }; + failed_intercept_forwards.push(( + htlc_source, + payment_hash, + reason, + failure_type, + )); + }; match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); @@ -10713,22 +10729,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/
 								"Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}",
 								scid
 							);
-							let htlc_source = HTLCSource::PreviousHopData(
-								pending_add.htlc_previous_hop_data(),
-							);
-							let reason = HTLCFailReason::from_failure_code(
-								LocalHTLCFailureReason::UnknownNextPeer,
-							);
-							let failure_type =
-								HTLCHandlingFailureType::InvalidForward {
-									requested_forward_scid: scid,
-								};
-							failed_intercept_forwards.push((
-								htlc_source,
-								payment_hash,
-								reason,
-								failure_type,
-							));
+							fail_intercepted_htlc();
 						},
 					}
 				} else {

From 380ac9602f5ecb959ff52e4e0131e96c35b03553 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Sat, 16 Aug 2025 00:53:41 -0400
Subject: [PATCH 08/11] Store held htlcs in pending_intercepted_htlcs

As part of supporting sending payments as an often-offline sender, the
sender's always-online channel counterparty needs to hold onto the sender's
HTLC until they receive a release_held_htlc onion message from the
often-offline recipient.

Here we implement storing these held HTLCs in the existing
ChannelManager::pending_intercepted_htlcs map. We want to move in the
direction of obviating the need to persist the ChannelManager entirely, so
it doesn't really make sense to add a whole new map for these HTLCs.
---
 lightning/src/ln/channelmanager.rs | 111 +++++++++++++++++++++++++----
 lightning/src/ln/onion_payment.rs  |   1 +
 2 files changed, 99 insertions(+), 13 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 594fd51a956..14789a81af9 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -229,6 +229,9 @@ pub enum PendingHTLCRouting {
 		blinded: Option<BlindedForward>,
 		/// The absolute CLTV of the inbound HTLC
 		incoming_cltv_expiry: Option<u32>,
+		/// Whether this HTLC should be held by our node until we receive a corresponding
+		/// [`ReleaseHeldHtlc`] onion message.
+		hold_htlc: Option<()>,
 	},
 	/// An HTLC which should be forwarded on to another Trampoline node.
 	TrampolineForward {
@@ -371,6 +374,15 @@ impl PendingHTLCRouting {
 			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
 		}
 	}
+
+	/// Whether this HTLC should be held by our node until we receive a corresponding
+	/// [`ReleaseHeldHtlc`] onion message.
+	fn should_hold_htlc(&self) -> bool {
+		match self {
+			Self::Forward { hold_htlc: Some(()), .. } => true,
+			_ => false,
+		}
+	}
 }
 
 /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
@@ -641,6 +653,34 @@ impl Readable for PaymentId {
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct InterceptId(pub [u8; 32]);
 
+impl InterceptId {
+	/// This intercept id corresponds to an HTLC that will be forwarded on
+	/// [`ChannelManager::forward_intercepted_htlc`].
+	fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self {
+		Self(Sha256::hash(ss).to_byte_array())
+	}
+
+	/// This intercept id corresponds to an HTLC that will be forwarded on receipt of a
+	/// [`ReleaseHeldHtlc`] onion message.
+	fn from_htlc_id_and_chan_id(
+		htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+	) -> Self {
+		let htlc_id_size = core::mem::size_of::<u64>();
+		let chan_id_size = core::mem::size_of::<ChannelId>();
+		let cp_id_serialized = counterparty_node_id.serialize();
+
+		const RES_SIZE: usize = 8 + 32 + 33;
+		debug_assert_eq!(RES_SIZE, htlc_id_size + chan_id_size + cp_id_serialized.len());
+
+		let mut res = [0u8; RES_SIZE];
+		res[..htlc_id_size].copy_from_slice(&htlc_id.to_be_bytes());
+		res[htlc_id_size..htlc_id_size + chan_id_size].copy_from_slice(&channel_id.0);
+		res[htlc_id_size + chan_id_size..].copy_from_slice(&cp_id_serialized);
+
+		Self(Sha256::hash(&res[..]).to_byte_array())
+	}
+}
+
 impl Writeable for InterceptId {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
 		self.0.write(w)
@@ -2588,8 +2628,14 @@ pub struct ChannelManager<
 	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
 	#[cfg(not(test))]
 	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
-	/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
-	/// until the user tells us what we should do with them.
+	/// Storage for HTLCs that have been intercepted.
+	///
+	/// These HTLCs fall into two categories:
+	/// 1. HTLCs that are bubbled up to the user and held until the invocation of
+	///    [`ChannelManager::forward_intercepted_htlc`] or [`ChannelManager::fail_intercepted_htlc`]
+	///    (or timeout)
+	/// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a
+	///    [`ReleaseHeldHtlc`] onion message from an often-offline recipient
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
@@ -6268,13 +6314,18 @@ where
 		})?;
 
 		let routing = match payment.forward_info.routing {
-			PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
-				PendingHTLCRouting::Forward {
-					onion_packet,
-					blinded,
-					incoming_cltv_expiry,
-					short_channel_id: next_hop_scid,
-				}
+			PendingHTLCRouting::Forward {
+				onion_packet,
+				blinded,
+				incoming_cltv_expiry,
+				hold_htlc,
+				..
+			} => PendingHTLCRouting::Forward {
+				onion_packet,
+				blinded,
+				incoming_cltv_expiry,
+				hold_htlc,
+				short_channel_id: next_hop_scid,
 			},
 			_ => unreachable!(), // Only `PendingHTLCRouting::Forward`s are intercepted
 		};
@@ -10682,7 +10733,25 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					};
 					match forward_htlcs.entry(scid) {
 						hash_map::Entry::Occupied(mut entry) => {
-							entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add));
+							if pending_add.forward_info.routing.should_hold_htlc() {
+								let intercept_id = InterceptId::from_htlc_id_and_chan_id(
+									prev_htlc_id,
+									&prev_channel_id,
+									&prev_counterparty_node_id,
+								);
+								let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
+								match held_htlcs.entry(intercept_id) {
+									hash_map::Entry::Vacant(entry) => {
+										entry.insert(pending_add);
+									},
+									hash_map::Entry::Occupied(_) => {
+										debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id");
+										fail_intercepted_htlc();
+									},
+								}
+							} else {
+								entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add));
+							}
 						},
 						hash_map::Entry::Vacant(entry) => {
 							if !is_our_scid
@@ -10692,9 +10761,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 								scid, &self.chain_hash,
 							) {
-								let intercept_id = InterceptId(
-									Sha256::hash(&pending_add.forward_info.incoming_shared_secret)
-										.to_byte_array(),
+								let intercept_id = InterceptId::from_incoming_shared_secret(
+									&pending_add.forward_info.incoming_shared_secret,
 								);
 								let mut pending_intercepts =
 									self.pending_intercepted_htlcs.lock().unwrap();
@@ -10732,6 +10800,22 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ scid, &self.chain_hash, ) { - let intercept_id = InterceptId( - Sha256::hash(&pending_add.forward_info.incoming_shared_secret) - .to_byte_array(), + let intercept_id = InterceptId::from_incoming_shared_secret( + &pending_add.forward_info.incoming_shared_secret, ); let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); @@ -10732,6 +10800,22 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fail_intercepted_htlc(); }, } + } else if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + fail_intercepted_htlc(); + }, + } } else { entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); } @@ -14850,6 +14934,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, blinded, option), (2, short_channel_id, required), (3, incoming_cltv_expiry, option), + (5, hold_htlc, option), }, (1, Receive) => { (0, payment_data, required), diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index acaa0304f29..0934c6c812b 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -190,6 +190,7 @@ pub(super) fn create_fwd_pending_htlc_info( onion_packet: outgoing_packet, short_channel_id, incoming_cltv_expiry: Some(msg.cltv_expiry), + hold_htlc: msg.hold_htlc, blinded: intro_node_blinding_point.or(msg.blinding_point) .map(|bp| BlindedForward { inbound_blinding_point: bp, From c575274ead1c41ef86f2e2a3be61eea62a6486ec Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 29 Aug 2025 23:40:30 -0400 Subject: [PATCH 09/11] Support creating reply_path for HeldHtlcAvailable As part of supporting sending payments as an often-offline sender, the sender needs to send held_htlc_available onion messages such that the reply path to the message terminates at their always-online channel counterparty that is holding the HTLC. That way when the recipient responds with release_held_htlc, the sender's counterparty will receive that message. Here we add a method for creating said reply path, which will be used in the next commit. 
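
In sketch form, the new method's control flow is as follows (placeholder
types standing in for BlindedMessagePath and the MessageRouter; the real
fallback also pads the one-hop path with dummy hops so its length doesn't
leak):

    fn reply_path_or_fallback<P>(
        router_paths: Result<Vec<P>, ()>, one_hop_to_self: impl FnOnce() -> P,
    ) -> P {
        match router_paths {
            // Prefer a router-built path through one of our cached peers.
            Ok(mut paths) => match paths.pop() {
                Some(path) => path,
                None => one_hop_to_self(),
            },
            // Router failure: fall back to a path that both starts and ends
            // at our own node, so the recipient can always reply.
            Err(()) => one_hop_to_self(),
        }
    }

    fn main() {
        let path = reply_path_or_fallback(Err(()), || "one-hop path to ourselves");
        assert_eq!(path, "one-hop path to ourselves");
    }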
---
 lightning/src/blinded_path/message.rs | 18 +++++++++++--
 lightning/src/ln/channelmanager.rs    | 12 +++++++++
 lightning/src/offers/flow.rs          | 39 +++++++++++++++++++++++++--
 3 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs
index 7d721cd1fdc..e291c83b66c 100644
--- a/lightning/src/blinded_path/message.rs
+++ b/lightning/src/blinded_path/message.rs
@@ -19,7 +19,7 @@ use crate::blinded_path::{BlindedHop, BlindedPath, Direction, IntroductionNode,
 use crate::crypto::streams::ChaChaPolyReadAdapter;
 use crate::io;
 use crate::io::Cursor;
-use crate::ln::channelmanager::PaymentId;
+use crate::ln::channelmanager::{InterceptId, PaymentId};
 use crate::ln::msgs::DecodeError;
 use crate::ln::onion_utils;
 use crate::offers::nonce::Nonce;
@@ -556,7 +556,7 @@ pub enum AsyncPaymentsContext {
 	},
 	/// Context contained within the reply [`BlindedMessagePath`] we put in outbound
 	/// [`HeldHtlcAvailable`] messages, provided back to us in corresponding [`ReleaseHeldHtlc`]
-	/// messages.
+	/// messages if we are an always-online sender paying an async recipient.
 	///
 	/// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable
 	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
@@ -577,6 +577,17 @@ pub enum AsyncPaymentsContext {
 		/// able to trivially ask if we're online forever.
 		path_absolute_expiry: core::time::Duration,
 	},
+	/// Context contained within the reply [`BlindedMessagePath`] put in outbound
+	/// [`HeldHtlcAvailable`] messages, provided back to the async sender's always-online counterparty
+	/// in corresponding [`ReleaseHeldHtlc`] messages.
+	///
+	/// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	ReleaseHeldHtlc {
+		/// An identifier for the HTLC that should be released by us as the sender's always-online
+		/// channel counterparty to the often-offline recipient.
+		intercept_id: InterceptId,
+	},
 }
 
 impl_writeable_tlv_based_enum!(MessageContext,
@@ -632,6 +643,9 @@ impl_writeable_tlv_based_enum!(AsyncPaymentsContext,
 		(2, invoice_slot, required),
 		(4, path_absolute_expiry, required),
 	},
+	(6, ReleaseHeldHtlc) => {
+		(0, intercept_id, required),
+	},
 );
 
 /// Contains a simple nonce for use in a blinded path's context.

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 14789a81af9..b59d54dfe2f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -5450,6 +5450,18 @@ where
 		res
 	}
 
+	/// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to
+	/// create a path for the sender to use as the reply path when they send the recipient a
+	/// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be
+	/// received by our node.
+	fn path_for_release_held_htlc(
+		&self, htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+	) -> BlindedMessagePath {
+		let intercept_id =
+			InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id);
+		self.flow.path_for_release_held_htlc(intercept_id, &*self.entropy_source)
+	}
+
 	/// Signals that no further attempts for the given payment should occur. Useful if you have a
 	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
 	/// retries are exhausted.
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs
index 8890431123e..1541d0e01c7 100644
--- a/lightning/src/offers/flow.rs
+++ b/lightning/src/offers/flow.rs
@@ -32,7 +32,7 @@ use crate::prelude::*;
 
 use crate::chain::BestBlock;
 use crate::ln::channel_state::ChannelDetails;
-use crate::ln::channelmanager::{PaymentId, CLTV_FAR_FAR_AWAY};
+use crate::ln::channelmanager::{InterceptId, PaymentId, CLTV_FAR_FAR_AWAY};
 use crate::ln::inbound_payment;
 use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache;
 use crate::offers::invoice::{
@@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{
 	StaticInvoicePersisted,
 };
 use crate::onion_message::messenger::{
-	Destination, MessageRouter, MessageSendInstructions, Responder,
+	Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH,
 };
 use crate::onion_message::offers::OffersMessage;
 use crate::onion_message::packet::OnionMessageContents;
@@ -1154,6 +1154,41 @@ where
 		Ok(())
 	}
 
+	/// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to
+	/// create a path for the sender to use as the reply path when they send the recipient a
+	/// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be
+	/// received by our node.
+	///
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	pub fn path_for_release_held_htlc<ES: Deref>(
+		&self, intercept_id: InterceptId, entropy: ES,
+	) -> BlindedMessagePath
+	where
+		ES::Target: EntropySource,
+	{
+		let peers = self.peers_cache.lock().unwrap().clone();
+		let context =
+			MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id });
+		if let Ok(mut paths) = self.create_blinded_paths(peers, context.clone()) {
+			if let Some(path) = paths.pop() {
+				return path;
+			}
+		}
+
+		// If the `MessageRouter` fails on blinded path creation, fall back to creating a 1-hop blinded
+		// path.
+		let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1);
+		BlindedMessagePath::new_with_dummy_hops(
+			&[],
+			self.get_our_node_id(),
+			num_dummy_hops,
+			self.receive_auth_key,
+			context,
+			&*entropy,
+			&self.secp_ctx,
+		)
+	}
+
 	/// Enqueues the created [`DNSSECQuery`] to be sent to the counterparty.
 	///
 	/// # Peers

From 6c252dfc9baedce2a80e0fe749296778f6d7cf97 Mon Sep 17 00:00:00 2001
From: Valentine Wallace
Date: Sat, 30 Aug 2025 00:33:20 -0400
Subject: [PATCH 10/11] Include release_held_htlc blinded paths in RAA

As part of supporting sending payments as an often-offline sender, the
sender needs to send held_htlc_available onion messages such that the reply
path to the message terminates at their always-online channel counterparty
that is holding the HTLC. That way when the recipient responds with
release_held_htlc, the sender's counterparty will receive that message.

Here the counterparty starts including said reply paths in the
revoke_and_ack message destined for the sender, so the sender can use these
paths in subsequent held_htlc_available messages.

We put the paths in the RAA to ensure the sender receives the blinded
paths, because failure to deliver the paths means the HTLC will time out
and fail.
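
The per-HTLC collection in get_last_revoke_and_ack boils down to the
following sketch (placeholder path type; in the real code the closure is
threaded from ChannelManager down into Channel as a Fn(u64) ->
BlindedMessagePath parameter):

    // One (htlc_id, reply path) pair per inbound HTLC we've agreed to hold.
    // The htlc_id lets the often-offline sender match each reply path to
    // the HTLC it corresponds to.
    fn collect_release_paths<P>(
        held_htlc_ids: impl Iterator<Item = u64>,
        path_for_release_htlc: impl Fn(u64) -> P,
    ) -> Vec<(u64, P)> {
        held_htlc_ids.map(|id| (id, path_for_release_htlc(id))).collect()
    }

    fn main() {
        let paths = collect_release_paths([3u64, 7].into_iter(), |id| format!("path-{id}"));
        assert_eq!(paths, vec![(3, "path-3".to_string()), (7, "path-7".to_string())]);
    }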
--- lightning/src/ln/channel.rs | 74 +++++++++++++++++++++++------- lightning/src/ln/channelmanager.rs | 22 +++++---- 2 files changed, 72 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 10270ef900e..4f7a995f434 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn}; #[cfg(splicing)] use bitcoin::{FeeRate, Sequence}; +use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; @@ -282,6 +283,29 @@ impl InboundHTLCState { _ => None, } } + + /// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`] + /// onion message. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + fn should_hold_htlc(&self) -> bool { + match self { + InboundHTLCState::RemoteAnnounced(res) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { + InboundHTLCResolution::Resolved { pending_htlc_status } => { + match pending_htlc_status { + PendingHTLCStatus::Forward(info) => info.routing.should_hold_htlc(), + _ => false, + } + }, + InboundHTLCResolution::Pending { update_add_htlc } => { + update_add_htlc.hold_htlc.is_some() + }, + }, + InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + } + } } struct InboundHTLCOutput { @@ -1602,12 +1626,12 @@ where } #[rustfmt::skip] - pub fn signer_maybe_unblocked( - &mut self, chain_hash: ChainHash, logger: &L, - ) -> Option where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP + ) -> Option where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)), + ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)), ChannelPhase::UnfundedOutboundV1(chan) => { let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger); Some(SignerResumeUpdates { @@ -8707,13 +8731,14 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. #[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, - user_config: &UserConfig, best_block_height: u32 + user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); @@ -8770,7 +8795,7 @@ where } let mut raa = if self.context.monitor_pending_revoke_and_ack { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; let mut commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() @@ -8859,7 +8884,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. 
#[rustfmt::skip] - pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, logger: &L, path_for_release_htlc: CBP + ) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { if !self.holder_commitment_point.can_advance() { log_trace!(logger, "Attempting to update holder per-commitment point..."); self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); @@ -8887,7 +8914,7 @@ where } else { None }; let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack { log_trace!(logger, "Attempting to generate pending revoke and ack..."); - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; if self.context.resend_order == RAACommitmentOrder::CommitmentFirst @@ -8958,9 +8985,12 @@ where } } - fn get_last_revoke_and_ack(&mut self, logger: &L) -> Option + fn get_last_revoke_and_ack( + &mut self, path_for_release_htlc: CBP, logger: &L, + ) -> Option where L::Target: Logger, + CBP: Fn(u64) -> BlindedMessagePath, { debug_assert!( self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2 @@ -8973,6 +9003,14 @@ where .ok(); if let Some(per_commitment_secret) = per_commitment_secret { if self.holder_commitment_point.can_advance() { + let mut release_htlc_message_paths = Vec::new(); + for htlc in &self.context.pending_inbound_htlcs { + if htlc.state.should_hold_htlc() { + let path = path_for_release_htlc(htlc.htlc_id); + release_htlc_message_paths.push((htlc.htlc_id, path)); + } + } + self.context.signer_pending_revoke_and_ack = false; return Some(msgs::RevokeAndACK { channel_id: self.context.channel_id, @@ -8980,7 +9018,7 @@ where next_per_commitment_point: self.holder_commitment_point.next_point(), #[cfg(taproot)] next_local_nonce: None, - release_htlc_message_paths: Vec::new(), + release_htlc_message_paths, }); } } @@ -9128,13 +9166,15 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. #[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, + path_for_release_htlc: CBP, ) -> Result where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it @@ -9235,7 +9275,7 @@ where self.context.monitor_pending_revoke_and_ack = true; None } else { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -16490,6 +16530,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Receive funding_signed, but the channel will be configured to hold sending channel_ready and @@ -16504,6 +16545,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, // as the funding transaction depends on all channels in the batch becoming ready. 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b59d54dfe2f..8f6c882bcfb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -377,7 +377,7 @@ impl PendingHTLCRouting { /// Whether this HTLC should be held by our node until we receive a corresponding /// [`ReleaseHeldHtlc`] onion message. - fn should_hold_htlc(&self) -> bool { + pub(super) fn should_hold_htlc(&self) -> bool { match self { Self::Forward { hold_htlc: Some(()), .. } => true, _ => false, @@ -3430,18 +3430,20 @@ macro_rules! emit_initial_channel_ready_event { /// set for this channel is empty! macro_rules! handle_monitor_update_completion { ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { + let channel_id = $chan.context.channel_id(); + let counterparty_node_id = $chan.context.get_counterparty_node_id(); #[cfg(debug_assertions)] { let in_flight_updates = - $peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id()); + $peer_state.in_flight_monitor_updates.get(&channel_id); assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); assert_eq!($chan.blocked_monitor_updates_pending(), 0); } let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); let mut updates = $chan.monitor_updating_restored(&&logger, &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(), - $self.best_block.read().unwrap().height); - let counterparty_node_id = $chan.context.get_counterparty_node_id(); + $self.best_block.read().unwrap().height, + |htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id)); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a @@ -3457,7 +3459,7 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); + .remove(&channel_id).unwrap_or(Vec::new()); let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, @@ -3468,7 +3470,6 @@ macro_rules! handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.context.channel_id(); let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -11145,6 +11146,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.chain_hash, &self.config.read().unwrap(), &*self.best_block.read().unwrap(), + |htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id) ); let responses = try_channel_entry!(self, peer_state, res, chan_entry); let mut channel_update = None; @@ -11608,9 +11610,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Returns whether we should remove this channel as it's just been closed. 
let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Option { + let channel_id = chan.context().channel_id(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) { + if let Some(msgs) = chan.signer_maybe_unblocked( + self.chain_hash, &&logger, + |htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id) + ) { if let Some(msg) = msgs.open_channel { pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, @@ -11631,7 +11637,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { node_id, - channel_id: chan.context().channel_id(), + channel_id, updates, }); let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { From aeaa42dfcaf05aef54aed56bc2613c6528f1c22f Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 19 Aug 2025 17:27:53 -0400 Subject: [PATCH 11/11] Release held htlcs on release_held_htlc As part of supporting sending payments as an often-offline sender, the sender's always-online channel counterparty needs to hold onto the sender's HTLC until they receive a release_held_htlc onion message from the often-offline recipient. Here we implement forwarding these held HTLCs upon receipt of the release message from the recipient. --- lightning/src/ln/channelmanager.rs | 52 +++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8f6c882bcfb..4c84ef99a27 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14726,18 +14726,48 @@ where } fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { - let payment_id = match context { - AsyncPaymentsContext::OutboundPayment { payment_id } => payment_id, + match context { + AsyncPaymentsContext::OutboundPayment { payment_id } => { + if let Err(e) = self.send_payment_for_static_invoice(payment_id) { + log_trace!( + self.logger, + "Failed to release held HTLC with payment id {}: {:?}", + payment_id, + e + ); + } + }, + AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id } => { + let mut htlc = { + let mut pending_intercept_htlcs = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercept_htlcs.remove(&intercept_id) { + Some(htlc) => htlc, + None => return, + } + }; + match htlc.forward_info.routing { + PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => { + debug_assert!(hold_htlc.is_some()); + *hold_htlc = None; + }, + _ => { + debug_assert!(false, "HTLC intercepts can only be forwards"); + return; + }, + } + let mut per_source_pending_forward = [( + htlc.prev_short_channel_id, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + PersistenceNotifierGuard::notify_on_drop(self); + }, _ => return, - }; - - if let Err(e) = self.send_payment_for_static_invoice(payment_id) { - log_trace!( - self.logger, - "Failed to release held HTLC with payment id {}: {:?}", - payment_id, - e - ); } }
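
A note for reviewers of this last commit: the intercept_id used to look the
held HTLC back up here is the same one derived in patch 08 from the
(htlc_id, channel_id, counterparty_node_id) triple, so the reply path
handed out in the RAA and the pending_intercepted_htlcs entry agree on the
key. A standalone sketch of that derivation, substituting the sha2 crate
for LDK's internal hash wrappers and raw byte arrays for the ChannelId and
PublicKey types:

    use sha2::{Digest, Sha256};

    // intercept_id = SHA256(htlc_id_be || channel_id || counterparty_pubkey).
    // The 8 + 32 + 33 byte preimage makes the id unique per
    // (channel, counterparty, htlc).
    fn held_htlc_intercept_id(
        htlc_id: u64, channel_id: &[u8; 32], counterparty_node_id: &[u8; 33],
    ) -> [u8; 32] {
        let mut buf = [0u8; 8 + 32 + 33];
        buf[..8].copy_from_slice(&htlc_id.to_be_bytes());
        buf[8..40].copy_from_slice(channel_id);
        buf[40..].copy_from_slice(counterparty_node_id);
        Sha256::digest(&buf).into()
    }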