diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..521aefc2184 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `HtlcHold` - requires/supports holding HTLCs and forwarding on receipt of an onion message +//! (see [BOLT-2](https://github.com/lightning/bolts/pull/989/files) for more information). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -161,7 +163,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf, + ZeroConf | HtlcHold, // Byte 7 Trampoline | SimpleClose, ] @@ -182,7 +184,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf | Keysend, + ZeroConf | HtlcHold | Keysend, // Byte 7 Trampoline | SimpleClose, // Byte 8 - 31 @@ -640,6 +642,17 @@ mod sealed { define_feature!(51, ZeroConf, [InitContext, NodeContext, ChannelTypeContext], "Feature flags for accepting channels with zero confirmations. Called `option_zeroconf` in the BOLTs", set_zero_conf_optional, set_zero_conf_required, supports_zero_conf, requires_zero_conf); + define_feature!( + 53, + HtlcHold, + [InitContext, NodeContext], + "Feature flags for holding HTLCs and forwarding on receipt of an onion message", + set_htlc_hold_optional, + set_htlc_hold_required, + clear_htlc_hold, + supports_htlc_hold, + requires_htlc_hold + ); define_feature!( 55, Keysend, diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 7d721cd1fdc..e291c83b66c 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -19,7 +19,7 @@ use crate::blinded_path::{BlindedHop, BlindedPath, Direction, IntroductionNode, use crate::crypto::streams::ChaChaPolyReadAdapter; use crate::io; use crate::io::Cursor; -use crate::ln::channelmanager::PaymentId; +use crate::ln::channelmanager::{InterceptId, PaymentId}; use crate::ln::msgs::DecodeError; use crate::ln::onion_utils; use crate::offers::nonce::Nonce; @@ -556,7 +556,7 @@ pub enum AsyncPaymentsContext { }, /// Context contained within the reply [`BlindedMessagePath`] we put in outbound /// [`HeldHtlcAvailable`] messages, provided back to us in corresponding [`ReleaseHeldHtlc`] - /// messages. + /// messages if we are an always-online sender paying an async recipient. /// /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc @@ -577,6 +577,17 @@ pub enum AsyncPaymentsContext { /// able to trivially ask if we're online forever. path_absolute_expiry: core::time::Duration, }, + /// Context contained within the reply [`BlindedMessagePath`] put in outbound + /// [`HeldHtlcAvailable`] messages, provided back to the async sender's always-online counterparty + /// in corresponding [`ReleaseHeldHtlc`] messages. 
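// A quick sketch of the accessors generated by the `define_feature!(53, HtlcHold, ...)`
// invocation above, assuming the usual `lightning-types` exports; per the BOLT
// feature-bit convention, bit 53 (odd) signals optional support and bit 52 (even)
// signals required support.
fn htlc_hold_feature_flags_example() {
	use lightning_types::features::InitFeatures;

	let mut features = InitFeatures::empty();
	assert!(!features.supports_htlc_hold());

	// Odd bit: "it's OK to be odd" -- peers that don't understand it simply ignore it.
	features.set_htlc_hold_optional();
	assert!(features.supports_htlc_hold());
	assert!(!features.requires_htlc_hold());
}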
+ /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + ReleaseHeldHtlc { + /// An identifier for the HTLC that should be released by us as the sender's always-online + /// channel counterparty to the often-offline recipient. + intercept_id: InterceptId, + }, } impl_writeable_tlv_based_enum!(MessageContext, @@ -632,6 +643,9 @@ impl_writeable_tlv_based_enum!(AsyncPaymentsContext, (2, invoice_slot, required), (4, path_absolute_expiry, required), }, + (6, ReleaseHeldHtlc) => { + (0, intercept_id, required), + }, ); /// Contains a simple nonce for use in a blinded path's context. diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a8e7af23984..25fa5e71e5d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -1522,6 +1522,7 @@ fn update_add_msg( onion_routing_packet, skimmed_fee_msat: None, blinding_point, + hold_htlc: None, } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 198569bd77e..4f7a995f434 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn}; #[cfg(splicing)] use bitcoin::{FeeRate, Sequence}; +use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; @@ -282,6 +283,29 @@ impl InboundHTLCState { _ => None, } } + + /// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`] + /// onion message. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + fn should_hold_htlc(&self) -> bool { + match self { + InboundHTLCState::RemoteAnnounced(res) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { + InboundHTLCResolution::Resolved { pending_htlc_status } => { + match pending_htlc_status { + PendingHTLCStatus::Forward(info) => info.routing.should_hold_htlc(), + _ => false, + } + }, + InboundHTLCResolution::Pending { update_add_htlc } => { + update_add_htlc.hold_htlc.is_some() + }, + }, + InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + } + } } struct InboundHTLCOutput { @@ -1602,12 +1626,12 @@ where } #[rustfmt::skip] - pub fn signer_maybe_unblocked( - &mut self, chain_hash: ChainHash, logger: &L, - ) -> Option where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP + ) -> Option where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)), + ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)), ChannelPhase::UnfundedOutboundV1(chan) => { let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger); Some(SignerResumeUpdates { @@ -8707,13 +8731,14 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. 
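// A small round-trip sketch for the new context variant serialized above (TLV enum
// variant 6, field 0). `encode`, `Readable`, and the general shape are standard LDK
// serialization; treat the exact import paths as assumptions.
fn release_held_htlc_context_roundtrip() {
	use lightning::blinded_path::message::AsyncPaymentsContext;
	use lightning::ln::channelmanager::InterceptId;
	use lightning::util::ser::{Readable, Writeable};

	let ctx = AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id: InterceptId([42; 32]) };
	let encoded = ctx.encode();
	let decoded = AsyncPaymentsContext::read(&mut &encoded[..]).unwrap();
	assert!(matches!(decoded, AsyncPaymentsContext::ReleaseHeldHtlc { .. }));
}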
#[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, - user_config: &UserConfig, best_block_height: u32 + user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); @@ -8770,7 +8795,7 @@ where } let mut raa = if self.context.monitor_pending_revoke_and_ack { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; let mut commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() @@ -8859,7 +8884,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. #[rustfmt::skip] - pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, logger: &L, path_for_release_htlc: CBP + ) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { if !self.holder_commitment_point.can_advance() { log_trace!(logger, "Attempting to update holder per-commitment point..."); self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); @@ -8887,7 +8914,7 @@ where } else { None }; let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack { log_trace!(logger, "Attempting to generate pending revoke and ack..."); - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; if self.context.resend_order == RAACommitmentOrder::CommitmentFirst @@ -8958,9 +8985,12 @@ where } } - fn get_last_revoke_and_ack(&mut self, logger: &L) -> Option + fn get_last_revoke_and_ack( + &mut self, path_for_release_htlc: CBP, logger: &L, + ) -> Option where L::Target: Logger, + CBP: Fn(u64) -> BlindedMessagePath, { debug_assert!( self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2 @@ -8973,6 +9003,14 @@ where .ok(); if let Some(per_commitment_secret) = per_commitment_secret { if self.holder_commitment_point.can_advance() { + let mut release_htlc_message_paths = Vec::new(); + for htlc in &self.context.pending_inbound_htlcs { + if htlc.state.should_hold_htlc() { + let path = path_for_release_htlc(htlc.htlc_id); + release_htlc_message_paths.push((htlc.htlc_id, path)); + } + } + self.context.signer_pending_revoke_and_ack = false; return Some(msgs::RevokeAndACK { channel_id: self.context.channel_id, @@ -8980,6 +9018,7 @@ where next_per_commitment_point: self.holder_commitment_point.next_point(), #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths, }); } } @@ -9027,6 +9066,7 @@ where onion_routing_packet: (**onion_packet).clone(), skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, + hold_htlc: None, // Will be set by the async sender when support is added }); } } @@ -9126,13 +9166,15 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. 
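// Hypothetical sender-side counterpart (no such helper exists in this diff): once a
// `revoke_and_ack` carrying `release_htlc_message_paths` arrives, the often-offline
// sender can emit one `HeldHtlcAvailable` onion message per held HTLC, using the
// blinded path built via `path_for_release_htlc` above as the reply path, so the
// recipient's `ReleaseHeldHtlc` response lands on the node actually holding the HTLC.
use lightning::blinded_path::message::BlindedMessagePath;

fn queue_held_htlc_available_messages(
	release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>,
	mut enqueue_onion_message: impl FnMut(u64, BlindedMessagePath),
) {
	for (htlc_id, reply_path) in release_htlc_message_paths {
		// `htlc_id` ties each reply path back to a specific held HTLC.
		enqueue_onion_message(htlc_id, reply_path);
	}
}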
#[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, + path_for_release_htlc: CBP, ) -> Result where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it @@ -9233,7 +9275,7 @@ where self.context.monitor_pending_revoke_and_ack = true; None } else { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -16488,6 +16530,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Receive funding_signed, but the channel will be configured to hold sending channel_ready and @@ -16502,6 +16545,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, // as the funding transaction depends on all channels in the batch becoming ready. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0397116f96d..4c84ef99a27 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -229,6 +229,9 @@ pub enum PendingHTLCRouting { blinded: Option, /// The absolute CLTV of the inbound HTLC incoming_cltv_expiry: Option, + /// Whether this HTLC should be held by our node until we receive a corresponding + /// [`ReleaseHeldHtlc`] onion message. + hold_htlc: Option<()>, }, /// An HTLC which should be forwarded on to another Trampoline node. TrampolineForward { @@ -371,6 +374,15 @@ impl PendingHTLCRouting { Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), } } + + /// Whether this HTLC should be held by our node until we receive a corresponding + /// [`ReleaseHeldHtlc`] onion message. + pub(super) fn should_hold_htlc(&self) -> bool { + match self { + Self::Forward { hold_htlc: Some(()), .. } => true, + _ => false, + } + } } /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it @@ -641,6 +653,34 @@ impl Readable for PaymentId { #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct InterceptId(pub [u8; 32]); +impl InterceptId { + /// This intercept id corresponds to an HTLC that will be forwarded on + /// [`ChannelManager::forward_intercepted_htlc`]. + fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self { + Self(Sha256::hash(ss).to_byte_array()) + } + + /// This intercept id corresponds to an HTLC that will be forwarded on receipt of a + /// [`ReleaseHeldHtlc`] onion message. 
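// A standalone sketch of the derivation implemented by the function that follows:
// SHA256 over htlc_id (8 bytes, big-endian) || channel_id (32 bytes) || counterparty
// pubkey (33 bytes). Uses the `bitcoin` hashes crate directly; the values are
// stand-ins for illustration only.
fn intercept_id_derivation_sketch() -> [u8; 32] {
	use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

	let htlc_id: u64 = 7;
	let channel_id = [0u8; 32]; // stand-in for `ChannelId.0`
	let counterparty = [2u8; 33]; // stand-in for `PublicKey::serialize()`

	let mut buf = [0u8; 8 + 32 + 33];
	buf[..8].copy_from_slice(&htlc_id.to_be_bytes());
	buf[8..40].copy_from_slice(&channel_id);
	buf[40..].copy_from_slice(&counterparty);
	Sha256::hash(&buf).to_byte_array()
}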
+	fn from_htlc_id_and_chan_id(
+		htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+	) -> Self {
+		let htlc_id_size = core::mem::size_of::<u64>();
+		let chan_id_size = core::mem::size_of::<ChannelId>();
+		let cp_id_serialized = counterparty_node_id.serialize();
+
+		const RES_SIZE: usize = 8 + 32 + 33;
+		debug_assert_eq!(RES_SIZE, htlc_id_size + chan_id_size + cp_id_serialized.len());
+
+		let mut res = [0u8; RES_SIZE];
+		res[..htlc_id_size].copy_from_slice(&htlc_id.to_be_bytes());
+		res[htlc_id_size..htlc_id_size + chan_id_size].copy_from_slice(&channel_id.0);
+		res[htlc_id_size + chan_id_size..].copy_from_slice(&cp_id_serialized);
+
+		Self(Sha256::hash(&res[..]).to_byte_array())
+	}
+}
+
 impl Writeable for InterceptId {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
 		self.0.write(w)
@@ -2588,8 +2628,14 @@ pub struct ChannelManager<
 	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
 	#[cfg(not(test))]
 	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
-	/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
-	/// until the user tells us what we should do with them.
+	/// Storage for HTLCs that have been intercepted.
+	///
+	/// These HTLCs fall into two categories:
+	/// 1. HTLCs that are bubbled up to the user and held until the invocation of
+	///    [`ChannelManager::forward_intercepted_htlc`] or [`ChannelManager::fail_intercepted_htlc`]
+	///    (or timeout)
+	/// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a
+	///    [`ReleaseHeldHtlc`] onion message from an often-offline recipient
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
@@ -3384,18 +3430,20 @@ macro_rules! emit_initial_channel_ready_event {
 /// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
 	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+		let channel_id = $chan.context.channel_id();
+		let counterparty_node_id = $chan.context.get_counterparty_node_id();
 		#[cfg(debug_assertions)]
 		{
 			let in_flight_updates =
-				$peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+				$peer_state.in_flight_monitor_updates.get(&channel_id);
 			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
 			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
 		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
 		let mut updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(),
-			$self.best_block.read().unwrap().height);
-		let counterparty_node_id = $chan.context.get_counterparty_node_id();
+			$self.best_block.read().unwrap().height,
+			|htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id));
 		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
 			// We only send a channel_update in the case where we are just now sending a
 			// channel_ready and the channel is in a usable state. We may re-send a
@@ -3411,7 +3459,7 @@ macro_rules! handle_monitor_update_completion {
 		} else { None };
 		let update_actions = $peer_state.monitor_update_blocked_actions
-			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
+			.remove(&channel_id).unwrap_or(Vec::new());
 		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
 			&mut $peer_state.pending_msg_events, $chan, updates.raa,
@@ -3422,7 +3470,6 @@ macro_rules!
handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.context.channel_id(); let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -5404,6 +5451,18 @@ where res } + /// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to + /// create a path for the sender to use as the reply path when they send the recipient a + /// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be + /// received to our node. + fn path_for_release_held_htlc( + &self, htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> BlindedMessagePath { + let intercept_id = + InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id); + self.flow.path_for_release_held_htlc(intercept_id, &*self.entropy_source) + } + /// Signals that no further attempts for the given payment should occur. Useful if you have a /// pending outbound payment with retries remaining, but wish to stop retrying the payment before /// retries are exhausted. @@ -6268,13 +6327,18 @@ where })?; let routing = match payment.forward_info.routing { - PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => { - PendingHTLCRouting::Forward { - onion_packet, - blinded, - incoming_cltv_expiry, - short_channel_id: next_hop_scid, - } + PendingHTLCRouting::Forward { + onion_packet, + blinded, + incoming_cltv_expiry, + hold_htlc, + .. + } => PendingHTLCRouting::Forward { + onion_packet, + blinded, + incoming_cltv_expiry, + hold_htlc, + short_channel_id: next_hop_scid, }, _ => unreachable!(), // Only `PendingHTLCRouting::Forward`s are intercepted }; @@ -6406,6 +6470,32 @@ where }); let shared_secret = next_hop.shared_secret().secret_bytes(); + // Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature + // support. + // + // If we wanted to pretend to be a node that didn't understand the feature at all here, the + // correct behavior would've been to disconnect the sender when we first received the + // update_add message. However, this would make the `UserConfig::enable_htlc_hold` option + // unsafe -- if our node switched the config option from on to off just after the sender + // enqueued their update_add + CS, the sender would continue retransmitting those messages + // and we would keep disconnecting them until the HTLC timed out. + if update_add_htlc.hold_htlc.is_some() + && !BaseMessageHandler::provided_node_features(self).supports_htlc_hold() + { + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, reason.into())); + continue; + } + // Process the HTLC on the incoming channel. match self.do_funded_channel_callback( incoming_scid, @@ -10638,9 +10728,43 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; + let mut fail_intercepted_htlc = || { + let htlc_source = + HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); + let reason = HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::UnknownNextPeer, + ); + let failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: scid, + }; + failed_intercept_forwards.push(( + htlc_source, + payment_hash, + reason, + failure_type, + )); + }; match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + fail_intercepted_htlc(); + }, + } + } else { + entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + } }, hash_map::Entry::Vacant(entry) => { if !is_our_scid @@ -10650,9 +10774,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ scid, &self.chain_hash, ) { - let intercept_id = InterceptId( - Sha256::hash(&pending_add.forward_info.incoming_shared_secret) - .to_byte_array(), + let intercept_id = InterceptId::from_incoming_shared_secret( + &pending_add.forward_info.incoming_shared_secret, ); let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); @@ -10687,22 +10810,23 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid ); - let htlc_source = HTLCSource::PreviousHopData( - pending_add.htlc_previous_hop_data(), - ); - let reason = HTLCFailReason::from_failure_code( - LocalHTLCFailureReason::UnknownNextPeer, - ); - let failure_type = - HTLCHandlingFailureType::InvalidForward { - requested_forward_scid: scid, - }; - failed_intercept_forwards.push(( - htlc_source, - payment_hash, - reason, - failure_type, - )); + fail_intercepted_htlc(); + }, + } + } else if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + fail_intercepted_htlc(); }, } } else { @@ -11022,6 +11146,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.chain_hash, &self.config.read().unwrap(), &*self.best_block.read().unwrap(), + |htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id) ); let responses = try_channel_entry!(self, peer_state, res, chan_entry); let mut channel_update = None; @@ -11485,9 +11610,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Returns whether we should remove this channel as it's just been closed. 
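// Condensed shape of the forwarding decision above, with local stand-ins for LDK's
// internal maps (`held` plays the role of `pending_intercepted_htlcs`, `forward_queue`
// the role of a `forward_htlcs` entry): an HTLC whose routing says "hold" is parked
// under an `InterceptId`-style key derived from (htlc_id, channel_id, counterparty)
// instead of being queued for immediate forwarding.
use std::collections::{hash_map::Entry, HashMap};

fn park_or_forward<P>(
	pending_add: P, should_hold: bool, intercept_key: [u8; 32],
	held: &mut HashMap<[u8; 32], P>, forward_queue: &mut Vec<P>,
) {
	if should_hold {
		match held.entry(intercept_key) {
			Entry::Vacant(e) => {
				e.insert(pending_add);
			},
			// Two HTLCs can never share a (channel_id, htlc_id) pair; the real code
			// debug-asserts and fails the HTLC back rather than overwriting.
			Entry::Occupied(_) => debug_assert!(false),
		}
	} else {
		forward_queue.push(pending_add);
	}
}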
let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Option { + let channel_id = chan.context().channel_id(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) { + if let Some(msgs) = chan.signer_maybe_unblocked( + self.chain_hash, &&logger, + |htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id) + ) { if let Some(msg) = msgs.open_channel { pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, @@ -11508,7 +11637,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { node_id, - channel_id: chan.context().channel_id(), + channel_id, updates, }); let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { @@ -12983,6 +13112,8 @@ where for (err, counterparty_node_id) in failed_channels.drain(..) { let _ = handle_error!(self, err, counterparty_node_id); } + + let _ = self.flow.peer_disconnected(counterparty_node_id); } #[rustfmt::skip] @@ -13092,6 +13223,7 @@ where // until we have some peer connection(s) to receive onion messages over, so as a minor optimization // refresh the cache when a peer connects. self.check_refresh_async_receive_offer_cache(false); + let _ = self.flow.peer_connected(counterparty_node_id, &init_msg.features); res } @@ -14594,18 +14726,48 @@ where } fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { - let payment_id = match context { - AsyncPaymentsContext::OutboundPayment { payment_id } => payment_id, + match context { + AsyncPaymentsContext::OutboundPayment { payment_id } => { + if let Err(e) = self.send_payment_for_static_invoice(payment_id) { + log_trace!( + self.logger, + "Failed to release held HTLC with payment id {}: {:?}", + payment_id, + e + ); + } + }, + AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id } => { + let mut htlc = { + let mut pending_intercept_htlcs = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercept_htlcs.remove(&intercept_id) { + Some(htlc) => htlc, + None => return, + } + }; + match htlc.forward_info.routing { + PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => { + debug_assert!(hold_htlc.is_some()); + *hold_htlc = None; + }, + _ => { + debug_assert!(false, "HTLC intercepts can only be forwards"); + return; + }, + } + let mut per_source_pending_forward = [( + htlc.prev_short_channel_id, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + PersistenceNotifierGuard::notify_on_drop(self); + }, _ => return, - }; - - if let Err(e) = self.send_payment_for_static_invoice(payment_id) { - log_trace!( - self.logger, - "Failed to release held HTLC with payment id {}: {:?}", - payment_id, - e - ); } } @@ -14789,6 +14951,13 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features.set_anchor_zero_fee_commitments_optional(); } + // If we are configured to be an announced node, we are expected to be always-online and can + // advertise the htlc_hold feature. 
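// Shape of the release path implemented in `handle_release_held_htlc` above, again
// with hypothetical stand-in types: on `ReleaseHeldHtlc { intercept_id }`, the held
// HTLC is removed from the intercept map, its `hold_htlc` marker is cleared, and it
// re-enters the normal forwarding pipeline.
use std::collections::HashMap;

struct HeldHtlc {
	hold_htlc: Option<()>,
}

fn release_held_htlc(
	intercept_id: [u8; 32], held: &mut HashMap<[u8; 32], HeldHtlc>,
	forward: impl FnOnce(HeldHtlc),
) {
	if let Some(mut htlc) = held.remove(&intercept_id) {
		debug_assert!(htlc.hold_htlc.is_some());
		htlc.hold_htlc = None; // No longer held; forward like any other HTLC.
		forward(htlc);
	}
	// Unknown ids are ignored: the HTLC may already have been released, failed
	// back, or timed out.
}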
+ #[cfg(test)] + if config.enable_htlc_hold { + features.set_htlc_hold_optional(); + } + features } @@ -14813,6 +14982,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, blinded, option), (2, short_channel_id, required), (3, incoming_cltv_expiry, option), + (5, hold_htlc, option), }, (1, Receive) => { (0, payment_data, required), diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5e78049664c..ab2d730ac6b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2288,6 +2288,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } @@ -6543,6 +6544,7 @@ pub fn test_counterparty_raa_skip_no_crash() { next_per_commitment_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); assert_eq!( diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index f90b8b880bc..dc5d07c180e 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -835,6 +835,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -935,6 +936,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1072,6 +1074,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &msg); @@ -1255,6 +1258,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1637,6 +1641,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { onion_routing_packet: onion_packet.clone(), skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; for i in 0..50 { @@ -2242,6 +2247,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -2376,6 +2382,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 6d025293b9e..b4034391564 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -31,6 +31,7 @@ use bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; +use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{ BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs, UnauthenticatedReceiveTlvs, }; @@ -765,6 
+766,11 @@ pub struct UpdateAddHTLC { /// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion /// routing packet and the recipient-provided encrypted payload within. pub blinding_point: Option, + /// Set to `Some` if the sender wants the receiver of this message to hold onto this HTLC until + /// receipt of a [`ReleaseHeldHtlc`] onion message from the payment recipient. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + pub hold_htlc: Option<()>, } /// An onion message to be sent to or received from a peer. @@ -883,6 +889,13 @@ pub struct RevokeAndACK { #[cfg(taproot)] /// Musig nonce the recipient should use in their next commitment signature message pub next_local_nonce: Option, + /// A list of `(htlc_id, blinded_path)`. The receiver of this message will use the blinded paths + /// as reply paths to [`HeldHtlcAvailable`] onion messages that they send to the often-offline + /// receiver of this HTLC. The `htlc_id` is used by the receiver of this message to identify which + /// held HTLC a given blinded path corresponds to. + /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + pub release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>, } /// An [`update_fee`] message to be sent to or received from a peer @@ -3173,7 +3186,9 @@ impl_writeable_msg!(RevokeAndACK, { channel_id, per_commitment_secret, next_per_commitment_point -}, {}); +}, { + (1, release_htlc_message_paths, optional_vec) +}); #[cfg(taproot)] impl_writeable_msg!(RevokeAndACK, { @@ -3181,6 +3196,7 @@ impl_writeable_msg!(RevokeAndACK, { per_commitment_secret, next_per_commitment_point }, { + (1, release_htlc_message_paths, optional_vec), (4, next_local_nonce, option) }); @@ -3268,7 +3284,10 @@ impl_writeable_msg!(UpdateAddHTLC, { onion_routing_packet, }, { (0, blinding_point, option), - (65537, skimmed_fee_msat, option) + (65537, skimmed_fee_msat, option), + // TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message + // and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989. 
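// Why these TLV types are odd: per BOLT 1's "it's OK to be odd" rule, a peer that
// doesn't understand an odd record simply skips it, so `hold_htlc` (75537) and
// `release_htlc_message_paths` (1) can be deployed without breaking older peers.
// An even type would force non-understanding readers to reject the message; the
// TODO above tracks switching to the type from BOLTs PR 989 once writing a new
// even TLV no longer breaks `ChannelManager` downgrades.
fn tlv_type_is_skippable_by_old_readers(tlv_type: u64) -> bool {
	tlv_type % 2 == 1
}

fn tlv_parity_examples() {
	assert!(tlv_type_is_skippable_by_old_readers(75537)); // hold_htlc
	assert!(tlv_type_is_skippable_by_old_readers(1)); // release_htlc_message_paths
}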
+ (75537, hold_htlc, option), }); impl LengthReadable for OnionMessage { @@ -5701,6 +5720,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; let encoded_value = update_add_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); @@ -5821,6 +5841,7 @@ mod tests { next_per_commitment_point: pubkey_1, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; let encoded_value = raa.encode(); let target_value = 
<Vec<u8>>::from_hex("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap();
diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs
index 79952faca9a..0934c6c812b 100644
--- a/lightning/src/ln/onion_payment.rs
+++ b/lightning/src/ln/onion_payment.rs
@@ -190,6 +190,7 @@ pub(super) fn create_fwd_pending_htlc_info(
 		onion_packet: outgoing_packet,
 		short_channel_id,
 		incoming_cltv_expiry: Some(msg.cltv_expiry),
+		hold_htlc: msg.hold_htlc,
 		blinded: intro_node_blinding_point.or(msg.blinding_point)
 			.map(|bp| BlindedForward {
 				inbound_blinding_point: bp,
@@ -753,6 +754,7 @@ mod tests {
 			onion_routing_packet,
 			skimmed_fee_msat: None,
 			blinding_point: None,
+			hold_htlc: None,
 		}
 	}
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 0af0463134e..3dab164e619 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -5017,6 +5017,7 @@ fn peel_payment_onion_custom_tlvs() {
 		skimmed_fee_msat: None,
 		onion_routing_packet,
 		blinding_point: None,
+		hold_htlc: None,
 	};
 	let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
 		&update_add,
diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs
index 38f472b2f5b..1541d0e01c7 100644
--- a/lightning/src/offers/flow.rs
+++ b/lightning/src/offers/flow.rs
@@ -32,7 +32,7 @@ use crate::prelude::*;
 use crate::chain::BestBlock;
 use crate::ln::channel_state::ChannelDetails;
-use crate::ln::channelmanager::{PaymentId, CLTV_FAR_FAR_AWAY};
+use crate::ln::channelmanager::{InterceptId, PaymentId, CLTV_FAR_FAR_AWAY};
 use crate::ln::inbound_payment;
 use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache;
 use crate::offers::invoice::{
@@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{
 	StaticInvoicePersisted,
 };
 use crate::onion_message::messenger::{
-	Destination, MessageRouter, MessageSendInstructions, Responder,
+	Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH,
 };
 use crate::onion_message::offers::OffersMessage;
 use crate::onion_message::packet::OnionMessageContents;
@@ -61,6 +61,7 @@ use crate::sign::{EntropySource, NodeSigner, ReceiveAuthKey};
 use crate::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder};
 use crate::sync::{Mutex, RwLock};
+use crate::types::features::InitFeatures;
 use crate::types::payment::{PaymentHash, PaymentSecret};
 use crate::util::ser::Writeable;
@@ -98,6 +99,7 @@ where
 	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
 	async_receive_offer_cache: Mutex<AsyncReceiveOfferCache>,
+	peers_cache: Mutex<Vec<MessageForwardNode>>,
 	#[cfg(feature = "dnssec")]
 	pub(crate) hrn_resolver: OMNameResolver,
@@ -130,6 +132,7 @@ where
 			pending_offers_messages: Mutex::new(Vec::new()),
 			pending_async_payments_messages: Mutex::new(Vec::new()),
+			peers_cache: Mutex::new(Vec::new()),
 			#[cfg(feature = "dnssec")]
 			hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height),
@@ -1151,6 +1154,41 @@ where
 		Ok(())
 	}
+	/// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to
+	/// create a path for the sender to use as the reply path when they send the recipient a
+	/// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be
+	/// received by our node.
+	///
+	/// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+	pub fn path_for_release_held_htlc<ES: Deref>(
+		&self, intercept_id: InterceptId, entropy: ES,
+	) -> BlindedMessagePath
+	where
+		ES::Target: EntropySource,
+	{
+		let peers = self.peers_cache.lock().unwrap().clone();
+		let context =
+			MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id });
+		if let Ok(mut paths) = self.create_blinded_paths(peers, context.clone()) {
+			if let Some(path) = paths.pop() {
+				return path;
+			}
+		}
+
+		// If the `MessageRouter` fails on blinded path creation, fall back to creating a 1-hop blinded
+		// path.
+		let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1);
+		BlindedMessagePath::new_with_dummy_hops(
+			&[],
+			self.get_our_node_id(),
+			num_dummy_hops,
+			self.receive_auth_key,
+			context,
+			&*entropy,
+			&self.secp_ctx,
+		)
+	}
+
 	/// Enqueues the created [`DNSSECQuery`] to be sent to the counterparty.
 	///
 	/// # Peers
@@ -1627,4 +1665,40 @@ where
 	pub fn writeable_async_receive_offer_cache(&self) -> Vec<u8> {
 		self.async_receive_offer_cache.encode()
 	}
+
+	/// Indicates that a peer was connected to our node. Useful for the [`OffersMessageFlow`] to keep
+	/// track of which peers are connected, which allows for methods that can create blinded paths
+	/// without requiring a fresh set of [`MessageForwardNode`]s to be passed in.
+	///
+	/// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline
+	/// senders.
+	///
+	/// Errors if the peer does not support onion messages.
+	pub fn peer_connected(
+		&self, peer_node_id: PublicKey, features: &InitFeatures,
+	) -> Result<(), ()> {
+		if !features.supports_onion_messages() {
+			return Err(());
+		}
+
+		let mut peers_cache = self.peers_cache.lock().unwrap();
+		let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None };
+		peers_cache.push(peer);
+
+		Ok(())
+	}
+
+	/// Indicates that a peer was disconnected from our node. See [`Self::peer_connected`].
+	///
+	/// Errors if the peer is unknown.
+	pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> {
+		let mut peers_cache = self.peers_cache.lock().unwrap();
+		let peer_idx = match peers_cache.iter().position(|peer| peer.node_id == peer_node_id) {
+			Some(idx) => idx,
+			None => return Err(()),
+		};
+		peers_cache.swap_remove(peer_idx);
+
+		Ok(())
+	}
}
diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs
index a0c74673799..f0c0330e14d 100644
--- a/lightning/src/util/config.rs
+++ b/lightning/src/util/config.rs
@@ -935,6 +935,18 @@ pub struct UserConfig {
 	///
 	/// Default value: `false`
 	pub enable_dual_funded_channels: bool,
+	/// LDK supports a feature whereby an always-online node holds onto an HTLC from an
+	/// often-offline channel peer until the often-offline payment recipient sends an onion message
+	/// telling the always-online node to release the HTLC. If this is set to `true`, our node will
+	/// hold HTLCs in this manner for channel peers that request it.
+	///
+	/// This should only be set to `true` for nodes which expect to be online reliably.
+	///
+	/// Setting this to `true` may break backwards compatibility with LDK versions < 0.2.
+ /// + /// Default value: `false` + #[cfg(test)] + pub enable_htlc_hold: bool, } impl Default for UserConfig { @@ -949,6 +961,8 @@ impl Default for UserConfig { accept_intercept_htlcs: false, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, + #[cfg(test)] + enable_htlc_hold: false, } } } diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index ea7a3e8a2a2..647e7c77a6c 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -700,7 +700,7 @@ macro_rules! impl_writeable_msg { impl $crate::util::ser::Writeable for $st { fn write(&self, w: &mut W) -> Result<(), $crate::io::Error> { $( self.$field.write(w)?; )* - $crate::encode_tlv_stream!(w, {$(($type, self.$tlvfield.as_ref(), $fieldty)),*}); + $crate::encode_tlv_stream!(w, {$(($type, &self.$tlvfield, $fieldty)),*}); Ok(()) } } @@ -713,7 +713,7 @@ macro_rules! impl_writeable_msg { $crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*}); Ok(Self { $($field,)* - $($tlvfield),* + $($tlvfield: $crate::_init_tlv_based_struct_field!($tlvfield, $fieldty)),* }) } }
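// Motivation for the `impl_writeable_msg!` change above, in miniature: with the
// `optional_vec` TLV semantics used by `release_htlc_message_paths`, the struct
// field is a plain `Vec<T>` rather than an `Option<Vec<T>>` (an absent TLV reads
// back as an empty vec, and an empty vec is omitted on write), so the old
// `self.$tlvfield.as_ref()` write path and bare-`$tlvfield` struct init no longer
// typecheck. `_init_tlv_based_struct_field!` maps each TLV field's decoded value
// into the right shape for its declared format. Hypothetical message shape:
#[allow(dead_code)]
struct ExampleMsg {
	channel_id: [u8; 32],       // written/read positionally, like `RevokeAndACK`
	paths: Vec<(u64, Vec<u8>)>, // declared as `(1, paths, optional_vec)` in the TLV list
}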