diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 1bc9900718a..2f038000ece 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -120,6 +120,7 @@ fn do_test(data: &[u8], out: Out) { first_hop_htlc_msat: 0, payment_id, bolt12_invoice: None, + hold_htlc: None, }; let failure_len = get_u16!(); diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs index aca4bb6e5a9..521aefc2184 100644 --- a/lightning-types/src/features.rs +++ b/lightning-types/src/features.rs @@ -80,6 +80,8 @@ //! (see [BOLT-2](https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#channel-quiescence) for more information). //! - `ZeroFeeCommitments` - A channel type which always uses zero transaction fee on commitment transactions. //! (see [BOLT PR #1228](https://github.com/lightning/bolts/pull/1228) for more info). +//! - `HtlcHold` - requires/supports holding HTLCs and forwarding on receipt of an onion message +//! (see [BOLT-2](https://github.com/lightning/bolts/pull/989/files) for more information). //! //! LDK knows about the following features, but does not support them: //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be @@ -161,7 +163,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf, + ZeroConf | HtlcHold, // Byte 7 Trampoline | SimpleClose, ] @@ -182,7 +184,7 @@ mod sealed { // Byte 5 ProvideStorage | ChannelType | SCIDPrivacy | AnchorZeroFeeCommitments, // Byte 6 - ZeroConf | Keysend, + ZeroConf | HtlcHold | Keysend, // Byte 7 Trampoline | SimpleClose, // Byte 8 - 31 @@ -640,6 +642,17 @@ mod sealed { define_feature!(51, ZeroConf, [InitContext, NodeContext, ChannelTypeContext], "Feature flags for accepting channels with zero confirmations. 
Called `option_zeroconf` in the BOLTs", set_zero_conf_optional, set_zero_conf_required, supports_zero_conf, requires_zero_conf); + define_feature!( + 53, + HtlcHold, + [InitContext, NodeContext], + "Feature flags for holding HTLCs and forwarding on receipt of an onion message", + set_htlc_hold_optional, + set_htlc_hold_required, + clear_htlc_hold, + supports_htlc_hold, + requires_htlc_hold + ); define_feature!( 55, Keysend, diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 7d721cd1fdc..e291c83b66c 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -19,7 +19,7 @@ use crate::blinded_path::{BlindedHop, BlindedPath, Direction, IntroductionNode, use crate::crypto::streams::ChaChaPolyReadAdapter; use crate::io; use crate::io::Cursor; -use crate::ln::channelmanager::PaymentId; +use crate::ln::channelmanager::{InterceptId, PaymentId}; use crate::ln::msgs::DecodeError; use crate::ln::onion_utils; use crate::offers::nonce::Nonce; @@ -556,7 +556,7 @@ pub enum AsyncPaymentsContext { }, /// Context contained within the reply [`BlindedMessagePath`] we put in outbound /// [`HeldHtlcAvailable`] messages, provided back to us in corresponding [`ReleaseHeldHtlc`] - /// messages. + /// messages if we are an always-online sender paying an async recipient. /// /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc @@ -577,6 +577,17 @@ pub enum AsyncPaymentsContext { /// able to trivially ask if we're online forever. path_absolute_expiry: core::time::Duration, }, + /// Context contained within the reply [`BlindedMessagePath`] put in outbound + /// [`HeldHtlcAvailable`] messages, provided back to the async sender's always-online counterparty + /// in corresponding [`ReleaseHeldHtlc`] messages. 
+ /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + ReleaseHeldHtlc { + /// An identifier for the HTLC that should be released by us as the sender's always-online + /// channel counterparty to the often-offline recipient. + intercept_id: InterceptId, + }, } impl_writeable_tlv_based_enum!(MessageContext, @@ -632,6 +643,9 @@ impl_writeable_tlv_based_enum!(AsyncPaymentsContext, (2, invoice_slot, required), (4, path_absolute_expiry, required), }, + (6, ReleaseHeldHtlc) => { + (0, intercept_id, required), + }, ); /// Contains a simple nonce for use in a blinded path's context. diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index e617f6fbf1f..ed7c294956d 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. 
-use crate::blinded_path::message::{MessageContext, OffersContext}; +use crate::blinded_path::message::{MessageContext, NextMessageHop, OffersContext}; use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; @@ -54,11 +54,12 @@ use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use crate::util::config::UserConfig; use crate::util::ser::Writeable; use bitcoin::constants::ChainHash; use bitcoin::network::Network; use bitcoin::secp256k1; -use bitcoin::secp256k1::Secp256k1; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; use core::convert::Infallible; use core::time::Duration; @@ -331,32 +332,114 @@ fn expect_offer_paths_requests(recipient: &Node, next_hop_nodes: &[&Node]) { // We want to check that the async recipient has enqueued at least one `OfferPathsRequest` and no // other message types. Check this by iterating through all their outbound onion messages, peeling // multiple times if the messages are forwarded through other nodes. 
- let per_msg_recipient_msgs = recipient.onion_messenger.release_pending_msgs(); + let offer_paths_reqs = extract_expected_om(recipient, next_hop_nodes, |peeled_onion| { + matches!( + peeled_onion, + PeeledOnion::AsyncPayments(AsyncPaymentsMessage::OfferPathsRequest(_), _, _) + ) + }); + assert!(!offer_paths_reqs.is_empty()); +} + +fn extract_invoice_request_om<'a>( + payer: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage) { + extract_expected_om(payer, next_hop_nodes, |peeled_onion| { + matches!(peeled_onion, &PeeledOnion::Offers(OffersMessage::InvoiceRequest(_), _, _)) + }) + .pop() + .unwrap() +} + +fn extract_static_invoice_om<'a>( + invoice_server: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage, StaticInvoice) { + let mut static_invoice = None; + let (peer_id, om) = extract_expected_om(invoice_server, next_hop_nodes, |peeled_onion| { + if let &PeeledOnion::Offers(OffersMessage::StaticInvoice(inv), _, _) = &peeled_onion { + static_invoice = Some(inv.clone()); + true + } else { + false + } + }) + .pop() + .unwrap(); + (peer_id, om, static_invoice.unwrap()) +} + +fn extract_held_htlc_available_om<'a>( + payer: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage) { + extract_expected_om(payer, next_hop_nodes, |peeled_onion| { + matches!( + peeled_onion, + &PeeledOnion::AsyncPayments(AsyncPaymentsMessage::HeldHtlcAvailable(_), _, _) + ) + }) + .pop() + .unwrap() +} + +fn extract_release_htlc_om<'a>( + recipient: &'a Node, next_hop_nodes: &[&'a Node], +) -> (PublicKey, msgs::OnionMessage) { + extract_expected_om(recipient, next_hop_nodes, |peeled_onion| { + matches!( + peeled_onion, + &PeeledOnion::AsyncPayments(AsyncPaymentsMessage::ReleaseHeldHtlc(_), _, _) + ) + }) + .pop() + .unwrap() +} + +fn extract_expected_om( + msg_sender: &Node, next_hop_nodes: &[&Node], mut expected_msg_type: F, +) -> Vec<(PublicKey, msgs::OnionMessage)> +where + F: FnMut(&PeeledOnion) -> bool, +{ + let 
per_msg_recipient_msgs = msg_sender.onion_messenger.release_pending_msgs(); let mut pk_to_msg = Vec::new(); for (pk, msgs) in per_msg_recipient_msgs { for msg in msgs { pk_to_msg.push((pk, msg)); } } - let mut num_offer_paths_reqs: u8 = 0; + let mut msgs = Vec::new(); while let Some((pk, msg)) = pk_to_msg.pop() { let node = next_hop_nodes.iter().find(|node| node.node.get_our_node_id() == pk).unwrap(); let peeled_msg = node.onion_messenger.peel_onion_message(&msg).unwrap(); match peeled_msg { - PeeledOnion::AsyncPayments(AsyncPaymentsMessage::OfferPathsRequest(_), _, _) => { - num_offer_paths_reqs += 1; - }, PeeledOnion::Forward(next_hop, msg) => { let next_pk = match next_hop { - crate::blinded_path::message::NextMessageHop::NodeId(pk) => pk, - _ => panic!(), + NextMessageHop::NodeId(pk) => pk, + NextMessageHop::ShortChannelId(scid) => { + let mut next_pk = None; + for node in next_hop_nodes { + if node.node.get_our_node_id() == pk { + continue; + } + for channel in node.node.list_channels() { + if channel.short_channel_id.unwrap() == scid + || channel.inbound_scid_alias.unwrap_or(0) == scid + { + next_pk = Some(node.node.get_our_node_id()); + } + } + } + next_pk.unwrap() + }, }; pk_to_msg.push((next_pk, msg)); }, - _ => panic!("Unexpected message"), + peeled_onion if expected_msg_type(&peeled_onion) => msgs.push((pk, msg)), + peeled_onion => panic!("Unexpected message: {:#?}", peeled_onion), } } - assert!(num_offer_paths_reqs > 0); + assert!(!msgs.is_empty()); + msgs } fn advance_time_by(duration: Duration, node: &Node) { @@ -365,6 +448,14 @@ fn advance_time_by(duration: Duration, node: &Node) { connect_block(node, &block); } +fn often_offline_node_cfg() -> UserConfig { + let mut cfg = test_default_channel_config(); + cfg.channel_handshake_config.announce_for_forwarding = false; + cfg.channel_handshake_limits.force_announced_channel_preference = true; + cfg.hold_outbound_htlcs_at_next_hop = true; + cfg +} + #[test] fn invalid_keysend_payment_secret() { let 
chanmon_cfgs = create_chanmon_cfgs(3); @@ -2206,3 +2297,108 @@ fn invoice_server_is_not_channel_peer() { let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); assert_eq!(res.0, Some(PaidBolt12Invoice::StaticInvoice(invoice))); } + +#[test] +fn simple_async_sender() { + // Test the basic case of an async sender paying an async recipient. + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let (sender_cfg, recipient_cfg) = (often_offline_node_cfg(), often_offline_node_cfg()); + let mut invoice_server_cfg = test_default_channel_config(); + invoice_server_cfg.accept_forwards_to_priv_channels = true; + let node_chanmgrs = create_node_chanmgrs( + 4, + &node_cfgs, + &[Some(sender_cfg), None, Some(invoice_server_cfg), Some(recipient_cfg)], + ); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0); + create_unannounced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0); + // Make sure all nodes are at the same block height + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); + connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1); + let sender = &nodes[0]; + let sender_lsp = &nodes[1]; + let invoice_server = &nodes[2]; + let recipient = &nodes[3]; + + let recipient_id = vec![42; 32]; + let inv_server_paths = + invoice_server.node.blinded_paths_for_async_recipient(recipient_id.clone(), None).unwrap(); + recipient.node.set_paths_to_static_invoice_server(inv_server_paths).unwrap(); + 
expect_offer_paths_requests(recipient, &[sender, sender_lsp, invoice_server]); + let invoice = + pass_static_invoice_server_messages(invoice_server, recipient, recipient_id.clone()) + .invoice; + + let offer = recipient.node.get_async_receive_offer().unwrap(); + let amt_msat = 5000; + let payment_id = PaymentId([1; 32]); + let params = RouteParametersConfig::default(); + sender + .node + .pay_for_offer(&offer, None, Some(amt_msat), None, payment_id, Retry::Attempts(0), params) + .unwrap(); + + // Forward invreq to server, pass static invoice back, check that htlc was locked in/monitor was + // added + let (peer_id, invreq_om) = extract_invoice_request_om(sender, &[sender_lsp, invoice_server]); + invoice_server.onion_messenger.handle_onion_message(peer_id, &invreq_om); + + let mut events = invoice_server.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let reply_path = match events.pop().unwrap() { + Event::StaticInvoiceRequested { recipient_id: ev_id, invoice_slot: _, reply_path } => { + assert_eq!(recipient_id, ev_id); + reply_path + }, + _ => panic!(), + }; + + invoice_server.node.send_static_invoice(invoice, reply_path).unwrap(); + let (peer_node_id, static_invoice_om, static_invoice) = + extract_static_invoice_om(invoice_server, &[sender_lsp, sender, recipient]); + + // The sender should lock in the held HTLC with their LSP right after receiving the static invoice. 
+ sender.onion_messenger.handle_onion_message(peer_node_id, &static_invoice_om); + check_added_monitors(sender, 1); + let commitment_update = get_htlc_update_msgs!(sender, sender_lsp.node.get_our_node_id()); + let update_add = commitment_update.update_add_htlcs[0].clone(); + let payment_hash = update_add.payment_hash; + assert!(update_add.hold_htlc.is_some()); + sender_lsp.node.handle_update_add_htlc(sender.node.get_our_node_id(), &update_add); + commitment_signed_dance!(sender_lsp, sender, &commitment_update.commitment_signed, false, true); + + // Ensure that after the held HTLC is locked in, the sender's lsp does not forward it immediately. + sender_lsp.node.process_pending_htlc_forwards(); + assert!(sender_lsp.node.get_and_clear_pending_msg_events().is_empty()); + + let (peer_id, held_htlc_om) = + extract_held_htlc_available_om(sender, &[sender_lsp, invoice_server, recipient]); + recipient.onion_messenger.handle_onion_message(peer_id, &held_htlc_om); + let (peer_id, release_htlc_om) = + extract_release_htlc_om(recipient, &[sender, sender_lsp, invoice_server]); + sender_lsp.onion_messenger.handle_onion_message(peer_id, &release_htlc_om); + + // After the sender's LSP receives release_held_htlc from the recipient, the payment can complete + sender_lsp.node.process_pending_htlc_forwards(); + let mut events = sender_lsp.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = remove_first_msg_event_to_node(&invoice_server.node.get_our_node_id(), &mut events); + check_added_monitors!(sender_lsp, 1); + + let path: &[&Node] = &[invoice_server, recipient]; + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let claimable_ev = do_pass_along_path(args).unwrap(); + + let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; + let keysend_preimage = extract_payment_preimage(&claimable_ev); + let (res, _) = + claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); + 
assert_eq!(res, Some(PaidBolt12Invoice::StaticInvoice(static_invoice))); +} diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index a8e7af23984..25fa5e71e5d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -1522,6 +1522,7 @@ fn update_add_msg( onion_routing_packet, skimmed_fee_msat: None, blinding_point, + hold_htlc: None, } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 198569bd77e..fc8dacc8596 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn}; #[cfg(splicing)] use bitcoin::{FeeRate, Sequence}; +use crate::blinded_path::message::BlindedMessagePath; use crate::chain::chaininterface::{ fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, }; @@ -78,6 +79,7 @@ use crate::ln::script::{self, ShutdownScript}; use crate::ln::types::ChannelId; #[cfg(splicing)] use crate::ln::LN_MAX_MSG_LEN; +use crate::offers::static_invoice::StaticInvoice; use crate::routing::gossip::NodeId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::tx_builder::{HTLCAmountDirection, NextCommitmentStats, SpecTxBuilder, TxBuilder}; @@ -282,6 +284,29 @@ impl InboundHTLCState { _ => None, } } + + /// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`] + /// onion message. 
+ /// + ///[`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + fn should_hold_htlc(&self) -> bool { + match self { + InboundHTLCState::RemoteAnnounced(res) + | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res) + | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res { + InboundHTLCResolution::Resolved { pending_htlc_status } => { + match pending_htlc_status { + PendingHTLCStatus::Forward(info) => info.routing.should_hold_htlc(), + _ => false, + } + }, + InboundHTLCResolution::Pending { update_add_htlc } => { + update_add_htlc.hold_htlc.is_some() + }, + }, + InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false, + } + } } struct InboundHTLCOutput { @@ -1602,12 +1627,12 @@ where } #[rustfmt::skip] - pub fn signer_maybe_unblocked( - &mut self, chain_hash: ChainHash, logger: &L, - ) -> Option where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP + ) -> Option where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { match &mut self.phase { ChannelPhase::Undefined => unreachable!(), - ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)), + ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)), ChannelPhase::UnfundedOutboundV1(chan) => { let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger); Some(SignerResumeUpdates { @@ -7968,10 +7993,25 @@ where /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. 
+ /// + /// The static invoices will be used by us as an async sender to enqueue [`HeldHtlcAvailable`] + /// onion messages for the often-offline recipient, and the blinded reply paths the invoices are + /// paired with were created by our channel counterparty and will be used as reply paths for + /// corresponding [`ReleaseHeldHtlc`] messages. + /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc pub fn revoke_and_ack( &mut self, msg: &msgs::RevokeAndACK, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, hold_mon_update: bool, - ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option), ChannelError> + ) -> Result< + ( + Vec<(HTLCSource, PaymentHash)>, + Vec<(StaticInvoice, BlindedMessagePath)>, + Option, + ), + ChannelError, + > where F::Target: FeeEstimator, L::Target: Logger, @@ -8086,6 +8126,7 @@ where let mut finalized_claimed_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); let mut update_fail_malformed_htlcs = Vec::new(); + let mut static_invoices = Vec::new(); let mut require_commitment = false; let mut value_to_self_msat_diff: i64 = 0; @@ -8201,6 +8242,20 @@ where } } for htlc in pending_outbound_htlcs.iter_mut() { + for (htlc_id, blinded_path) in &msg.release_htlc_message_paths { + if htlc.htlc_id != *htlc_id { + continue; + } + let static_invoice = match htlc.source.static_invoice() { + Some(inv) => inv, + None => { + // This is reachable but it means the counterparty is buggy and included a release + // path for an HTLC that we didn't originally flag as a hold_htlc. 
+ continue; + }, + }; + static_invoices.push((static_invoice, blinded_path.clone())); + } if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { log_trace!( logger, @@ -8268,9 +8323,9 @@ where self.context .blocked_monitor_updates .push(PendingChannelMonitorUpdate { update: monitor_update }); - return Ok(($htlcs_to_fail, None)); + return Ok(($htlcs_to_fail, static_invoices, None)); } else { - return Ok(($htlcs_to_fail, Some(monitor_update))); + return Ok(($htlcs_to_fail, static_invoices, Some(monitor_update))); } }; } @@ -8707,13 +8762,14 @@ where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. #[rustfmt::skip] - pub fn monitor_updating_restored( + pub fn monitor_updating_restored( &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, - user_config: &UserConfig, best_block_height: u32 + user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP ) -> MonitorRestoreUpdates where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); @@ -8770,7 +8826,7 @@ where } let mut raa = if self.context.monitor_pending_revoke_and_ack { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; let mut commitment_update = if self.context.monitor_pending_commitment_signed { self.get_last_commitment_update_for_send(logger).ok() @@ -8859,7 +8915,9 @@ where /// Indicates that the signer may have some signatures for us, so we should retry if we're /// blocked. 
#[rustfmt::skip] - pub fn signer_maybe_unblocked(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger { + pub fn signer_maybe_unblocked( + &mut self, logger: &L, path_for_release_htlc: CBP + ) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath { if !self.holder_commitment_point.can_advance() { log_trace!(logger, "Attempting to update holder per-commitment point..."); self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger); @@ -8887,7 +8945,7 @@ where } else { None }; let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack { log_trace!(logger, "Attempting to generate pending revoke and ack..."); - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } else { None }; if self.context.resend_order == RAACommitmentOrder::CommitmentFirst @@ -8958,9 +9016,12 @@ where } } - fn get_last_revoke_and_ack(&mut self, logger: &L) -> Option + fn get_last_revoke_and_ack( + &mut self, path_for_release_htlc: CBP, logger: &L, + ) -> Option where L::Target: Logger, + CBP: Fn(u64) -> BlindedMessagePath, { debug_assert!( self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2 @@ -8973,6 +9034,14 @@ where .ok(); if let Some(per_commitment_secret) = per_commitment_secret { if self.holder_commitment_point.can_advance() { + let mut release_htlc_message_paths = Vec::new(); + for htlc in &self.context.pending_inbound_htlcs { + if htlc.state.should_hold_htlc() { + let path = path_for_release_htlc(htlc.htlc_id); + release_htlc_message_paths.push((htlc.htlc_id, path)); + } + } + self.context.signer_pending_revoke_and_ack = false; return Some(msgs::RevokeAndACK { channel_id: self.context.channel_id, @@ -8980,6 +9049,7 @@ where next_per_commitment_point: self.holder_commitment_point.next_point(), #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths, }); } } @@ -9027,6 +9097,7 @@ where 
onion_routing_packet: (**onion_packet).clone(), skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, + hold_htlc: htlc.source.hold_htlc_at_next_hop().then(|| ()), }); } } @@ -9126,13 +9197,15 @@ where /// May panic if some calls other than message-handling calls (which will all Err immediately) /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call. #[rustfmt::skip] - pub fn channel_reestablish( + pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, + path_for_release_htlc: CBP, ) -> Result where L::Target: Logger, - NS::Target: NodeSigner + NS::Target: NodeSigner, + CBP: Fn(u64) -> BlindedMessagePath { if !self.context.channel_state.is_peer_disconnected() { // While BOLT 2 doesn't indicate explicitly we should error this channel here, it @@ -9233,7 +9306,7 @@ where self.context.monitor_pending_revoke_and_ack = true; None } else { - self.get_last_revoke_and_ack(logger) + self.get_last_revoke_and_ack(path_for_release_htlc, logger) } } else { debug_assert!(false, "All values should have been handled in the four cases above"); @@ -15056,6 +15129,7 @@ mod tests { first_hop_htlc_msat: 548, payment_id: PaymentId([42; 32]), bolt12_invoice: None, + hold_htlc: None, }, skimmed_fee_msat: None, blinding_point: None, @@ -15503,6 +15577,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([42; 32]), bolt12_invoice: None, + hold_htlc: None, }; let dummy_outbound_output = OutboundHTLCOutput { htlc_id: 0, @@ -16488,6 +16563,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Receive funding_signed, but the channel will be configured to hold sending channel_ready and @@ -16502,6 +16578,7 @@ mod tests { chain_hash, &config, 0, + |_| unreachable!() ); // Our channel_ready shouldn't be sent yet, even with 
trust_own_funding_0conf set, // as the funding transaction depends on all channels in the batch becoming ready. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 0397116f96d..cedc919b810 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -92,7 +92,7 @@ use crate::ln::outbound_payment::{ }; use crate::ln::types::ChannelId; use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache; -use crate::offers::flow::{InvreqResponseInstructions, OffersMessageFlow}; +use crate::offers::flow::{HeldHtlcReplyPath, InvreqResponseInstructions, OffersMessageFlow}; use crate::offers::invoice::{ Bolt12Invoice, DerivedSigningPubkey, InvoiceBuilder, DEFAULT_RELATIVE_EXPIRY, }; @@ -229,6 +229,9 @@ pub enum PendingHTLCRouting { blinded: Option, /// The absolute CLTV of the inbound HTLC incoming_cltv_expiry: Option, + /// Whether this HTLC should be held by our node until we receive a corresponding + /// [`ReleaseHeldHtlc`] onion message. + hold_htlc: Option<()>, }, /// An HTLC which should be forwarded on to another Trampoline node. TrampolineForward { @@ -371,6 +374,15 @@ impl PendingHTLCRouting { Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), } } + + /// Whether this HTLC should be held by our node until we receive a corresponding + /// [`ReleaseHeldHtlc`] onion message. + pub(super) fn should_hold_htlc(&self) -> bool { + match self { + Self::Forward { hold_htlc: Some(()), .. } => true, + _ => false, + } + } } /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it @@ -641,6 +653,34 @@ impl Readable for PaymentId { #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct InterceptId(pub [u8; 32]); +impl InterceptId { + /// This intercept id corresponds to an HTLC that will be forwarded on + /// [`ChannelManager::forward_intercepted_htlc`]. 
+ fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self { + Self(Sha256::hash(ss).to_byte_array()) + } + + /// This intercept id corresponds to an HTLC that will be forwarded on receipt of a + /// [`ReleaseHeldHtlc`] onion message. + fn from_htlc_id_and_chan_id( + htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> Self { + let htlc_id_size = core::mem::size_of::(); + let chan_id_size = core::mem::size_of::(); + let cp_id_serialized = counterparty_node_id.serialize(); + + const RES_SIZE: usize = 8 + 32 + 33; + debug_assert_eq!(RES_SIZE, htlc_id_size + chan_id_size + cp_id_serialized.len()); + + let mut res = [0u8; RES_SIZE]; + res[..htlc_id_size].copy_from_slice(&htlc_id.to_be_bytes()); + res[htlc_id_size..htlc_id_size + chan_id_size].copy_from_slice(&channel_id.0); + res[htlc_id_size + chan_id_size..].copy_from_slice(&cp_id_serialized); + + Self(Sha256::hash(&res[..]).to_byte_array()) + } +} + impl Writeable for InterceptId { fn write(&self, w: &mut W) -> Result<(), io::Error> { self.0.write(w) @@ -708,6 +748,10 @@ mod fuzzy_channelmanager { /// we can provide proof-of-payment details in payment claim events even after a restart /// with a stale ChannelManager state. bolt12_invoice: Option, + /// Whether we want our next-hop channel peer to hold onto this HTLC until they receive an + /// onion message from the often-offline recipient indicating that the recipient is online and + /// ready to receive the HTLC. 
+ hold_htlc: Option<()>, }, } @@ -751,6 +795,7 @@ impl core::hash::Hash for HTLCSource { payment_id, first_hop_htlc_msat, bolt12_invoice, + hold_htlc, } => { 1u8.hash(hasher); path.hash(hasher); @@ -758,6 +803,7 @@ impl core::hash::Hash for HTLCSource { payment_id.hash(hasher); first_hop_htlc_msat.hash(hasher); bolt12_invoice.hash(hasher); + hold_htlc.hash(hasher); }, } } @@ -771,6 +817,7 @@ impl HTLCSource { first_hop_htlc_msat: 0, payment_id: PaymentId([2; 32]), bolt12_invoice: None, + hold_htlc: None, } } @@ -795,6 +842,23 @@ impl HTLCSource { _ => None, } } + + pub(crate) fn hold_htlc_at_next_hop(&self) -> bool { + match self { + Self::OutboundRoute { hold_htlc, .. } => hold_htlc.is_some(), + _ => false, + } + } + + pub(crate) fn static_invoice(&self) -> Option { + match self { + Self::OutboundRoute { + bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(inv)), + .. + } => Some(inv.clone()), + _ => None, + } + } } /// This enum is used to specify which error data to send to peers when failing back an HTLC @@ -2588,8 +2652,14 @@ pub struct ChannelManager< pub(super) forward_htlcs: Mutex>>, #[cfg(not(test))] forward_htlcs: Mutex>>, - /// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here - /// until the user tells us what we should do with them. + /// Storage for HTLCs that have been intercepted. + /// + /// These HTLCs fall into two categories: + /// 1. HTLCs that are bubbled up to the user and held until the invocation of + /// [`ChannelManager::forward_intercepted_htlc`] or [`ChannelManager::fail_intercepted_htlc`] + /// (or timeout) + /// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a + /// [`ReleaseHeldHtlc`] onion message from an often-offline recipient /// /// See `ChannelManager` struct-level documentation for lock order requirements. pending_intercepted_htlcs: Mutex>, @@ -3384,18 +3454,20 @@ macro_rules! emit_initial_channel_ready_event { /// set for this channel is empty! 
macro_rules! handle_monitor_update_completion { ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { + let channel_id = $chan.context.channel_id(); + let counterparty_node_id = $chan.context.get_counterparty_node_id(); #[cfg(debug_assertions)] { let in_flight_updates = - $peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id()); + $peer_state.in_flight_monitor_updates.get(&channel_id); assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); assert_eq!($chan.blocked_monitor_updates_pending(), 0); } let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); let mut updates = $chan.monitor_updating_restored(&&logger, &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(), - $self.best_block.read().unwrap().height); - let counterparty_node_id = $chan.context.get_counterparty_node_id(); + $self.best_block.read().unwrap().height, + |htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id)); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a @@ -3411,7 +3483,7 @@ macro_rules! handle_monitor_update_completion { } else { None }; let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&$chan.context.channel_id()).unwrap_or(Vec::new()); + .remove(&channel_id).unwrap_or(Vec::new()); let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( &mut $peer_state.pending_msg_events, $chan, updates.raa, @@ -3422,7 +3494,6 @@ macro_rules! 
handle_monitor_update_completion { $peer_state.pending_msg_events.push(upd); } - let channel_id = $chan.context.channel_id(); let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); @@ -3967,7 +4038,9 @@ where Ok(temporary_channel_id) } - fn list_funded_channels_with_filter)) -> bool + Copy>( + fn list_funded_channels_with_filter< + Fn: FnMut(&(&InitFeatures, &ChannelId, &Channel)) -> bool + Copy, + >( &self, f: Fn, ) -> Vec { // Allocate our best estimate of the number of channels we have in the `res` @@ -3983,9 +4056,13 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; // Only `Channels` in the `Channel::Funded` phase can be considered funded. - let filtered_chan_by_id = - peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f); - res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| { + let filtered_chan_by_id = peer_state + .channel_by_id + .iter() + .map(|(cid, c)| (&peer_state.latest_features, cid, c)) + .filter(|(_, _, chan)| chan.is_funded()) + .filter(f); + res.extend(filtered_chan_by_id.map(|(_, _channel_id, channel)| { ChannelDetails::from_channel( channel, best_block_height, @@ -4037,7 +4114,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context().is_live()) + self.list_funded_channels_with_filter(|&(_, _, ref channel)| channel.context().is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. 
@@ -4870,6 +4947,7 @@ where invoice_request: None, bolt12_invoice: None, session_priv_bytes, + hold_htlc_at_next_hop: false, }) } @@ -4885,6 +4963,7 @@ where invoice_request, bolt12_invoice, session_priv_bytes, + hold_htlc_at_next_hop, } = args; // The top-level caller should hold the total_consistency_lock read lock. debug_assert!(self.total_consistency_lock.try_write().is_err()); @@ -4966,6 +5045,7 @@ where first_hop_htlc_msat: htlc_msat, payment_id, bolt12_invoice: bolt12_invoice.cloned(), + hold_htlc: hold_htlc_at_next_hop.then(|| ()), }; let send_res = chan.send_htlc_and_commit( htlc_msat, @@ -5324,12 +5404,14 @@ where &self, invoice: &StaticInvoice, payment_id: PaymentId, ) -> Result<(), Bolt12PaymentError> { let mut res = Ok(()); + let hold_htlc_channels_res = self.hold_htlc_channels(); PersistenceNotifierGuard::optionally_notify(self, || { let best_block_height = self.best_block.read().unwrap().height; let features = self.bolt12_invoice_features(); let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received( invoice, payment_id, + hold_htlc_channels_res.is_ok(), features, best_block_height, self.duration_since_epoch(), @@ -5349,19 +5431,36 @@ where }, }; - let enqueue_held_htlc_available_res = self.flow.enqueue_held_htlc_available( - invoice, - payment_id, - self.get_peers_for_blinded_path(), - ); - if enqueue_held_htlc_available_res.is_err() { - self.abandon_payment_with_reason( + // If the call to `Self::hold_htlc_channels` succeeded, then we are a private node and can + // hold the HTLCs for this payment at our next-hop channel counterparty until the recipient + // comes online. This allows us to go offline after locking in the HTLCs. 
+ if let Ok(channels) = hold_htlc_channels_res { + if let Err(e) = + self.send_payment_for_static_invoice_no_persist(payment_id, channels) + { + log_trace!( + self.logger, + "Failed to send held HTLC with payment id {}: {:?}", + payment_id, + e + ); + } + } else { + let reply_path = HeldHtlcReplyPath::ToUs { payment_id, - PaymentFailureReason::BlindedPathCreationFailed, - ); - res = Err(Bolt12PaymentError::BlindedPathCreationFailed); - return NotifyOption::DoPersist; - }; + peers: self.get_peers_for_blinded_path(), + }; + let enqueue_held_htlc_available_res = + self.flow.enqueue_held_htlc_available(invoice, reply_path); + if enqueue_held_htlc_available_res.is_err() { + self.abandon_payment_with_reason( + payment_id, + PaymentFailureReason::BlindedPathCreationFailed, + ); + res = Err(Bolt12PaymentError::BlindedPathCreationFailed); + return NotifyOption::DoPersist; + }; + } NotifyOption::DoPersist }); @@ -5369,26 +5468,52 @@ where res } + /// Returns a list of channels where our counterparty supports + /// [`InitFeatures::supports_htlc_hold`], or an error if there are none or we detect that we are + /// an announced node. Useful for sending async payments to [`StaticInvoice`]s. + fn hold_htlc_channels(&self) -> Result, ()> { + let should_send_async = { + let cfg = self.config.read().unwrap(); + cfg.hold_outbound_htlcs_at_next_hop + && !cfg.channel_handshake_config.announce_for_forwarding + && cfg.channel_handshake_limits.force_announced_channel_preference + }; + if !should_send_async { + return Err(()); + } + + let any_announced_channels = AtomicBool::new(false); + let hold_htlc_channels = + self.list_funded_channels_with_filter(|&(init_features, _, ref channel)| { + // If we have an announced channel, we are a node that is expected to be always-online and + // shouldn't be relying on channel counterparties to hold onto our HTLCs for us while + // waiting for the payment recipient to come online. 
+ if channel.context().should_announce() { + any_announced_channels.store(true, Ordering::Relaxed); + } + if any_announced_channels.load(Ordering::Relaxed) { + return false; + } + + init_features.supports_htlc_hold() && channel.context().is_live() + }); + + if any_announced_channels.load(Ordering::Relaxed) || hold_htlc_channels.is_empty() { + Err(()) + } else { + Ok(hold_htlc_channels) + } + } + + /// If we want the HTLCs for this payment to be held at the next-hop channel counterparty, use + /// [`Self::hold_htlc_channels`] and pass the resulting channels in here. fn send_payment_for_static_invoice( - &self, payment_id: PaymentId, + &self, payment_id: PaymentId, first_hops: Vec, ) -> Result<(), Bolt12PaymentError> { - let best_block_height = self.best_block.read().unwrap().height; let mut res = Ok(()); PersistenceNotifierGuard::optionally_notify(self, || { - let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice( - payment_id, - &self.router, - self.list_usable_channels(), - || self.compute_inflight_htlcs(), - &self.entropy_source, - &self.node_signer, - &self, - &self.secp_ctx, - best_block_height, - &self.logger, - &self.pending_events, - |args| self.send_payment_along_path(args), - ); + let outbound_pmts_res = + self.send_payment_for_static_invoice_no_persist(payment_id, first_hops); match outbound_pmts_res { Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => { @@ -5404,6 +5529,39 @@ where res } + /// Useful if the caller is already triggering a persist of the `ChannelManager`. 
+ fn send_payment_for_static_invoice_no_persist( + &self, payment_id: PaymentId, first_hops: Vec, + ) -> Result<(), Bolt12PaymentError> { + let best_block_height = self.best_block.read().unwrap().height; + self.pending_outbound_payments.send_payment_for_static_invoice( + payment_id, + &self.router, + first_hops, + || self.compute_inflight_htlcs(), + &self.entropy_source, + &self.node_signer, + &self, + &self.secp_ctx, + best_block_height, + &self.logger, + &self.pending_events, + |args| self.send_payment_along_path(args), + ) + } + + /// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to + /// create a path for the sender to use as the reply path when they send the recipient a + /// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be + /// received to our node. + fn path_for_release_held_htlc( + &self, htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, + ) -> BlindedMessagePath { + let intercept_id = + InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id); + self.flow.path_for_release_held_htlc(intercept_id, &*self.entropy_source) + } + /// Signals that no further attempts for the given payment should occur. Useful if you have a /// pending outbound payment with retries remaining, but wish to stop retrying the payment before /// retries are exhausted. @@ -6268,13 +6426,18 @@ where })?; let routing = match payment.forward_info.routing { - PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => { - PendingHTLCRouting::Forward { - onion_packet, - blinded, - incoming_cltv_expiry, - short_channel_id: next_hop_scid, - } + PendingHTLCRouting::Forward { + onion_packet, + blinded, + incoming_cltv_expiry, + hold_htlc, + .. 
+ } => PendingHTLCRouting::Forward { + onion_packet, + blinded, + incoming_cltv_expiry, + hold_htlc, + short_channel_id: next_hop_scid, }, _ => unreachable!(), // Only `PendingHTLCRouting::Forward`s are intercepted }; @@ -6406,6 +6569,32 @@ where }); let shared_secret = next_hop.shared_secret().secret_bytes(); + // Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature + // support. + // + // If we wanted to pretend to be a node that didn't understand the feature at all here, the + // correct behavior would've been to disconnect the sender when we first received the + // update_add message. However, this would make the `UserConfig::enable_htlc_hold` option + // unsafe -- if our node switched the config option from on to off just after the sender + // enqueued their update_add + CS, the sender would continue retransmitting those messages + // and we would keep disconnecting them until the HTLC timed out. + if update_add_htlc.hold_htlc.is_some() + && !BaseMessageHandler::provided_node_features(self).supports_htlc_hold() + { + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, reason.into())); + continue; + } + // Process the HTLC on the incoming channel. match self.do_funded_channel_callback( incoming_scid, @@ -10638,9 +10827,43 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; + let mut fail_intercepted_htlc = || { + let htlc_source = + HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); + let reason = HTLCFailReason::from_failure_code( + LocalHTLCFailureReason::UnknownNextPeer, + ); + let failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: scid, + }; + failed_intercept_forwards.push(( + htlc_source, + payment_hash, + reason, + failure_type, + )); + }; match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same scid and htlc id"); + fail_intercepted_htlc(); + }, + } + } else { + entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + } }, hash_map::Entry::Vacant(entry) => { if !is_our_scid @@ -10650,9 +10873,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ scid, &self.chain_hash, ) { - let intercept_id = InterceptId( - Sha256::hash(&pending_add.forward_info.incoming_shared_secret) - .to_byte_array(), + let intercept_id = InterceptId::from_incoming_shared_secret( + &pending_add.forward_info.incoming_shared_secret, ); let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); @@ -10687,22 +10909,23 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid ); - let htlc_source = HTLCSource::PreviousHopData( - pending_add.htlc_previous_hop_data(), - ); - let reason = HTLCFailReason::from_failure_code( - LocalHTLCFailureReason::UnknownNextPeer, - ); - let failure_type = - HTLCHandlingFailureType::InvalidForward { - requested_forward_scid: scid, - }; - failed_intercept_forwards.push(( - htlc_source, - payment_hash, - reason, - failure_type, - )); + fail_intercepted_htlc(); + }, + } + } else if pending_add.forward_info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + prev_htlc_id, + &prev_channel_id, + &prev_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same scid and htlc id"); + fail_intercepted_htlc(); }, } } else { @@ -10776,7 +10999,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ #[rustfmt::skip] fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> { - let htlcs_to_fail = { + let (htlcs_to_fail, static_invoices) = { let per_peer_state = self.per_peer_state.read().unwrap(); let mut peer_state_lock = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -10792,7 +11015,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ let mon_update_blocked = self.raa_monitor_updates_held( &peer_state.actions_blocking_raa_monitor_updates, msg.channel_id, *counterparty_node_id); - let (htlcs_to_fail, monitor_update_opt) = try_channel_entry!(self, peer_state, + let (htlcs_to_fail, static_invoices, monitor_update_opt) = try_channel_entry!(self, peer_state, chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_entry); if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt @@ -10800,7 +11023,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } - htlcs_to_fail + (htlcs_to_fail, static_invoices) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a revoke_and_ack message for an unfunded channel!".into())), chan_entry); @@ -10810,6 +11033,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }; self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id); + for (static_invoice, reply_path) in static_invoices { + let res = self.flow.enqueue_held_htlc_available(&static_invoice, HeldHtlcReplyPath::ToCounterparty { path: reply_path }); + debug_assert!(res.is_ok(), "enqueue_held_htlc_available can only fail for non-async senders"); + } Ok(()) } @@ -11022,6 +11249,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.chain_hash, &self.config.read().unwrap(), &*self.best_block.read().unwrap(), + |htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id) ); let responses = try_channel_entry!(self, peer_state, res, chan_entry); let mut channel_update = None; @@ -11485,9 +11713,13 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ // Returns whether we should remove this channel as it's just been closed. let unblock_chan = |chan: &mut Channel, pending_msg_events: &mut Vec| -> Option { + let channel_id = chan.context().channel_id(); let logger = WithChannelContext::from(&self.logger, &chan.context(), None); let node_id = chan.context().get_counterparty_node_id(); - if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) { + if let Some(msgs) = chan.signer_maybe_unblocked( + self.chain_hash, &&logger, + |htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id) + ) { if let Some(msg) = msgs.open_channel { pending_msg_events.push(MessageSendEvent::SendOpenChannel { node_id, @@ -11508,7 +11740,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { node_id, - channel_id: chan.context().channel_id(), + channel_id, updates, }); let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { @@ -12983,6 +13215,8 @@ where for (err, counterparty_node_id) in failed_channels.drain(..) { let _ = handle_error!(self, err, counterparty_node_id); } + + let _ = self.flow.peer_disconnected(counterparty_node_id); } #[rustfmt::skip] @@ -13092,6 +13326,7 @@ where // until we have some peer connection(s) to receive onion messages over, so as a minor optimization // refresh the cache when a peer connects. 
self.check_refresh_async_receive_offer_cache(false); + let _ = self.flow.peer_connected(counterparty_node_id, &init_msg.features); res } @@ -14594,18 +14829,50 @@ where } fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) { - let payment_id = match context { - AsyncPaymentsContext::OutboundPayment { payment_id } => payment_id, + match context { + AsyncPaymentsContext::OutboundPayment { payment_id } => { + if let Err(e) = + self.send_payment_for_static_invoice(payment_id, self.list_usable_channels()) + { + log_trace!( + self.logger, + "Failed to release held HTLC with payment id {}: {:?}", + payment_id, + e + ); + } + }, + AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id } => { + let mut htlc = { + let mut pending_intercept_htlcs = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercept_htlcs.remove(&intercept_id) { + Some(htlc) => htlc, + None => return, + } + }; + match htlc.forward_info.routing { + PendingHTLCRouting::Forward { ref mut hold_htlc, .. 
} => { + debug_assert!(hold_htlc.is_some()); + *hold_htlc = None; + }, + _ => { + debug_assert!(false, "HTLC intercepts can only be forwards"); + return; + }, + } + let mut per_source_pending_forward = [( + htlc.prev_short_channel_id, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + PersistenceNotifierGuard::notify_on_drop(self); + }, _ => return, - }; - - if let Err(e) = self.send_payment_for_static_invoice(payment_id) { - log_trace!( - self.logger, - "Failed to release held HTLC with payment id {}: {:?}", - payment_id, - e - ); } } @@ -14789,6 +15056,12 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures { features.set_anchor_zero_fee_commitments_optional(); } + // If we are configured to be an announced node, we are expected to be always-online and can + // advertise the htlc_hold feature. + if config.enable_htlc_hold { + features.set_htlc_hold_optional(); + } + features } @@ -14813,6 +15086,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, blinded, option), (2, short_channel_id, required), (3, incoming_cltv_expiry, option), + (5, hold_htlc, option), }, (1, Receive) => { (0, payment_data, required), @@ -15036,6 +15310,7 @@ impl Readable for HTLCSource { let mut payment_params: Option = None; let mut blinded_tail: Option = None; let mut bolt12_invoice: Option = None; + let mut hold_htlc: Option<()> = None; read_tlv_fields!(reader, { (0, session_priv, required), (1, payment_id, option), @@ -15044,6 +15319,7 @@ impl Readable for HTLCSource { (5, payment_params, (option: ReadableArgs, 0)), (6, blinded_tail, option), (7, bolt12_invoice, option), + (9, hold_htlc, option), }); if payment_id.is_none() { // For backwards compat, if there was no payment_id written, use the session_priv bytes @@ -15067,6 +15343,7 @@ impl Readable for HTLCSource { path, payment_id: 
payment_id.unwrap(), bolt12_invoice, + hold_htlc, }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -15084,6 +15361,7 @@ impl Writeable for HTLCSource { ref path, payment_id, bolt12_invoice, + hold_htlc, } => { 0u8.write(writer)?; let payment_id_opt = Some(payment_id); @@ -15096,6 +15374,7 @@ impl Writeable for HTLCSource { (5, None::, option), // payment_params in LDK versions prior to 0.0.115 (6, path.blinded_tail, option), (7, bolt12_invoice, option), + (9, hold_htlc, option), }); }, HTLCSource::PreviousHopData(ref field) => { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 5b0c37a4731..4d1683b44d3 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -4261,6 +4261,7 @@ pub fn test_default_channel_config() -> UserConfig { // feerate of 253). default_config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(50_000_000 / 253); + default_config.enable_htlc_hold = true; default_config } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5e78049664c..ab2d730ac6b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2288,6 +2288,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } @@ -6543,6 +6544,7 @@ pub fn test_counterparty_raa_skip_no_crash() { next_per_commitment_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); assert_eq!( diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index f90b8b880bc..dc5d07c180e 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs 
@@ -835,6 +835,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -935,6 +936,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); expect_and_process_pending_htlcs(&nodes[1], false); @@ -1072,6 +1074,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &msg); @@ -1255,6 +1258,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { onion_routing_packet: onion_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1637,6 +1641,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { onion_routing_packet: onion_packet.clone(), skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; for i in 0..50 { @@ -2242,6 +2247,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -2376,6 +2382,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { next_per_commitment_point: next_local_point, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 6d025293b9e..b4034391564 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -31,6 +31,7 @@ use 
bitcoin::secp256k1::ecdsa::Signature; use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; +use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{ BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs, UnauthenticatedReceiveTlvs, }; @@ -765,6 +766,11 @@ pub struct UpdateAddHTLC { /// Provided if we are relaying or receiving a payment within a blinded path, to decrypt the onion /// routing packet and the recipient-provided encrypted payload within. pub blinding_point: Option, + /// Set to `Some` if the sender wants the receiver of this message to hold onto this HTLC until + /// receipt of a [`ReleaseHeldHtlc`] onion message from the payment recipient. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + pub hold_htlc: Option<()>, } /// An onion message to be sent to or received from a peer. @@ -883,6 +889,13 @@ pub struct RevokeAndACK { #[cfg(taproot)] /// Musig nonce the recipient should use in their next commitment signature message pub next_local_nonce: Option, + /// A list of `(htlc_id, blinded_path)`. The receiver of this message will use the blinded paths + /// as reply paths to [`HeldHtlcAvailable`] onion messages that they send to the often-offline + /// receiver of this HTLC. The `htlc_id` is used by the receiver of this message to identify which + /// held HTLC a given blinded path corresponds to. 
+ /// + /// [`HeldHtlcAvailable`]: crate::onion_message::async_payments::HeldHtlcAvailable + pub release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>, } /// An [`update_fee`] message to be sent to or received from a peer @@ -3173,7 +3186,9 @@ impl_writeable_msg!(RevokeAndACK, { channel_id, per_commitment_secret, next_per_commitment_point -}, {}); +}, { + (1, release_htlc_message_paths, optional_vec) +}); #[cfg(taproot)] impl_writeable_msg!(RevokeAndACK, { @@ -3181,6 +3196,7 @@ impl_writeable_msg!(RevokeAndACK, { per_commitment_secret, next_per_commitment_point }, { + (1, release_htlc_message_paths, optional_vec), (4, next_local_nonce, option) }); @@ -3268,7 +3284,10 @@ impl_writeable_msg!(UpdateAddHTLC, { onion_routing_packet, }, { (0, blinding_point, option), - (65537, skimmed_fee_msat, option) + (65537, skimmed_fee_msat, option), + // TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message + // and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989. 
+ (75537, hold_htlc, option), }); impl LengthReadable for OnionMessage { @@ -5701,6 +5720,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, }; let encoded_value = update_add_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f0101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); @@ -5821,6 +5841,7 @@ mod tests { next_per_commitment_point: pubkey_1, #[cfg(taproot)] next_local_nonce: None, + release_htlc_message_paths: Vec::new(), }; let encoded_value = raa.encode(); let target_value = >::from_hex("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap(); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 79952faca9a..0934c6c812b 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -190,6 +190,7 @@ pub(super) fn create_fwd_pending_htlc_info( onion_packet: outgoing_packet, short_channel_id, incoming_cltv_expiry: Some(msg.cltv_expiry), + hold_htlc: 
msg.hold_htlc, blinded: intro_node_blinding_point.or(msg.blinding_point) .map(|bp| BlindedForward { inbound_blinding_point: bp, @@ -753,6 +754,7 @@ mod tests { onion_routing_packet, skimmed_fee_msat: None, blinding_point: None, + hold_htlc: None, } } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index d45860b0e26..ac008f29137 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -3413,6 +3413,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; process_onion_failure(&ctx_full, &logger, &htlc_source, onion_error) @@ -3603,6 +3604,7 @@ mod tests { first_hop_htlc_msat: dummy_amt_msat, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; { @@ -3791,6 +3793,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; // Iterate over all possible failure positions and check that the cases that can be attributed are. @@ -3900,6 +3903,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + hold_htlc: None, }; let decrypted_failure = process_onion_failure(&ctx_full, &logger, &htlc_source, packet); diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index a66c21ac4ef..9a828be0197 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -103,6 +103,10 @@ pub(crate) enum PendingOutboundPayment { route_params: RouteParameters, invoice_request: InvoiceRequest, static_invoice: StaticInvoice, + // Whether we should pay the static invoice asynchronously, i.e. by setting + // [`UpdateAddHTLC::hold_htlc`] so our channel counterparty(s) hold the HTLC(s) for us until the + // recipient comes online, allowing us to go offline after locking in the HTLC(s). 
+ hold_htlcs_at_next_hop: bool, // The deadline as duration since the Unix epoch for the async recipient to come online, // after which we'll fail the payment. // @@ -828,6 +832,7 @@ pub(super) struct SendAlongPathArgs<'a> { pub invoice_request: Option<&'a InvoiceRequest>, pub bolt12_invoice: Option<&'a PaidBolt12Invoice>, pub session_priv_bytes: [u8; 32], + pub hold_htlc_at_next_hop: bool, } pub(super) struct OutboundPayments { @@ -1060,7 +1065,7 @@ impl OutboundPayments { let payment_params = Some(route_params.payment_params.clone()); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - let onion_session_privs = match outbounds.entry(payment_id) { + let (onion_session_privs, hold_htlcs_at_next_hop) = match outbounds.entry(payment_id) { hash_map::Entry::Occupied(entry) => match entry.get() { PendingOutboundPayment::InvoiceReceived { .. } => { let (retryable_payment, onion_session_privs) = Self::create_pending_payment( @@ -1068,18 +1073,21 @@ impl OutboundPayments { Some(retry_strategy), payment_params, entropy_source, best_block_height, ); *entry.into_mut() = retryable_payment; - onion_session_privs + (onion_session_privs, false) }, PendingOutboundPayment::StaticInvoiceReceived { .. } => { - let invreq = if let PendingOutboundPayment::StaticInvoiceReceived { invoice_request, .. } = entry.remove() { - invoice_request - } else { unreachable!() }; + let (invreq, hold_htlcs_at_next_hop) = + if let PendingOutboundPayment::StaticInvoiceReceived { + invoice_request, hold_htlcs_at_next_hop, .. 
+ } = entry.remove() { + (invoice_request, hold_htlcs_at_next_hop) + } else { unreachable!() }; let (retryable_payment, onion_session_privs) = Self::create_pending_payment( payment_hash, recipient_onion.clone(), keysend_preimage, Some(invreq), Some(bolt12_invoice.clone()), &route, Some(retry_strategy), payment_params, entropy_source, best_block_height ); outbounds.insert(payment_id, retryable_payment); - onion_session_privs + (onion_session_privs, hold_htlcs_at_next_hop) }, _ => return Err(Bolt12PaymentError::DuplicateInvoice), }, @@ -1089,8 +1097,8 @@ impl OutboundPayments { let result = self.pay_route_internal( &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, Some(&bolt12_invoice), payment_id, - Some(route_params.final_value_msat), &onion_session_privs, node_signer, best_block_height, - &send_payment_along_path + Some(route_params.final_value_msat), &onion_session_privs, hold_htlcs_at_next_hop, node_signer, + best_block_height, &send_payment_along_path ); log_info!( logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, @@ -1107,8 +1115,9 @@ impl OutboundPayments { } pub(super) fn static_invoice_received( - &self, invoice: &StaticInvoice, payment_id: PaymentId, features: Bolt12InvoiceFeatures, - best_block_height: u32, duration_since_epoch: Duration, entropy_source: ES, + &self, invoice: &StaticInvoice, payment_id: PaymentId, hold_htlcs_at_next_hop: bool, + features: Bolt12InvoiceFeatures, best_block_height: u32, duration_since_epoch: Duration, + entropy_source: ES, pending_events: &Mutex)>>, ) -> Result<(), Bolt12PaymentError> where @@ -1192,6 +1201,13 @@ impl OutboundPayments { RetryableSendFailure::OnionPacketSizeExceeded, )); } + + // If we expect the HTLCs for this payment to be held at our next-hop counterparty, don't + // retry the payment. In future iterations of this feature, we will send this payment via + // trampoline and the counterparty will retry on our behalf. 
+ if hold_htlcs_at_next_hop { + *retry_strategy = Retry::Attempts(0); + } let absolute_expiry = duration_since_epoch.saturating_add(ASYNC_PAYMENT_TIMEOUT_RELATIVE_EXPIRY); @@ -1200,6 +1216,7 @@ impl OutboundPayments { keysend_preimage, retry_strategy: *retry_strategy, route_params, + hold_htlcs_at_next_hop, invoice_request: retryable_invoice_request .take() .ok_or(Bolt12PaymentError::UnexpectedInvoice)? @@ -1486,7 +1503,7 @@ impl OutboundPayments { })?; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, - keysend_preimage, None, None, payment_id, None, &onion_session_privs, node_signer, + keysend_preimage, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); @@ -1649,8 +1666,8 @@ impl OutboundPayments { } }; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, - invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), &onion_session_privs, node_signer, - best_block_height, &send_payment_along_path); + invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), + &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { self.handle_pay_route_err( @@ -1814,8 +1831,8 @@ impl OutboundPayments { let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); match self.pay_route_internal(&route, payment_hash, &recipient_onion_fields, - None, None, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, - &send_payment_along_path + None, None, None, payment_id, None, &onion_session_privs, false, node_signer, + best_block_height, &send_payment_along_path ) { Ok(()) => Ok((payment_hash, payment_id)), Err(e) => { @@ -2063,7 +2080,7 @@ impl 
OutboundPayments { &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, bolt12_invoice: Option<&PaidBolt12Invoice>, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, - node_signer: &NS, best_block_height: u32, send_payment_along_path: &F + hold_htlcs_at_next_hop: bool, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where NS::Target: NodeSigner, @@ -2117,7 +2134,7 @@ impl OutboundPayments { let path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, - bolt12_invoice, + bolt12_invoice, hold_htlc_at_next_hop: hold_htlcs_at_next_hop, session_priv_bytes: *session_priv_bytes }); results.push(path_res); @@ -2186,7 +2203,7 @@ impl OutboundPayments { { self.pay_route_internal(route, payment_hash, &recipient_onion, keysend_preimage, None, None, payment_id, recv_value_msat, &onion_session_privs, - node_signer, best_block_height, &send_payment_along_path) + false, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } @@ -2759,6 +2776,12 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, // HTLCs are in-flight. (9, StaticInvoiceReceived) => { (0, payment_hash, required), + // Added in 0.2. If this field is set when this variant is created, the HTLCs are sent + // immediately after and the pending outbound is also immediately transitioned to Retryable. + // However, if we crash and then downgrade before the transition to Retryable, this payment will + // sit in outbounds until it either times out in `remove_stale_payments` or is manually + // abandoned. 
+ (1, hold_htlcs_at_next_hop, required), (2, keysend_preimage, required), (4, retry_strategy, required), (6, route_params, required), @@ -3418,6 +3441,7 @@ mod tests { invoice_request: dummy_invoice_request(), static_invoice: dummy_static_invoice(), expiry_time: Duration::from_secs(absolute_expiry + 2), + hold_htlcs_at_next_hop: false }; outbounds.insert(payment_id, outbound); core::mem::drop(outbounds); @@ -3468,6 +3492,7 @@ mod tests { invoice_request: dummy_invoice_request(), static_invoice: dummy_static_invoice(), expiry_time: now(), + hold_htlcs_at_next_hop: false, }; outbounds.insert(payment_id, outbound); core::mem::drop(outbounds); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0af0463134e..3dab164e619 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -5017,6 +5017,7 @@ fn peel_payment_onion_custom_tlvs() { skimmed_fee_msat: None, onion_routing_packet, blinding_point: None, + hold_htlc: None, }; let peeled_onion = crate::ln::onion_payment::peel_payment_onion( &update_add, diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 38f472b2f5b..7129726d6aa 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -32,7 +32,7 @@ use crate::prelude::*; use crate::chain::BestBlock; use crate::ln::channel_state::ChannelDetails; -use crate::ln::channelmanager::{PaymentId, CLTV_FAR_FAR_AWAY}; +use crate::ln::channelmanager::{InterceptId, PaymentId, CLTV_FAR_FAR_AWAY}; use crate::ln::inbound_payment; use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache; use crate::offers::invoice::{ @@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{ StaticInvoicePersisted, }; use crate::onion_message::messenger::{ - Destination, MessageRouter, MessageSendInstructions, Responder, + Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH, }; use crate::onion_message::offers::OffersMessage; use 
crate::onion_message::packet::OnionMessageContents; @@ -61,6 +61,7 @@ use crate::sign::{EntropySource, NodeSigner, ReceiveAuthKey}; use crate::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder}; use crate::sync::{Mutex, RwLock}; +use crate::types::features::InitFeatures; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::util::ser::Writeable; @@ -98,6 +99,7 @@ where pending_async_payments_messages: Mutex>, async_receive_offer_cache: Mutex, + peers_cache: Mutex>, #[cfg(feature = "dnssec")] pub(crate) hrn_resolver: OMNameResolver, @@ -130,6 +132,7 @@ where pending_offers_messages: Mutex::new(Vec::new()), pending_async_payments_messages: Mutex::new(Vec::new()), + peers_cache: Mutex::new(Vec::new()), #[cfg(feature = "dnssec")] hrn_resolver: OMNameResolver::new(current_timestamp, best_block.height), @@ -409,6 +412,26 @@ pub enum InvreqResponseInstructions { }, } +/// Parameters for the reply path to a [`HeldHtlcAvailable`] onion message. +pub enum HeldHtlcReplyPath { + /// The reply path to the [`HeldHtlcAvailable`] message should terminate at our node. + ToUs { + /// The id of the payment. + payment_id: PaymentId, + /// The peers to use when creating this reply path. + peers: Vec, + }, + /// The reply path to the [`HeldHtlcAvailable`] message should terminate at our next-hop channel + /// counterparty, as they are holding our HTLC until they receive the corresponding + /// [`ReleaseHeldHtlc`] message. + /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + ToCounterparty { + /// The blinded path provided to us by our counterparty. 
+ path: BlindedMessagePath, + }, +} + impl OffersMessageFlow where MR::Target: MessageRouter, @@ -1128,14 +1151,19 @@ where /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc /// [`supports_onion_messages`]: crate::types::features::Features::supports_onion_messages pub fn enqueue_held_htlc_available( - &self, invoice: &StaticInvoice, payment_id: PaymentId, peers: Vec, + &self, invoice: &StaticInvoice, reply_path_params: HeldHtlcReplyPath, ) -> Result<(), Bolt12SemanticError> { - let context = - MessageContext::AsyncPayments(AsyncPaymentsContext::OutboundPayment { payment_id }); - - let reply_paths = self - .create_blinded_paths(peers, context) - .map_err(|_| Bolt12SemanticError::MissingPaths)?; + let reply_paths = match reply_path_params { + HeldHtlcReplyPath::ToUs { payment_id, peers } => { + let context = + MessageContext::AsyncPayments(AsyncPaymentsContext::OutboundPayment { + payment_id, + }); + self.create_blinded_paths(peers, context) + .map_err(|_| Bolt12SemanticError::MissingPaths)? + }, + HeldHtlcReplyPath::ToCounterparty { path } => vec![path], + }; let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap(); @@ -1151,6 +1179,41 @@ where Ok(()) } + /// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to + /// create a path for the sender to use as the reply path when they send the recipient a + /// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be + /// received by our node.
+ /// + /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc + pub fn path_for_release_held_htlc( + &self, intercept_id: InterceptId, entropy: ES, + ) -> BlindedMessagePath + where + ES::Target: EntropySource, + { + let peers = self.peers_cache.lock().unwrap().clone(); + let context = + MessageContext::AsyncPayments(AsyncPaymentsContext::ReleaseHeldHtlc { intercept_id }); + if let Ok(mut paths) = self.create_blinded_paths(peers, context.clone()) { + if let Some(path) = paths.pop() { + return path; + } + } + + // If the `MessageRouter` fails on blinded path creation, fall back to creating a 1-hop blinded + // path. + let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1); + BlindedMessagePath::new_with_dummy_hops( + &[], + self.get_our_node_id(), + num_dummy_hops, + self.receive_auth_key, + context, + &*entropy, + &self.secp_ctx, + ) + } + /// Enqueues the created [`DNSSECQuery`] to be sent to the counterparty. /// /// # Peers @@ -1627,4 +1690,40 @@ where pub fn writeable_async_receive_offer_cache(&self) -> Vec { self.async_receive_offer_cache.encode() } + + /// Indicates that a peer was connected to our node. Useful for the [`OffersMessageFlow`] to keep + /// track of which peers are connected, which allows for methods that can create blinded paths + /// without requiring a fresh set of [`MessageForwardNode`]s to be passed in. + /// + /// MUST be called by always-online nodes that support holding HTLCs on behalf of often-offline + /// senders. + /// + /// Errors if the peer does not support onion messages. + pub fn peer_connected( + &self, peer_node_id: PublicKey, features: &InitFeatures, + ) -> Result<(), ()> { + if !features.supports_onion_messages() { + return Err(()); + } + + let mut peers_cache = self.peers_cache.lock().unwrap(); + let peer = MessageForwardNode { node_id: peer_node_id, short_channel_id: None }; + peers_cache.push(peer); + + Ok(()) + } + + /// Indicates that a peer was disconnected from our node. 
See [`Self::peer_connected`]. + /// + /// Errors if the peer is unknown. + pub fn peer_disconnected(&self, peer_node_id: PublicKey) -> Result<(), ()> { + let mut peers_cache = self.peers_cache.lock().unwrap(); + let peer_idx = match peers_cache.iter().position(|peer| peer.node_id == peer_node_id) { + Some(idx) => idx, + None => return Err(()), + }; + peers_cache.swap_remove(peer_idx); + + Ok(()) + } } diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index a0c74673799..f7c266af9a6 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -935,6 +935,29 @@ pub struct UserConfig { /// /// Default value: `false` pub enable_dual_funded_channels: bool, + /// LDK supports a feature for always-online nodes such that these nodes can hold onto an HTLC + /// from an often-offline channel peer until the often-offline payment recipient sends an onion + /// message telling the always-online node to release the HTLC. If this is set to `true`, our node + /// will carry out this feature for channel peers that request it. + /// + /// This should only be set to `true` for nodes which expect to be online reliably. + /// + /// Setting this to `true` may break backwards compatibility with LDK versions < 0.2. + /// + /// Default value: `false` + pub enable_htlc_hold: bool, + /// If this is set to true, then if we as an often-offline payer receive a [`StaticInvoice`] to + /// pay, we will attempt to hold the corresponding outbound HTLCs with our next-hop channel + /// counterparty(s) that support the `htlc_hold` feature. This allows our node to go offline once + /// the HTLCs are locked in even though the recipient may not yet be online to receive them. + /// + /// This option only applies if we are a private node, and will be ignored if we are an announced + /// node that is expected to be online at all times. 
+ /// + /// Default value: `true` + /// + /// [`StaticInvoice`]: crate::offers::static_invoice::StaticInvoice + pub hold_outbound_htlcs_at_next_hop: bool, } impl Default for UserConfig { @@ -949,6 +972,8 @@ impl Default for UserConfig { accept_intercept_htlcs: false, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, + enable_htlc_hold: false, + hold_outbound_htlcs_at_next_hop: true, } } } @@ -969,6 +994,8 @@ impl Readable for UserConfig { accept_intercept_htlcs: Readable::read(reader)?, manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, + hold_outbound_htlcs_at_next_hop: Readable::read(reader)?, + enable_htlc_hold: Readable::read(reader)?, }) } } diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index ea7a3e8a2a2..647e7c77a6c 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -700,7 +700,7 @@ macro_rules! impl_writeable_msg { impl $crate::util::ser::Writeable for $st { fn write(&self, w: &mut W) -> Result<(), $crate::io::Error> { $( self.$field.write(w)?; )* - $crate::encode_tlv_stream!(w, {$(($type, self.$tlvfield.as_ref(), $fieldty)),*}); + $crate::encode_tlv_stream!(w, {$(($type, &self.$tlvfield, $fieldty)),*}); Ok(()) } } @@ -713,7 +713,7 @@ macro_rules! impl_writeable_msg { $crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*}); Ok(Self { $($field,)* - $($tlvfield),* + $($tlvfield: $crate::_init_tlv_based_struct_field!($tlvfield, $fieldty)),* }) } }