diff --git a/CHANGELOG.md b/CHANGELOG.md index a51c5fda8bd..5c29e63dc57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3 + +## Backwards Compatibility +* Downgrade is not possible while the node has in-flight trampoline forwards. + # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index e4ace27b715..92052891dd5 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -1338,14 +1338,14 @@ fn client_trusts_lsp_end_to_end_test() { let total_fee_msat = match service_events[0].clone() { Event::PaymentForwarded { - prev_node_id, - next_node_id, + ref prev_htlcs, + ref next_htlcs, skimmed_fee_msat, total_fee_earned_msat, .. } => { - assert_eq!(prev_node_id, Some(payer_node_id)); - assert_eq!(next_node_id, Some(client_node_id)); + assert_eq!(prev_htlcs[0].node_id, Some(payer_node_id)); + assert_eq!(next_htlcs[0].node_id, Some(client_node_id)); service_handler.payment_forwarded(channel_id, skimmed_fee_msat.unwrap_or(0)).unwrap(); Some(total_fee_earned_msat.unwrap() - skimmed_fee_msat.unwrap()) }, diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 515a3dc5f1d..ee93211af47 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2868,6 +2868,7 @@ impl ChannelMonitorImpl { let outbound_payment = match source { None => panic!("Outbound HTLCs should have a source"), Some(&HTLCSource::PreviousHopData(_)) => false, + Some(&HTLCSource::TrampolineForward { .. }) => false, Some(&HTLCSource::OutboundRoute { .. }) => true, }; return Some(Balance::MaybeTimeoutClaimableHTLC { @@ -3080,6 +3081,7 @@ impl ChannelMonitor { let outbound_payment = match source { None => panic!("Outbound HTLCs should have a source"), Some(HTLCSource::PreviousHopData(_)) => false, + Some(HTLCSource::TrampolineForward { .. }) => false, Some(HTLCSource::OutboundRoute { .. }) => true, }; if outbound_payment { diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d97ae6097b6..8ecd9419732 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -575,6 +575,14 @@ pub enum HTLCHandlingFailureType { /// The payment hash of the payment we attempted to process. payment_hash: PaymentHash, }, + /// We were responsible for pathfinding and forwarding of a trampoline payment, but failed to + /// do so. An example of such an instance is when we can't find a route to the specified + /// trampoline destination within. + TrampolineForward { + /// The set of HTLCs dispatched by our node in an attempt to complete the trampoline forward + /// which have failed. + attempted_htlcs: Vec, + }, } impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType, @@ -592,6 +600,9 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType, (4, Receive) => { (0, payment_hash, required), }, + (5, TrampolineForward) => { + (0, attempted_htlcs, required_vec), + }, ); /// The reason for HTLC failures in [`Event::HTLCHandlingFailed`]. @@ -729,6 +740,25 @@ pub enum InboundChannelFunds { DualFunded, } +/// Identifies the channel and peer committed to a HTLC, used for both incoming and outgoing HTLCs. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HTLCLocator { + /// The channel that the HTLC was sent or received on. + pub channel_id: ChannelId, + + /// The `user_channel_id` for `channel_id`. 
+ pub user_channel_id: Option, + + /// The public key identify of the node that the HTLC was sent to or received from. + pub node_id: Option, +} + +impl_writeable_tlv_based!(HTLCLocator, { + (1, channel_id, required), + (3, user_channel_id, option), + (5, node_id, option), +}); + /// An Event which you should probably take some action in response to. /// /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use @@ -1316,34 +1346,14 @@ pub enum Event { /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. PaymentForwarded { - /// The channel id of the incoming channel between the previous node and us. - /// - /// This is only `None` for events generated or serialized by versions prior to 0.0.107. - prev_channel_id: Option, - /// The channel id of the outgoing channel between the next node and us. - /// - /// This is only `None` for events generated or serialized by versions prior to 0.0.107. - next_channel_id: Option, - /// The `user_channel_id` of the incoming channel between the previous node and us. - /// - /// This is only `None` for events generated or serialized by versions prior to 0.0.122. - prev_user_channel_id: Option, - /// The `user_channel_id` of the outgoing channel between the next node and us. - /// - /// This will be `None` if the payment was settled via an on-chain transaction. See the - /// caveat described for the `total_fee_earned_msat` field. Moreover it will be `None` for - /// events generated or serialized by versions prior to 0.0.122. - next_user_channel_id: Option, - /// The node id of the previous node. - /// - /// This is only `None` for HTLCs received prior to 0.1 or for events serialized by - /// versions prior to 0.1 - prev_node_id: Option, - /// The node id of the next node. - /// - /// This is only `None` for HTLCs received prior to 0.1 or for events serialized by - /// versions prior to 0.1 - next_node_id: Option, + /// The set of HTLCs forwarded to our node that will be claimed by this forward. Contains a + /// single HTLC for source-routed payments, and may contain multiple HTLCs when we acted as + /// a trampoline router, responsible for pathfinding within the route. + prev_htlcs: Vec, + /// The set of HTLCs forwarded by our node that have been claimed by this forward. Contains + /// a single HTLC for regular source-routed payments, and may contain multiple HTLCs when + /// we acted as a trampoline router, responsible for pathfinding within the route. + next_htlcs: Vec, /// The total fee, in milli-satoshis, which was earned as a result of the payment. /// /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC @@ -1664,8 +1674,9 @@ pub enum Event { /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. HTLCHandlingFailed { - /// The channel over which the HTLC was received. - prev_channel_id: ChannelId, + /// The channel(s) over which the HTLC(s) was received. May contain multiple entries for + /// trampoline forwards. + prev_channel_ids: Vec, /// The type of HTLC handling that failed. failure_type: HTLCHandlingFailureType, /// The reason that the HTLC failed. 
@@ -2027,12 +2038,8 @@ impl Writeable for Event { }); }, &Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, @@ -2041,15 +2048,20 @@ impl Writeable for Event { 7u8.write(writer)?; write_tlv_fields!(writer, { (0, total_fee_earned_msat, option), - (1, prev_channel_id, option), + // Type 1 was prev_channel_id in 0.2 and earlier. (2, claim_from_onchain_tx, required), - (3, next_channel_id, option), + // Type 3 was next_channel_id in 0.2 and earlier. (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - (9, prev_user_channel_id, option), - (11, next_user_channel_id, option), - (13, prev_node_id, option), - (15, next_node_id, option), + // Type 9 was prev_user_channel_id in 0.2 and earlier. + // Type 11 was next_user_channel_id in 0.2 and earlier. + // Type 13 was prev_node_id in 0.2 and earlier. + // Type 15 was next_node_id in 0.2 and earlier. + // HTLCs are written as required, rather than required_vec, so that they can be + // deserialized using default_value to fill in legacy fields which expects + // LengthReadable (requried_vec is WithoutLength). + (17, *prev_htlcs, required), + (19, *next_htlcs, required), }); }, &Event::ChannelClosed { @@ -2197,15 +2209,16 @@ impl Writeable for Event { }) }, &Event::HTLCHandlingFailed { - ref prev_channel_id, + ref prev_channel_ids, ref failure_type, ref failure_reason, } => { 25u8.write(writer)?; write_tlv_fields!(writer, { - (0, prev_channel_id, required), + // Type 0 was prev_channel_id in 0.2 and earlier. (1, failure_reason, option), (2, failure_type, required), + (3, *prev_channel_ids, required), }) }, &Event::BumpTransaction(ref event) => { @@ -2544,35 +2557,63 @@ impl MaybeReadable for Event { }, 7u8 => { let mut f = || { - let mut prev_channel_id = None; - let mut next_channel_id = None; - let mut prev_user_channel_id = None; - let mut next_user_channel_id = None; - let mut prev_node_id = None; - let mut next_node_id = None; + // Legacy values that have been replaced by prev_htlcs and next_htlcs. + let mut prev_channel_id_legacy = None; + let mut next_channel_id_legacy = None; + let mut prev_user_channel_id_legacy = None; + let mut next_user_channel_id_legacy = None; + let mut prev_node_id_legacy = None; + let mut next_node_id_legacy = None; + let mut total_fee_earned_msat = None; let mut skimmed_fee_msat = None; let mut claim_from_onchain_tx = false; let mut outbound_amount_forwarded_msat = None; + let mut prev_htlcs = vec![]; + let mut next_htlcs = vec![]; read_tlv_fields!(reader, { (0, total_fee_earned_msat, option), - (1, prev_channel_id, option), + (1, prev_channel_id_legacy, option), (2, claim_from_onchain_tx, required), - (3, next_channel_id, option), + (3, next_channel_id_legacy, option), (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - (9, prev_user_channel_id, option), - (11, next_user_channel_id, option), - (13, prev_node_id, option), - (15, next_node_id, option), + (9, prev_user_channel_id_legacy, option), + (11, next_user_channel_id_legacy, option), + (13, prev_node_id_legacy, option), + (15, next_node_id_legacy, option), + // We can't unwrap prev_channel_id_legacy or next_channel_id_legacy here + // because default_value eagerly evaluates the default, so events that do + // not have legacy fields would fail. We settle for setting a zero ChannelID + // and replacing it below. 
+ (17, prev_htlcs, (default_value, vec![HTLCLocator{ + channel_id: ChannelId::new_zero(), + user_channel_id: prev_user_channel_id_legacy, + node_id: prev_node_id_legacy, + }])), + (19, next_htlcs, (default_value, vec![HTLCLocator{ + channel_id: ChannelId::new_zero(), + user_channel_id: next_user_channel_id_legacy, + node_id: next_node_id_legacy, + }])), }); + + // If dealing with legacy serialization, we can be confident that we'll + // replace the zero value placeholders above because these fields were only + // None for events serialized before 0.0.107. We do not allow nodes with pending + // forwards to be upgraded directly to 0.1 from versions 0.0.123 or earlier, + // so we should always have Some here when reading legacy serialization. + if let Some(prev_channel_id) = prev_channel_id_legacy { + prev_htlcs[0].channel_id = prev_channel_id; + } + + if let Some(next_channel_id) = next_channel_id_legacy { + next_htlcs[0].channel_id = next_channel_id; + } + Ok(Some(Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, @@ -2761,13 +2802,19 @@ impl MaybeReadable for Event { }, 25u8 => { let mut f = || { - let mut prev_channel_id = ChannelId::new_zero(); + let mut prev_channel_id_legacy = None; let mut failure_reason = None; let mut failure_type_opt = UpgradableRequired(None); + let mut prev_channel_ids = vec![]; read_tlv_fields!(reader, { - (0, prev_channel_id, required), + (0, prev_channel_id_legacy, option), (1, failure_reason, option), (2, failure_type_opt, upgradable_required), + // If our new prev_channel_ids field is not present, the legacy field + // must be because it used to be required. + (3, prev_channel_ids, (default_value, vec![ + prev_channel_id_legacy.ok_or(msgs::DecodeError::InvalidValue)?, + ])), }); // If a legacy HTLCHandlingFailureType::UnknownNextHop was written, upgrade @@ -2781,8 +2828,9 @@ impl MaybeReadable for Event { }); failure_reason = Some(LocalHTLCFailureReason::UnknownNextPeer.into()); } + Ok(Some(Event::HTLCHandlingFailed { - prev_channel_id, + prev_channel_ids, failure_type: _init_tlv_based_struct_field!( failure_type_opt, upgradable_required diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f3399ff8787..34e5c0ef917 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -718,6 +718,7 @@ impl Default for OptionalOfferPaymentParams { pub(crate) enum SentHTLCId { PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 }, OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] }, + TrampolineForward { session_priv: [u8; SECRET_KEY_SIZE] }, } impl SentHTLCId { pub(crate) fn from_source(source: &HTLCSource) -> Self { @@ -726,6 +727,9 @@ impl SentHTLCId { prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias, htlc_id: hop_data.htlc_id, }, + HTLCSource::TrampolineForward { session_priv, .. } => { + Self::TrampolineForward { session_priv: session_priv.secret_bytes() } + }, HTLCSource::OutboundRoute { session_priv, .. 
} => { Self::OutboundRoute { session_priv: session_priv.secret_bytes() } }, @@ -740,6 +744,9 @@ impl_writeable_tlv_based_enum!(SentHTLCId, (2, OutboundRoute) => { (0, session_priv, required), }, + (4, TrampolineForward) => { + (0, session_priv, required), + }, ); // (src_outbound_scid_alias, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id) @@ -749,6 +756,8 @@ type PerSourcePendingForward = type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType); mod fuzzy_channelmanager { + use crate::routing::router::RouteHop; + use super::*; /// Tracks the inbound corresponding to an outbound HTLC @@ -756,6 +765,16 @@ mod fuzzy_channelmanager { #[derive(Clone, Debug, PartialEq, Eq)] pub enum HTLCSource { PreviousHopData(HTLCPreviousHopData), + TrampolineForward { + /// We might be forwarding an incoming payment that was received over MPP, and therefore + /// need to store the vector of corresponding `HTLCPreviousHopData` values. + previous_hop_data: Vec, + incoming_trampoline_shared_secret: [u8; 32], + hops: Vec, + /// In order to decode inter-Trampoline errors, we need to store the session_priv key + /// given we're effectively creating new outbound routes. + session_priv: SecretKey, + }, OutboundRoute { path: Path, session_priv: SecretKey, @@ -790,6 +809,16 @@ mod fuzzy_channelmanager { /// channel remains unconfirmed for too long. pub cltv_expiry: Option, } + + impl From<&HTLCPreviousHopData> for events::HTLCLocator { + fn from(value: &HTLCPreviousHopData) -> Self { + events::HTLCLocator { + channel_id: value.channel_id, + user_channel_id: value.user_channel_id, + node_id: value.counterparty_node_id, + } + } + } } #[cfg(fuzzing)] pub use self::fuzzy_channelmanager::*; @@ -818,6 +847,18 @@ impl core::hash::Hash for HTLCSource { first_hop_htlc_msat.hash(hasher); bolt12_invoice.hash(hasher); }, + HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret, + hops, + session_priv, + } => { + 2u8.hash(hasher); + previous_hop_data.hash(hasher); + incoming_trampoline_shared_secret.hash(hasher); + hops.hash(hasher); + session_priv[..].hash(hasher); + }, } } } @@ -1320,8 +1361,8 @@ pub(crate) enum MonitorUpdateCompletionAction { /// completes a monitor update containing the payment preimage. In that case, after the inbound /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the /// outbound edge. - EmitEventAndFreeOtherChannel { - event: events::Event, + EmitEventOptionAndFreeOtherChannel { + event: Option, downstream_counterparty_and_funding_outpoint: Option, }, /// Indicates we should immediately resume the operation of another channel, unless there is @@ -1331,8 +1372,8 @@ pub(crate) enum MonitorUpdateCompletionAction { /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge /// from completing a monitor update which removes the payment preimage until the inbound edge /// completes a monitor update containing the payment preimage. However, we use this variant - /// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in - /// fact duplicative and we simply want to resume the outbound edge channel immediately. + /// instead of [`Self::EmitEventOptionAndFreeOtherChannel`] when we discover that the claim was + /// in fact duplicative and we simply want to resume the outbound edge channel immediately. 
/// /// This variant should thus never be written to disk, as it is processed inline rather than /// stored for later processing. @@ -1355,8 +1396,11 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (4, blocking_action, upgradable_required), (5, downstream_channel_id, required), }, - (2, EmitEventAndFreeOtherChannel) => { - (0, event, upgradable_required), + (2, EmitEventOptionAndFreeOtherChannel) => { + // LDK prior to 0.3 required this field. It will not be present for trampoline payments + // with multiple incoming HTLCS, so nodes cannot downgrade while trampoline payments + // are in the process of being resolved. + (0, event, upgradable_option), // LDK prior to 0.0.116 did not have this field as the monitor update application order was // required by clients. If we downgrade to something prior to 0.0.116 this may result in // monitor updates which aren't properly blocked or resumed, however that's fine - we don't @@ -7228,7 +7272,7 @@ where .push(failure); self.pending_events.lock().unwrap().push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: incoming_channel_id, + prev_channel_ids: vec![incoming_channel_id], failure_type, failure_reason: Some(failure_reason), }, @@ -8689,6 +8733,19 @@ where debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); } + let push_forward_htlcs_failure = + |prev_outbound_scid_alias: u64, failure: HTLCForwardInfo| { + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + match forward_htlcs.entry(prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(failure); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![failure]); + }, + } + }; + //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. We should make this async and move it into the forward HTLCs @@ -8747,49 +8804,81 @@ where if blinded_failure.is_some() { "blinded " } else { "" }, onion_error ); - // In case of trampoline + phantom we prioritize the trampoline failure over the phantom failure. - // TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC. 
- let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret); - let failure = match blinded_failure { - Some(BlindedFailure::FromIntroductionNode) => { - let blinded_onion_error = HTLCFailReason::reason( - LocalHTLCFailureReason::InvalidOnionBlinding, - vec![0; 32], - ); - let err_packet = blinded_onion_error.get_encrypted_failure_packet( - incoming_packet_shared_secret, - &secondary_shared_secret, - ); - HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet } - }, - Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC { - htlc_id: *htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - sha256_of_onion: [0; 32], - }, - None => { - let err_packet = onion_error.get_encrypted_failure_packet( - incoming_packet_shared_secret, - &secondary_shared_secret, - ); - HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet } - }, - }; - let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - match forward_htlcs.entry(*prev_outbound_scid_alias) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(failure); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec![failure]); + push_forward_htlcs_failure( + *prev_outbound_scid_alias, + get_htlc_forward_failure( + blinded_failure, + onion_error, + incoming_packet_shared_secret, + trampoline_shared_secret, + phantom_shared_secret, + *htlc_id, + ), + ); + + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back(( + events::Event::HTLCHandlingFailed { + prev_channel_ids: vec![*channel_id], + failure_type, + failure_reason: Some(onion_error.into()), }, + None, + )); + }, + HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret, + .. + } => { + // TODO: what do we want to do with this given we do not wish to propagate it directly? + let _decoded_onion_failure = + onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source); + let incoming_trampoline_shared_secret = Some(*incoming_trampoline_shared_secret); + + // TODO: when we receive a failure from a single outgoing trampoline HTLC, we don't + // necessarily want to fail all of our incoming HTLCs back yet. We may have other + // outgoing HTLCs that need to resolve first. This will be tracked in our + // pending_outbound_payments in a followup. + for current_hop_data in previous_hop_data { + let incoming_packet_shared_secret = + ¤t_hop_data.incoming_packet_shared_secret; + let channel_id = ¤t_hop_data.channel_id; + let short_channel_id = ¤t_hop_data.prev_outbound_scid_alias; + let htlc_id = ¤t_hop_data.htlc_id; + let blinded_failure = ¤t_hop_data.blinded_failure; + log_trace!( + WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)), + "Failing {}HTLC with payment_hash {} backwards from us following Trampoline forwarding failure: {:?}", + if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error + ); + let onion_error = HTLCFailReason::reason( + LocalHTLCFailureReason::TemporaryTrampolineFailure, + Vec::new(), + ); + push_forward_htlcs_failure( + *short_channel_id, + get_htlc_forward_failure( + blinded_failure, + &onion_error, + incoming_packet_shared_secret, + &incoming_trampoline_shared_secret, + &None, + *htlc_id, + ), + ); } - mem::drop(forward_htlcs); + + // We only want to emit a single event for trampoline failures, so we do it once + // we've failed back all of our incoming HTLCs. 
let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: *channel_id, + prev_channel_ids: previous_hop_data + .iter() + .map(|prev| prev.channel_id) + .collect(), failure_type, failure_reason: Some(onion_error.into()), }, @@ -9028,6 +9117,133 @@ where } } + /// Claims funds for a forwarded HTLC where we are an intermediate hop. + /// + /// Processes attribution data, calculates fees earned, and emits a [`Event::PaymentForwarded`] + /// event upon successful claim. `make_payment_forwarded_event` is responsible for creating a + /// single [`Event::PaymentForwarded`] event that represents the forward. + fn claim_funds_from_htlc_forward_hop( + &self, payment_preimage: PaymentPreimage, + make_payment_forwarded_event: impl Fn(Option) -> Option, + startup_replay: bool, next_channel_counterparty_node_id: PublicKey, + next_channel_outpoint: OutPoint, next_channel_id: ChannelId, hop_data: HTLCPreviousHopData, + attribution_data: Option, send_timestamp: Option, + ) { + let _prev_channel_id = hop_data.channel_id; + let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); + + // Obtain hold time, if available. + let hold_time = hold_time_since(send_timestamp).unwrap_or(0); + + // If attribution data was received from downstream, we shift it and get it ready for adding our hold + // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA + // to record the hold time like we do for failed HTLCs. + let attribution_data = process_fulfill_attribution_data( + attribution_data, + &hop_data.incoming_packet_shared_secret, + hold_time, + ); + + #[cfg(test)] + let claiming_chan_funding_outpoint = hop_data.outpoint; + self.claim_funds_from_hop( + hop_data, + payment_preimage, + None, + Some(attribution_data), + |htlc_claim_value_msat, definitely_duplicate| { + let chan_to_release = Some(EventUnblockedChannel { + counterparty_node_id: next_channel_counterparty_node_id, + funding_txo: next_channel_outpoint, + channel_id: next_channel_id, + blocking_action: completed_blocker, + }); + + if definitely_duplicate && startup_replay { + // On startup we may get redundant claims which are related to + // monitor updates still in flight. In that case, we shouldn't + // immediately free, but instead let that monitor update complete + // in the background. + #[cfg(test)] + { + let per_peer_state = self.per_peer_state.deadlocking_read(); + // The channel we'd unblock should already be closed, or... + let channel_closed = per_peer_state + .get(&next_channel_counterparty_node_id) + .map(|lck| lck.deadlocking_lock()) + .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) + .unwrap_or(true); + let background_events = self.pending_background_events.lock().unwrap(); + // there should be a `BackgroundEvent` pending... + let matching_bg_event = + background_events.iter().any(|ev| { + match ev { + // to apply a monitor update that blocked the claiming channel, + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + funding_txo, + update, + .. + } => { + if *funding_txo == claiming_chan_funding_outpoint { + assert!( + update.updates.iter().any(|upd| { + if let ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: update_preimage, .. + } = upd { + payment_preimage == *update_preimage + } else { false } + }), + "{:?}", + update + ); + true + } else { + false + } + }, + // or the monitor update has completed and will unblock + // immediately once we get going. 
+ BackgroundEvent::MonitorUpdatesComplete { + channel_id, .. + } => *channel_id == _prev_channel_id, + } + }); + assert!(channel_closed || matching_bg_event, "{:?}", *background_events); + } + (None, None) + } else if definitely_duplicate { + if let Some(other_chan) = chan_to_release { + ( + Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: other_chan.counterparty_node_id, + downstream_channel_id: other_chan.channel_id, + blocking_action: other_chan.blocking_action, + }), + None, + ) + } else { + (None, None) + } + } else { + let event = make_payment_forwarded_event(htlc_claim_value_msat); + if let Some(ref payment_forwarded) = event { + debug_assert!(matches!( + payment_forwarded, + &events::Event::PaymentForwarded { .. } + )); + } + ( + Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { + event, + downstream_counterparty_and_funding_outpoint: chan_to_release, + }), + None, + ) + } + }, + ); + } + fn claim_funds_from_hop< ComplFunc: FnOnce( Option, @@ -9409,136 +9625,88 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, HTLCSource::PreviousHopData(hop_data) => { - let prev_channel_id = hop_data.channel_id; - let prev_user_channel_id = hop_data.user_channel_id; - let prev_node_id = hop_data.counterparty_node_id; - let completed_blocker = - RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); - - // Obtain hold time, if available. - let hold_time = hold_time_since(send_timestamp).unwrap_or(0); - - // If attribution data was received from downstream, we shift it and get it ready for adding our hold - // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA - // to record the hold time like we do for failed HTLCs. - let attribution_data = process_fulfill_attribution_data( - attribution_data, - &hop_data.incoming_packet_shared_secret, - hold_time, - ); - - #[cfg(test)] - let claiming_chan_funding_outpoint = hop_data.outpoint; - self.claim_funds_from_hop( - hop_data, + let prev_htlcs = vec![events::HTLCLocator::from(&hop_data)]; + self.claim_funds_from_htlc_forward_hop( payment_preimage, - None, - Some(attribution_data), - |htlc_claim_value_msat, definitely_duplicate| { - let chan_to_release = Some(EventUnblockedChannel { - counterparty_node_id: next_channel_counterparty_node_id, - funding_txo: next_channel_outpoint, - channel_id: next_channel_id, - blocking_action: completed_blocker, - }); - - if definitely_duplicate && startup_replay { - // On startup we may get redundant claims which are related to - // monitor updates still in flight. In that case, we shouldn't - // immediately free, but instead let that monitor update complete - // in the background. - #[cfg(test)] - { - let per_peer_state = self.per_peer_state.deadlocking_read(); - // The channel we'd unblock should already be closed, or... - let channel_closed = per_peer_state - .get(&next_channel_counterparty_node_id) - .map(|lck| lck.deadlocking_lock()) - .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) - .unwrap_or(true); - let background_events = - self.pending_background_events.lock().unwrap(); - // there should be a `BackgroundEvent` pending... - let matching_bg_event = - background_events.iter().any(|ev| { - match ev { - // to apply a monitor update that blocked the claiming channel, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - funding_txo, update, .. 
- } => { - if *funding_txo == claiming_chan_funding_outpoint { - assert!(update.updates.iter().any(|upd| - if let ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: update_preimage, .. - } = upd { - payment_preimage == *update_preimage - } else { false } - ), "{:?}", update); - true - } else { false } - }, - // or the monitor update has completed and will unblock - // immediately once we get going. - BackgroundEvent::MonitorUpdatesComplete { - channel_id, .. - } => - *channel_id == prev_channel_id, - } - }); - assert!( - channel_closed || matching_bg_event, - "{:?}", - *background_events - ); - } - (None, None) - } else if definitely_duplicate { - if let Some(other_chan) = chan_to_release { - (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id: other_chan.counterparty_node_id, - downstream_channel_id: other_chan.channel_id, - blocking_action: other_chan.blocking_action, - }), None) - } else { - (None, None) - } - } else { - let total_fee_earned_msat = - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { - None - } + |htlc_claim_value_msat: Option| -> Option { + let total_fee_earned_msat = + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) } else { None - }; - debug_assert!( - skimmed_fee_msat <= total_fee_earned_msat, - "skimmed_fee_msat must always be included in total_fee_earned_msat" - ); - ( - Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event: events::Event::PaymentForwarded { - prev_channel_id: Some(prev_channel_id), - next_channel_id: Some(next_channel_id), - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id: Some(next_channel_counterparty_node_id), - total_fee_earned_msat, - skimmed_fee_msat, - claim_from_onchain_tx: from_onchain, - outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }, - downstream_counterparty_and_funding_outpoint: chan_to_release, - }), - None, - ) - } + } + } else { + None + }; + debug_assert!( + skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat" + ); + + Some(events::Event::PaymentForwarded { + prev_htlcs: prev_htlcs.clone(), + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + total_fee_earned_msat, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }) }, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + hop_data, + attribution_data, + send_timestamp, ); }, + HTLCSource::TrampolineForward { previous_hop_data, .. } => { + // Only emit a single event for trampoline claims. + let prev_htlcs: Vec = + previous_hop_data.iter().map(Into::into).collect(); + for (i, current_previous_hop_data) in previous_hop_data.into_iter().enumerate() { + self.claim_funds_from_htlc_forward_hop( + payment_preimage, + |_: Option| -> Option { + if i == 0 { + Some(events::Event::PaymentForwarded { + prev_htlcs: prev_htlcs.clone(), + // TODO: When trampoline payments are tracked in our + // pending_outbound_payments, we'll be able to provide all the + // outgoing htlcs for this forward. 
+ next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + // TODO: When trampoline payments are tracked in our + // pending_outbound_payments, we'll be able to lookup our total + // fee earnings. + total_fee_earned_msat: None, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }) + } else { + None + } + }, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + current_previous_hop_data, + attribution_data.clone(), + send_timestamp, + ); + } + }, } } @@ -9746,11 +9914,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { event, downstream_counterparty_and_funding_outpoint, } => { - self.pending_events.lock().unwrap().push_back((event, None)); + if let Some(event) = event { + self.pending_events.lock().unwrap().push_back((event, None)); + } if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { self.handle_monitor_update_release( unblocked.counterparty_node_id, @@ -12964,6 +13134,43 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } +/// Constructs an HTLC forward failure for sending back to the previous hop, converting to a blinded +/// failure where appropriate. +/// +/// When both trampoline and phantom secrets are present, the trampoline secret takes priority +/// for error encryption. +fn get_htlc_forward_failure( + blinded_failure: &Option, onion_error: &HTLCFailReason, + incoming_packet_shared_secret: &[u8; 32], trampoline_shared_secret: &Option<[u8; 32]>, + phantom_shared_secret: &Option<[u8; 32]>, htlc_id: u64, +) -> HTLCForwardInfo { + // TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC. + let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret); + match blinded_failure { + Some(BlindedFailure::FromIntroductionNode) => { + let blinded_onion_error = + HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]); + let err_packet = blinded_onion_error.get_encrypted_failure_packet( + incoming_packet_shared_secret, + &secondary_shared_secret, + ); + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } + }, + Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC { + htlc_id, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], + }, + None => { + let err_packet = onion_error.get_encrypted_failure_packet( + incoming_packet_shared_secret, + &secondary_shared_secret, + ); + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } + }, + } +} + /// Parameters used with [`create_bolt11_invoice`]. 
/// /// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice @@ -16468,6 +16675,24 @@ impl Readable for HTLCSource { }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), + 2 => { + let mut previous_hop_data = Vec::new(); + let mut incoming_trampoline_shared_secret: crate::util::ser::RequiredWrapper<[u8; 32]> = crate::util::ser::RequiredWrapper(None); + let mut session_priv: crate::util::ser::RequiredWrapper = crate::util::ser::RequiredWrapper(None); + let mut hops = Vec::new(); + read_tlv_fields!(reader, { + (0, previous_hop_data, required_vec), + (2, incoming_trampoline_shared_secret, required), + (4, session_priv, required), + (6, hops, required_vec), + }); + Ok(HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret: incoming_trampoline_shared_secret.0.unwrap(), + hops, + session_priv: session_priv.0.unwrap(), + }) + }, _ => Err(DecodeError::UnknownRequiredFeature), } } @@ -16500,6 +16725,20 @@ impl Writeable for HTLCSource { 1u8.write(writer)?; field.write(writer)?; }, + HTLCSource::TrampolineForward { + ref previous_hop_data, + incoming_trampoline_shared_secret, + ref session_priv, + ref hops, + } => { + 2u8.write(writer)?; + write_tlv_fields!(writer, { + (0, *previous_hop_data, required_vec), + (2, incoming_trampoline_shared_secret, required), + (4, session_priv, required), + (6, *hops, required_vec), + }); + }, } Ok(()) } @@ -18011,51 +18250,32 @@ where let htlc_id = SentHTLCId::from_source(&htlc_source); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs`, - // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not - // persisted after the monitor was when forwarding the payment. - dedup_decode_update_add_htlcs( + reconcile_pending_htlcs_with_monitor( + &mut forward_htlcs_legacy, + &mut pending_events_read, + &mut pending_intercepted_htlcs_legacy, &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, + prev_hop_data, + &logger, + htlc.payment_hash, + monitor.channel_id(), ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } - } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); + }, + HTLCSource::TrampolineForward { previous_hop_data, .. 
} => { + for prev_hop_data in previous_hop_data { + reconcile_pending_htlcs_with_monitor( + &mut forward_htlcs_legacy, + &mut pending_events_read, + &mut pending_intercepted_htlcs_legacy, + &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, + prev_hop_data, + &logger, + htlc.payment_hash, + monitor.channel_id(), + ); + } }, HTLCSource::OutboundRoute { payment_id, @@ -18445,7 +18665,7 @@ where let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); for action in actions.iter() { - if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + if let MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: Some(EventUnblockedChannel { counterparty_node_id: blocked_node_id, @@ -18971,6 +19191,64 @@ where } } +/// Removes pending HTLC entries that the ChannelMonitor has already taken responsibility for, +/// cleaning up state mismatches that can occur during restart. +fn reconcile_pending_htlcs_with_monitor( + forward_htlcs_legacy: &mut HashMap>, + pending_events_read: &mut VecDeque<(Event, Option)>, + pending_intercepted_htlcs_legacy: &mut HashMap, + decode_update_add_htlcs: &mut HashMap>, + decode_update_add_htlcs_legacy: &mut HashMap>, + prev_hop_data: HTLCPreviousHopData, logger: &impl Logger, payment_hash: PaymentHash, + channel_id: ChannelId, +) { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs`, + // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not + // persisted after the monitor was when forwarding the payment. + dedup_decode_update_add_htlcs( + decode_update_add_htlcs, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &logger, + ); + dedup_decode_update_add_htlcs( + decode_update_add_htlcs_legacy, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &logger, + ); + forward_htlcs_legacy.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + &payment_hash, &channel_id); + false + } else { true } + } else { true } + }); + !forwards.is_empty() + }); + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + &payment_hash, &channel_id); + pending_events_read.retain(|(event, _)| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. 
} = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); +} + #[cfg(test)] mod tests { use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e072deb6a97..33fe0c72cef 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3059,17 +3059,16 @@ pub fn expect_payment_forwarded>( ) -> Option { match event { Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, .. } => { + assert_eq!(prev_htlcs.len(), 1); + assert_eq!(next_htlcs.len(), 1); + if allow_1_msat_fee_overpay { // Aggregating fees for blinded paths may result in a rounding error, causing slight // overpayment in fees. @@ -3084,33 +3083,36 @@ pub fn expect_payment_forwarded>( // overpaid amount. assert!(skimmed_fee_msat == expected_extra_fees_msat); if !upstream_force_closed { - assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap()); + let prev_node_id = prev_htlcs[0].node_id.unwrap(); + let prev_channel_id = prev_htlcs[0].channel_id; + let prev_user_channel_id = prev_htlcs[0].user_channel_id.unwrap(); + + assert_eq!(prev_node.node().get_our_node_id(), prev_node_id); // Is the event prev_channel_id in one of the channels between the two nodes? let node_chans = node.node().list_channels(); - assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id.unwrap() - && x.channel_id == prev_channel_id.unwrap() - && x.user_channel_id == prev_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id + && x.channel_id == prev_channel_id + && x.user_channel_id == prev_user_channel_id)); } // We check for force closures since a force closed channel is removed from the // node's channel list if !downstream_force_closed { + let next_node_id = next_htlcs[0].node_id.unwrap(); + let next_channel_id = next_htlcs[0].channel_id; + let next_user_channel_id = next_htlcs[0].user_channel_id.unwrap(); // As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an // onchain transaction, just as the `total_fee_earned_msat` field. Rather than // introducing yet another variable, we use the latter's state as a flag to detect // this and only check if it's `Some`. 
- assert_eq!(next_node.node().get_our_node_id(), next_node_id.unwrap()); + assert_eq!(next_node.node().get_our_node_id(), next_node_id); let node_chans = node.node().list_channels(); if total_fee_earned_msat.is_none() { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id)); } else { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap() - && x.user_channel_id == next_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id + && x.user_channel_id == next_user_channel_id)); } } assert_eq!(claim_from_onchain_tx, downstream_force_closed); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e2963dbeb09..933cb2d2cdc 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1459,37 +1459,37 @@ pub fn test_htlc_on_chain_success() { connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let forwarded_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(forwarded_events.len(), 3); - let chan_id = Some(chan_1.2); + let chan_id = chan_1.2; match forwarded_events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), } match forwarded_events[1] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), @@ -3965,17 +3965,17 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(events.len(), 2); match events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. 
} => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, Some(chan_1.2)); + assert_eq!(prev_htlcs[0].channel_id, chan_1.2); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!("Unexpected event"), diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 04915affa20..a754c4fb81f 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -3792,8 +3792,8 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(hash_b)); }, - Event::HTLCHandlingFailed { prev_channel_id, .. } => { - assert_eq!(prev_channel_id, chan_a); + Event::HTLCHandlingFailed { prev_channel_ids, .. } => { + assert_eq!(prev_channel_ids[0], chan_a); }, _ => panic!("Wrong event {ev:?}"), } diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..731c869b581 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1085,6 +1085,7 @@ impl Readable for Vec { impl_for_vec!(ecdsa::Signature); impl_for_vec!(crate::chain::channelmonitor::ChannelMonitorUpdate); +impl_for_vec!(crate::ln::channelmanager::HTLCPreviousHopData); impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction); impl_for_vec!(crate::ln::channelmanager::PaymentClaimDetails); impl_for_vec!(crate::ln::msgs::SocketAddress); @@ -1096,10 +1097,13 @@ impl_for_vec!(crate::ln::our_peer_storage::PeerStorageMonitorHolder); impl_for_vec!(crate::blinded_path::message::BlindedMessagePath); impl_writeable_for_vec!(&crate::routing::router::BlindedTail); impl_readable_for_vec!(crate::routing::router::BlindedTail); +impl_for_vec!(crate::routing::router::RouteHop); impl_for_vec!(crate::routing::router::TrampolineHop); impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); impl_for_vec!(u32); +impl_for_vec!(crate::events::HTLCLocator); +impl_for_vec!(crate::ln::types::ChannelId); impl Writeable for Vec { #[inline]
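
For illustration only, a minimal sketch of how an event handler might consume the reworked `Event::PaymentForwarded` and `Event::HTLCHandlingFailed` shapes introduced above. It is not part of this diff; the concrete field types (`Vec<HTLCLocator>` with `user_channel_id: Option<u128>` and `node_id: Option<PublicKey>`, and `prev_channel_ids: Vec<ChannelId>`) are assumed from the struct definitions shown in the patch.

    use lightning::events::Event;

    fn handle_event(event: Event) {
        match event {
            Event::PaymentForwarded { prev_htlcs, next_htlcs, total_fee_earned_msat, .. } => {
                // Source-routed forwards carry exactly one inbound and one outbound HTLC;
                // trampoline forwards may carry several of each.
                for htlc in prev_htlcs.iter().chain(next_htlcs.iter()) {
                    println!("HTLC on channel {:?} (peer {:?})", htlc.channel_id, htlc.node_id);
                }
                println!("total fee earned: {:?} msat", total_fee_earned_msat);
            },
            Event::HTLCHandlingFailed { prev_channel_ids, failure_type, .. } => {
                // May list multiple inbound channels when a trampoline forward fails.
                println!("HTLC(s) received over {:?} failed: {:?}", prev_channel_ids, failure_type);
            },
            _ => {},
        }
    }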