From f99d519508bf971e88ee41ee8e98dc4578509694 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 16 Dec 2025 13:37:23 +0200 Subject: [PATCH 01/14] ln/events: multiple htlcs in/out for trampoline PaymentForwarded --- .../tests/lsps2_integration_tests.rs | 8 +- lightning/src/events/mod.rs | 144 +++++++++++------- lightning/src/ln/channelmanager.rs | 16 +- lightning/src/ln/functional_test_utils.rs | 42 ++--- lightning/src/ln/functional_tests.rs | 26 ++-- lightning/src/util/ser.rs | 1 + 6 files changed, 136 insertions(+), 101 deletions(-) diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index e4ace27b715..92052891dd5 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -1338,14 +1338,14 @@ fn client_trusts_lsp_end_to_end_test() { let total_fee_msat = match service_events[0].clone() { Event::PaymentForwarded { - prev_node_id, - next_node_id, + ref prev_htlcs, + ref next_htlcs, skimmed_fee_msat, total_fee_earned_msat, .. } => { - assert_eq!(prev_node_id, Some(payer_node_id)); - assert_eq!(next_node_id, Some(client_node_id)); + assert_eq!(prev_htlcs[0].node_id, Some(payer_node_id)); + assert_eq!(next_htlcs[0].node_id, Some(client_node_id)); service_handler.payment_forwarded(channel_id, skimmed_fee_msat.unwrap_or(0)).unwrap(); Some(total_fee_earned_msat.unwrap() - skimmed_fee_msat.unwrap()) }, diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d97ae6097b6..c5deed57a7f 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -729,6 +729,25 @@ pub enum InboundChannelFunds { DualFunded, } +/// Identifies the channel and peer committed to an HTLC, used for both incoming and outgoing HTLCs. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HTLCLocator { + /// The channel that the HTLC was sent or received on. + pub channel_id: ChannelId, + + /// The `user_channel_id` for `channel_id`. + pub user_channel_id: Option<u128>, + + /// The public key identity of the node that the HTLC was sent to or received from. + pub node_id: Option<PublicKey>, +} + +impl_writeable_tlv_based!(HTLCLocator, { + (1, channel_id, required), + (3, user_channel_id, option), + (5, node_id, option), +}); + /// An Event which you should probably take some action in response to. /// /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use @@ -1316,34 +1335,14 @@ pub enum Event { /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. PaymentForwarded { - /// The channel id of the incoming channel between the previous node and us. - /// - /// This is only `None` for events generated or serialized by versions prior to 0.0.107. - prev_channel_id: Option<ChannelId>, - /// The channel id of the outgoing channel between the next node and us. - /// - /// This is only `None` for events generated or serialized by versions prior to 0.0.107. - next_channel_id: Option<ChannelId>, - /// The `user_channel_id` of the incoming channel between the previous node and us. - /// - /// This is only `None` for events generated or serialized by versions prior to 0.0.122. - prev_user_channel_id: Option<u128>, - /// The `user_channel_id` of the outgoing channel between the next node and us. - /// - /// This will be `None` if the payment was settled via an on-chain transaction. See the - /// caveat described for the `total_fee_earned_msat` field.
Moreover it will be `None` for - /// events generated or serialized by versions prior to 0.0.122. - next_user_channel_id: Option<u128>, - /// The node id of the previous node. - /// - /// This is only `None` for HTLCs received prior to 0.1 or for events serialized by - /// versions prior to 0.1 - prev_node_id: Option<PublicKey>, - /// The node id of the next node. - /// - /// This is only `None` for HTLCs received prior to 0.1 or for events serialized by - /// versions prior to 0.1 - next_node_id: Option<PublicKey>, + /// The set of HTLCs forwarded to our node that will be claimed by this forward. Contains a + /// single HTLC for source-routed payments, and may contain multiple HTLCs when we acted as + /// a trampoline router, responsible for pathfinding within the route. + prev_htlcs: Vec<HTLCLocator>, + /// The set of HTLCs forwarded by our node that have been claimed by this forward. Contains + /// a single HTLC for regular source-routed payments, and may contain multiple HTLCs when + /// we acted as a trampoline router, responsible for pathfinding within the route. + next_htlcs: Vec<HTLCLocator>, /// The total fee, in milli-satoshis, which was earned as a result of the payment. /// /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC @@ -2027,12 +2026,8 @@ impl Writeable for Event { }); }, &Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, @@ -2041,15 +2036,20 @@ 7u8.write(writer)?; write_tlv_fields!(writer, { (0, total_fee_earned_msat, option), - (1, prev_channel_id, option), + // Type 1 was prev_channel_id in 0.2 and earlier. (2, claim_from_onchain_tx, required), - (3, next_channel_id, option), + // Type 3 was next_channel_id in 0.2 and earlier. (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - (9, prev_user_channel_id, option), - (11, next_user_channel_id, option), - (13, prev_node_id, option), - (15, next_node_id, option), + // Type 9 was prev_user_channel_id in 0.2 and earlier. + // Type 11 was next_user_channel_id in 0.2 and earlier. + // Type 13 was prev_node_id in 0.2 and earlier. + // Type 15 was next_node_id in 0.2 and earlier. + // HTLCs are written as required, rather than required_vec, so that they can be + // deserialized using default_value to fill in legacy fields, which expects + // LengthReadable (required_vec is WithoutLength). + (17, *prev_htlcs, required), + (19, *next_htlcs, required), }); }, &Event::ChannelClosed { @@ -2544,35 +2544,63 @@ impl MaybeReadable for Event { }, 7u8 => { let mut f = || { - let mut prev_channel_id = None; - let mut next_channel_id = None; - let mut prev_user_channel_id = None; - let mut next_user_channel_id = None; - let mut prev_node_id = None; - let mut next_node_id = None; + // Legacy values that have been replaced by prev_htlcs and next_htlcs.
+ let mut prev_channel_id_legacy = None; + let mut next_channel_id_legacy = None; + let mut prev_user_channel_id_legacy = None; + let mut next_user_channel_id_legacy = None; + let mut prev_node_id_legacy = None; + let mut next_node_id_legacy = None; + let mut total_fee_earned_msat = None; let mut skimmed_fee_msat = None; let mut claim_from_onchain_tx = false; let mut outbound_amount_forwarded_msat = None; + let mut prev_htlcs = vec![]; + let mut next_htlcs = vec![]; read_tlv_fields!(reader, { (0, total_fee_earned_msat, option), - (1, prev_channel_id, option), + (1, prev_channel_id_legacy, option), (2, claim_from_onchain_tx, required), - (3, next_channel_id, option), + (3, next_channel_id_legacy, option), (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - (9, prev_user_channel_id, option), - (11, next_user_channel_id, option), - (13, prev_node_id, option), - (15, next_node_id, option), + (9, prev_user_channel_id_legacy, option), + (11, next_user_channel_id_legacy, option), + (13, prev_node_id_legacy, option), + (15, next_node_id_legacy, option), + // We can't unwrap prev_channel_id_legacy or next_channel_id_legacy here + // because default_value eagerly evaluates the default, so events that do + // not have legacy fields would fail. We settle for setting a zero ChannelID + // and replacing it below. + (17, prev_htlcs, (default_value, vec![HTLCLocator{ + channel_id: ChannelId::new_zero(), + user_channel_id: prev_user_channel_id_legacy, + node_id: prev_node_id_legacy, + }])), + (19, next_htlcs, (default_value, vec![HTLCLocator{ + channel_id: ChannelId::new_zero(), + user_channel_id: next_user_channel_id_legacy, + node_id: next_node_id_legacy, + }])), }); + + // If dealing with legacy serialization, we can be confident that we'll + // replace the zero value placeholders above because these fields were only + // None for events serialized before 0.0.107. We do not allow nodes with pending + // forwards to be upgraded directly to 0.1 from versions 0.0.123 or earlier, + // so we should always have Some here when reading legacy serialization. + if let Some(prev_channel_id) = prev_channel_id_legacy { + prev_htlcs[0].channel_id = prev_channel_id; + } + + if let Some(next_channel_id) = next_channel_id_legacy { + next_htlcs[0].channel_id = next_channel_id; + } + Ok(Some(Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f3399ff8787..ada21b8bdaf 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9520,12 +9520,16 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ( Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { event: events::Event::PaymentForwarded { - prev_channel_id: Some(prev_channel_id), - next_channel_id: Some(next_channel_id), - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id: Some(next_channel_counterparty_node_id), + prev_htlcs: vec![events::HTLCLocator { + channel_id: prev_channel_id, + user_channel_id: prev_user_channel_id, + node_id: prev_node_id, + }], + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx: from_onchain, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e072deb6a97..33fe0c72cef 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3059,17 +3059,16 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM = CM>>( ) -> Option<u64> { match event { Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, .. } => { + assert_eq!(prev_htlcs.len(), 1); + assert_eq!(next_htlcs.len(), 1); + if allow_1_msat_fee_overpay { // Aggregating fees for blinded paths may result in a rounding error, causing slight // overpayment in fees. @@ -3084,33 +3083,36 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM = CM>>( // overpaid amount. assert!(skimmed_fee_msat == expected_extra_fees_msat); if !upstream_force_closed { - assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap()); + let prev_node_id = prev_htlcs[0].node_id.unwrap(); + let prev_channel_id = prev_htlcs[0].channel_id; + let prev_user_channel_id = prev_htlcs[0].user_channel_id.unwrap(); + + assert_eq!(prev_node.node().get_our_node_id(), prev_node_id); // Is the event prev_channel_id in one of the channels between the two nodes? let node_chans = node.node().list_channels(); - assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id.unwrap() - && x.channel_id == prev_channel_id.unwrap() - && x.user_channel_id == prev_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id + && x.channel_id == prev_channel_id + && x.user_channel_id == prev_user_channel_id)); } // We check for force closures since a force closed channel is removed from the // node's channel list if !downstream_force_closed { + let next_node_id = next_htlcs[0].node_id.unwrap(); + let next_channel_id = next_htlcs[0].channel_id; + let next_user_channel_id = next_htlcs[0].user_channel_id.unwrap(); // As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an // onchain transaction, just as the `total_fee_earned_msat` field. Rather than // introducing yet another variable, we use the latter's state as a flag to detect // this and only check if it's `Some`.
- assert_eq!(next_node.node().get_our_node_id(), next_node_id.unwrap()); + assert_eq!(next_node.node().get_our_node_id(), next_node_id); let node_chans = node.node().list_channels(); if total_fee_earned_msat.is_none() { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id)); } else { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap() - && x.user_channel_id == next_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id + && x.user_channel_id == next_user_channel_id)); } } assert_eq!(claim_from_onchain_tx, downstream_force_closed); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e2963dbeb09..933cb2d2cdc 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1459,37 +1459,37 @@ pub fn test_htlc_on_chain_success() { connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let forwarded_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(forwarded_events.len(), 3); - let chan_id = Some(chan_1.2); + let chan_id = chan_1.2; match forwarded_events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), } match forwarded_events[1] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), @@ -3965,17 +3965,17 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(events.len(), 2); match events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. 
} => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, Some(chan_1.2)); + assert_eq!(prev_htlcs[0].channel_id, chan_1.2); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!("Unexpected event"), diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..249505f2e21 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1100,6 +1100,7 @@ impl_for_vec!(crate::routing::router::TrampolineHop); impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); impl_for_vec!(u32); +impl_for_vec!(crate::events::HTLCLocator); impl Writeable for Vec<Witness> { #[inline] From 0cd524d7e2073f7c605c1b4193d6c7c339a5dfab Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 16 Dec 2025 15:14:55 +0200 Subject: [PATCH 02/14] ln: make event optional in EmitEventAndFreeOtherChannel In the commits that follow, we want to be able to free the other channel without emitting an event so that we can emit a single event for trampoline payments with multiple incoming HTLCs. We still want to go through the full claim flow for each incoming HTLC (and persist the EmitEventAndFreeOtherChannel event to be picked up on restart), but do not want multiple events for the same trampoline forward. Changing from upgradable_required to upgradable_option is forwards compatible - old versions of the software will always have written this field, newer versions don't require it to be there but will be able to read it as-is. This change is not backwards compatible, because older versions of the software will expect the field to be present but newer versions may not write it. An alternative would be to add a new event type, but that would need to have an even TLV (because the event must be understood and processed on restart to claim the incoming HTLC), so that option isn't backwards compatible either. --- CHANGELOG.md | 5 +++++ lightning/src/ln/channelmanager.rs | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a51c5fda8bd..5c29e63dc57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3 + +## Backwards Compatibility +* Downgrade is not possible while the node has in-flight trampoline forwards. + # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ada21b8bdaf..3369f8031b9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1321,7 +1321,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the /// outbound edge. EmitEventAndFreeOtherChannel { - event: events::Event, + event: Option<events::Event>, downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>, }, /// Indicates we should immediately resume the operation of another channel, unless there is @@ -1356,7 +1356,10 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (5, downstream_channel_id, required), }, (2, EmitEventAndFreeOtherChannel) => { - (0, event, upgradable_required), + // LDK prior to 0.3 required this field.
It will not be present for trampoline payments + // with multiple incoming HTLCs, so nodes cannot downgrade while trampoline payments + // are in the process of being resolved. + (0, event, upgradable_option), // LDK prior to 0.0.116 did not have this field as the monitor update application order was // required by clients. If we downgrade to something prior to 0.0.116 this may result in // monitor updates which aren't properly blocked or resumed, however that's fine - we don't @@ -9522,7 +9522,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ ); ( Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event: events::Event::PaymentForwarded { + event: Some(events::Event::PaymentForwarded { prev_htlcs: vec![events::HTLCLocator { channel_id: prev_channel_id, user_channel_id: prev_user_channel_id, node_id: prev_node_id, }], @@ -9534,7 +9537,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ skimmed_fee_msat, claim_from_onchain_tx: from_onchain, outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }, + }), downstream_counterparty_and_funding_outpoint: chan_to_release, }), None, @@ -9757,7 +9757,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ event, downstream_counterparty_and_funding_outpoint, } => { - self.pending_events.lock().unwrap().push_back((event, None)); + if let Some(event) = event { + self.pending_events.lock().unwrap().push_back((event, None)); + } if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { self.handle_monitor_update_release( unblocked.counterparty_node_id, From 67b5d915059be0eec49ca390d85c4d238b53a459 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 7 Jan 2026 15:36:30 -0500 Subject: [PATCH 03/14] ln/refactor: rename EmitEventAndFreeOtherChannel to note optional event --- lightning/src/ln/channelmanager.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3369f8031b9..4dcb3c29657 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1320,7 +1320,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// completes a monitor update containing the payment preimage. In that case, after the inbound /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the /// outbound edge. - EmitEventAndFreeOtherChannel { + EmitEventOptionAndFreeOtherChannel { event: Option<events::Event>, downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>, }, /// Indicates we should immediately resume the operation of another channel, unless there is /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge /// from completing a monitor update which removes the payment preimage until the inbound edge /// completes a monitor update containing the payment preimage. However, we use this variant - /// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in - /// fact duplicative and we simply want to resume the outbound edge channel immediately. + /// instead of [`Self::EmitEventOptionAndFreeOtherChannel`] when we discover that the claim was + /// in fact duplicative and we simply want to resume the outbound edge channel immediately. /// /// This variant should thus never be written to disk, as it is processed inline rather than /// stored for later processing.
@@ -1355,7 +1355,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (4, blocking_action, upgradable_required), (5, downstream_channel_id, required), }, - (2, EmitEventAndFreeOtherChannel) => { + (2, EmitEventOptionAndFreeOtherChannel) => { // LDK prior to 0.3 required this field. It will not be present for trampoline payments // with multiple incoming HTLCs, so nodes cannot downgrade while trampoline payments // are in the process of being resolved. (0, event, upgradable_option), @@ -9521,7 +9521,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "skimmed_fee_msat must always be included in total_fee_earned_msat" ); ( - Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { event: Some(events::Event::PaymentForwarded { prev_htlcs: vec![events::HTLCLocator { channel_id: prev_channel_id, @@ -9753,7 +9753,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { event, downstream_counterparty_and_funding_outpoint, } => { @@ -18454,7 +18454,7 @@ where let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); for action in actions.iter() { - if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + if let MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: Some(EventUnblockedChannel { counterparty_node_id: blocked_node_id, From 0069d039180813d3839ab4cda3856dd2d5e82136 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Wed, 7 Jan 2026 15:05:44 -0500 Subject: [PATCH 04/14] ln+events: allow multiple prev_channel_id in HTLCHandlingFailed In preparation for trampoline failures, allow multiple previous channel ids. We'll only emit a single HTLCHandlingFailed for all of our failed back HTLCs, so we want to be able to express all of them in one event. --- lightning/src/events/mod.rs | 23 ++++++++++++++++------- lightning/src/ln/channelmanager.rs | 4 ++-- lightning/src/ln/monitor_tests.rs | 4 ++-- lightning/src/util/ser.rs | 1 + 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index c5deed57a7f..98b1e913043 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1663,8 +1663,9 @@ pub enum Event { /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. HTLCHandlingFailed { - /// The channel over which the HTLC was received. - prev_channel_id: ChannelId, + /// The channel(s) over which the HTLC(s) was received. May contain multiple entries for + /// trampoline forwards. + prev_channel_ids: Vec<ChannelId>, /// The type of HTLC handling that failed. failure_type: HTLCHandlingFailureType, /// The reason that the HTLC failed. @@ -2197,15 +2198,16 @@ impl Writeable for Event { }) }, &Event::HTLCHandlingFailed { - ref prev_channel_id, + ref prev_channel_ids, ref failure_type, ref failure_reason, } => { 25u8.write(writer)?; write_tlv_fields!(writer, { - (0, prev_channel_id, required), + // Type 0 was prev_channel_id in 0.2 and earlier.
(1, failure_reason, option), (2, failure_type, required), + (3, *prev_channel_ids, required), }) }, &Event::BumpTransaction(ref event) => { @@ -2789,13 +2791,19 @@ impl MaybeReadable for Event { }, 25u8 => { let mut f = || { - let mut prev_channel_id = ChannelId::new_zero(); + let mut prev_channel_id_legacy = None; let mut failure_reason = None; let mut failure_type_opt = UpgradableRequired(None); + let mut prev_channel_ids = vec![]; read_tlv_fields!(reader, { - (0, prev_channel_id, required), + (0, prev_channel_id_legacy, option), (1, failure_reason, option), (2, failure_type_opt, upgradable_required), + // If our new prev_channel_ids field is not present, the legacy field + // must be because it used to be required. + (3, prev_channel_ids, (default_value, vec![ + prev_channel_id_legacy.ok_or(msgs::DecodeError::InvalidValue)?, + ])), }); // If a legacy HTLCHandlingFailureType::UnknownNextHop was written, upgrade @@ -2809,8 +2817,9 @@ impl MaybeReadable for Event { }); failure_reason = Some(LocalHTLCFailureReason::UnknownNextPeer.into()); } + Ok(Some(Event::HTLCHandlingFailed { - prev_channel_id, + prev_channel_ids, failure_type: _init_tlv_based_struct_field!( failure_type_opt, upgradable_required diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4dcb3c29657..72df726d9c7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7231,7 +7231,7 @@ where .push(failure); self.pending_events.lock().unwrap().push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: incoming_channel_id, + prev_channel_ids: vec![incoming_channel_id], failure_type, failure_reason: Some(failure_reason), }, @@ -8792,7 +8792,7 @@ where let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: *channel_id, + prev_channel_ids: vec![*channel_id], failure_type, failure_reason: Some(onion_error.into()), }, diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 04915affa20..a754c4fb81f 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -3792,8 +3792,8 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(hash_b)); }, - Event::HTLCHandlingFailed { prev_channel_id, .. } => { - assert_eq!(prev_channel_id, chan_a); + Event::HTLCHandlingFailed { prev_channel_ids, .. 
} => { assert_eq!(prev_channel_ids[0], chan_a); }, _ => panic!("Wrong event {ev:?}"), } diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 249505f2e21..24e535b70bb 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1101,6 +1101,7 @@ impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); impl_for_vec!(u32); impl_for_vec!(crate::events::HTLCLocator); +impl_for_vec!(crate::ln::types::ChannelId); impl Writeable for Vec<Witness> { #[inline] From 5d5caad8c615702bb975ed43205a6c22cf2096c7 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 6 Jan 2026 15:28:42 -0500 Subject: [PATCH 05/14] events: add TrampolineForward variant to HTLCHandlingFailureType --- lightning/src/events/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 98b1e913043..8ecd9419732 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -575,6 +575,14 @@ pub enum HTLCHandlingFailureType { /// The payment hash of the payment we attempted to process. payment_hash: PaymentHash, }, + /// We were responsible for pathfinding and forwarding of a trampoline payment, but failed to + /// do so. An example of such an instance is when we can't find a route to the specified + /// trampoline destination. + TrampolineForward { + /// The set of HTLCs dispatched by our node in an attempt to complete the trampoline forward + /// which have failed. + attempted_htlcs: Vec<HTLCLocator>, + }, } impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType, (4, Receive) => { (0, payment_hash, required), }, + (5, TrampolineForward) => { + (0, attempted_htlcs, required_vec), + }, ); /// The reason for HTLC failures in [`Event::HTLCHandlingFailed`]. From da3ff5c55c1b65b0334a00c3c2177e68cb45591b Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 2 Dec 2025 10:06:41 -0500 Subject: [PATCH 06/14] ln: add TrampolineForward SentHTLCId variant This commit adds a SentHTLCId for trampoline forwards, identified by their session_priv. As with an OutboundRoute, we can expect our HTLC to be uniquely identified by a randomly generated session_priv. TrampolineForward could also be identified by the set of all previous outbound scid/htlc id pairs that represent its incoming HTLC(s). We choose the 32 byte session_priv to fix the size of this identifier rather than 16 byte scid/id pairs that will grow with the number of incoming htlcs.
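To make that size comparison concrete, here is a small standalone sketch (illustrative Rust only, not LDK code; the type and function names below are hypothetical) of the two candidate identifier encodings:

    // A session_priv-based identifier is always 32 bytes, while identifying a
    // trampoline forward by its incoming HTLCs costs 16 bytes per HTLC: an
    // 8-byte short channel id alias plus an 8-byte htlc id.
    struct SessionPrivId([u8; 32]);

    struct PrevHopPair {
        prev_outbound_scid_alias: u64, // 8 bytes
        htlc_id: u64,                  // 8 bytes
    }

    fn identifier_sizes(num_incoming_htlcs: usize) -> (usize, usize) {
        let by_session_priv = core::mem::size_of::<SessionPrivId>(); // always 32
        let by_prev_hops = num_incoming_htlcs * core::mem::size_of::<PrevHopPair>();
        (by_session_priv, by_prev_hops)
    }

With three incoming MPP HTLCs the scid/id encoding would already need 48 bytes, while the session_priv form stays at 32 regardless of how many HTLCs fund the forward.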
--- lightning/src/ln/channelmanager.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 72df726d9c7..40926b342cb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -718,6 +718,7 @@ impl Default for OptionalOfferPaymentParams { pub(crate) enum SentHTLCId { PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 }, OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] }, + TrampolineForward { session_priv: [u8; SECRET_KEY_SIZE] }, } impl SentHTLCId { pub(crate) fn from_source(source: &HTLCSource) -> Self { @@ -740,6 +741,9 @@ impl_writeable_tlv_based_enum!(SentHTLCId, (2, OutboundRoute) => { (0, session_priv, required), }, + (4, TrampolineForward) => { + (0, session_priv, required), + }, ); // (src_outbound_scid_alias, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id) From 4ad54640474e46970cb2ec6efcae90a1de656044 Mon Sep 17 00:00:00 2001 From: Maurice Date: Fri, 22 Aug 2025 10:37:21 -0400 Subject: [PATCH 07/14] ln: add TrampolineForward variant to HTLCSource enum Co-authored-by: Arik Sosman Co-authored-by: Maurice Poirrier --- lightning/src/chain/channelmonitor.rs | 2 + lightning/src/ln/channelmanager.rs | 62 +++++++++++++++++++++++++++ lightning/src/util/ser.rs | 2 + 3 files changed, 66 insertions(+) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 515a3dc5f1d..ee93211af47 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -2868,6 +2868,7 @@ impl ChannelMonitorImpl { let outbound_payment = match source { None => panic!("Outbound HTLCs should have a source"), Some(&HTLCSource::PreviousHopData(_)) => false, + Some(&HTLCSource::TrampolineForward { .. }) => false, Some(&HTLCSource::OutboundRoute { .. }) => true, }; return Some(Balance::MaybeTimeoutClaimableHTLC { @@ -3080,6 +3081,7 @@ impl ChannelMonitor { let outbound_payment = match source { None => panic!("Outbound HTLCs should have a source"), Some(HTLCSource::PreviousHopData(_)) => false, + Some(HTLCSource::TrampolineForward { .. }) => false, Some(HTLCSource::OutboundRoute { .. }) => true, }; if outbound_payment { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 40926b342cb..bf3fe5d73d3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -727,6 +727,9 @@ impl SentHTLCId { prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias, htlc_id: hop_data.htlc_id, }, + HTLCSource::TrampolineForward { session_priv, .. } => { + Self::TrampolineForward { session_priv: session_priv.secret_bytes() } + }, HTLCSource::OutboundRoute { session_priv, .. } => { Self::OutboundRoute { session_priv: session_priv.secret_bytes() } }, @@ -753,6 +756,8 @@ type PerSourcePendingForward = type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType); mod fuzzy_channelmanager { + use crate::routing::router::RouteHop; + use super::*; /// Tracks the inbound corresponding to an outbound HTLC @@ -760,6 +765,16 @@ mod fuzzy_channelmanager { #[derive(Clone, Debug, PartialEq, Eq)] pub enum HTLCSource { PreviousHopData(HTLCPreviousHopData), + TrampolineForward { + /// We might be forwarding an incoming payment that was received over MPP, and therefore + /// need to store the vector of corresponding `HTLCPreviousHopData` values. 
+ previous_hop_data: Vec<HTLCPreviousHopData>, + incoming_trampoline_shared_secret: [u8; 32], + hops: Vec<RouteHop>, + /// In order to decode inter-Trampoline errors, we need to store the session_priv key + /// given we're effectively creating new outbound routes. + session_priv: SecretKey, + }, OutboundRoute { path: Path, session_priv: SecretKey, @@ -822,6 +837,18 @@ impl core::hash::Hash for HTLCSource { first_hop_htlc_msat.hash(hasher); bolt12_invoice.hash(hasher); }, + HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret, + hops, + session_priv, + } => { + 2u8.hash(hasher); + previous_hop_data.hash(hasher); + incoming_trampoline_shared_secret.hash(hasher); + hops.hash(hasher); + session_priv[..].hash(hasher); + }, } } } @@ -8803,6 +8830,7 @@ where None, )); }, + HTLCSource::TrampolineForward { .. } => todo!(), } } @@ -9550,6 +9578,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, ); }, + HTLCSource::TrampolineForward { .. } => todo!(), } } @@ -16481,6 +16510,24 @@ impl Readable for HTLCSource { }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), + 2 => { + let mut previous_hop_data = Vec::new(); + let mut incoming_trampoline_shared_secret: crate::util::ser::RequiredWrapper<[u8; 32]> = crate::util::ser::RequiredWrapper(None); + let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None); + let mut hops = Vec::new(); + read_tlv_fields!(reader, { + (0, previous_hop_data, required_vec), + (2, incoming_trampoline_shared_secret, required), + (4, session_priv, required), + (6, hops, required_vec), + }); + Ok(HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret: incoming_trampoline_shared_secret.0.unwrap(), + hops, + session_priv: session_priv.0.unwrap(), + }) + }, _ => Err(DecodeError::UnknownRequiredFeature), } } @@ -16513,6 +16560,20 @@ impl Writeable for HTLCSource { 1u8.write(writer)?; field.write(writer)?; }, + HTLCSource::TrampolineForward { + ref previous_hop_data, + incoming_trampoline_shared_secret, + ref session_priv, + ref hops, + } => { + 2u8.write(writer)?; + write_tlv_fields!(writer, { + (0, *previous_hop_data, required_vec), + (2, incoming_trampoline_shared_secret, required), + (4, session_priv, required), + (6, *hops, required_vec), + }); + }, } Ok(()) } @@ -18070,6 +18131,7 @@ where } else { true } }); }, + HTLCSource::TrampolineForward { ..
} => todo!(), HTLCSource::OutboundRoute { payment_id, session_priv, diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index 24e535b70bb..731c869b581 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1085,6 +1085,7 @@ impl Readable for Vec<u8> { impl_for_vec!(ecdsa::Signature); impl_for_vec!(crate::chain::channelmonitor::ChannelMonitorUpdate); +impl_for_vec!(crate::ln::channelmanager::HTLCPreviousHopData); impl_for_vec!(crate::ln::channelmanager::MonitorUpdateCompletionAction); impl_for_vec!(crate::ln::channelmanager::PaymentClaimDetails); impl_for_vec!(crate::ln::msgs::SocketAddress); @@ -1096,6 +1097,7 @@ impl_for_vec!(crate::ln::our_peer_storage::PeerStorageMonitorHolder); impl_for_vec!(crate::blinded_path::message::BlindedMessagePath); impl_writeable_for_vec!(&crate::routing::router::BlindedTail); impl_readable_for_vec!(crate::routing::router::BlindedTail); +impl_for_vec!(crate::routing::router::RouteHop); impl_for_vec!(crate::routing::router::TrampolineHop); From 29a392267eb48382b5aed7d907184963027a8ebd Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 16 Dec 2025 15:21:57 +0200 Subject: [PATCH 08/14] ln/refactor: add claim funds for htlc forward helper Will need to share this code when we add trampoline forwarding. This commit exactly moves the logic as-is, in preparation for the next commit that will update it to suit trampoline. Co-authored-by: Arik Sosman Co-authored-by: Maurice Poirrier --- lightning/src/ln/channelmanager.rs | 294 ++++++++++++++++------------- 1 file changed, 163 insertions(+), 131 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bf3fe5d73d3..94a4ae8b994 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9063,6 +9063,157 @@ where } } + /// Claims funds for a forwarded HTLC where we are an intermediate hop. + /// + /// Processes attribution data, calculates fees earned, and emits an [`Event::PaymentForwarded`] + /// event upon successful claim. + fn claim_funds_from_htlc_forward_hop( + &self, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, + skimmed_fee_msat: Option<u64>, from_onchain: bool, startup_replay: bool, + next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint, + next_channel_id: ChannelId, next_user_channel_id: Option<u128>, + hop_data: HTLCPreviousHopData, attribution_data: Option<AttributionData>, + send_timestamp: Option<Duration>, + ) { + let prev_channel_id = hop_data.channel_id; + let prev_user_channel_id = hop_data.user_channel_id; + let prev_node_id = hop_data.counterparty_node_id; + let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); + + // Obtain hold time, if available. + let hold_time = hold_time_since(send_timestamp).unwrap_or(0); + + // If attribution data was received from downstream, we shift it and get it ready for adding our hold + // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA + // to record the hold time like we do for failed HTLCs.
+ let attribution_data = process_fulfill_attribution_data( + attribution_data, + &hop_data.incoming_packet_shared_secret, + hold_time, + ); + + #[cfg(test)] + let claiming_chan_funding_outpoint = hop_data.outpoint; + self.claim_funds_from_hop( + hop_data, + payment_preimage, + None, + Some(attribution_data), + |htlc_claim_value_msat, definitely_duplicate| { + let chan_to_release = Some(EventUnblockedChannel { + counterparty_node_id: next_channel_counterparty_node_id, + funding_txo: next_channel_outpoint, + channel_id: next_channel_id, + blocking_action: completed_blocker, + }); + + if definitely_duplicate && startup_replay { + // On startup we may get redundant claims which are related to + // monitor updates still in flight. In that case, we shouldn't + // immediately free, but instead let that monitor update complete + // in the background. + #[cfg(test)] + { + let per_peer_state = self.per_peer_state.deadlocking_read(); + // The channel we'd unblock should already be closed, or... + let channel_closed = per_peer_state + .get(&next_channel_counterparty_node_id) + .map(|lck| lck.deadlocking_lock()) + .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) + .unwrap_or(true); + let background_events = self.pending_background_events.lock().unwrap(); + // there should be a `BackgroundEvent` pending... + let matching_bg_event = + background_events.iter().any(|ev| { + match ev { + // to apply a monitor update that blocked the claiming channel, + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + funding_txo, + update, + .. + } => { + if *funding_txo == claiming_chan_funding_outpoint { + assert!( + update.updates.iter().any(|upd| { + if let ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: update_preimage, .. + } = upd { + payment_preimage == *update_preimage + } else { false } + }), + "{:?}", + update + ); + true + } else { + false + } + }, + // or the monitor update has completed and will unblock + // immediately once we get going. + BackgroundEvent::MonitorUpdatesComplete { + channel_id, .. 
+ } => *channel_id == prev_channel_id, + } + }); + assert!(channel_closed || matching_bg_event, "{:?}", *background_events); + } + (None, None) + } else if definitely_duplicate { + if let Some(other_chan) = chan_to_release { + ( + Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: other_chan.counterparty_node_id, + downstream_channel_id: other_chan.channel_id, + blocking_action: other_chan.blocking_action, + }), + None, + ) + } else { + (None, None) + } + } else { + let total_fee_earned_msat = + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { + None + } + } else { + None + }; + debug_assert!( + skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat" + ); + ( + Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { + event: Some(events::Event::PaymentForwarded { + prev_htlcs: vec![events::HTLCLocator { + channel_id: prev_channel_id, + user_channel_id: prev_user_channel_id, + node_id: prev_node_id, + }], + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + total_fee_earned_msat, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }), + downstream_counterparty_and_funding_outpoint: chan_to_release, + }), + None, + ) + } + }, + ); + } + fn claim_funds_from_hop< ComplFunc: FnOnce( Option, @@ -9444,138 +9595,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, HTLCSource::PreviousHopData(hop_data) => { - let prev_channel_id = hop_data.channel_id; - let prev_user_channel_id = hop_data.user_channel_id; - let prev_node_id = hop_data.counterparty_node_id; - let completed_blocker = - RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); - - // Obtain hold time, if available. - let hold_time = hold_time_since(send_timestamp).unwrap_or(0); - - // If attribution data was received from downstream, we shift it and get it ready for adding our hold - // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA - // to record the hold time like we do for failed HTLCs. - let attribution_data = process_fulfill_attribution_data( - attribution_data, - &hop_data.incoming_packet_shared_secret, - hold_time, - ); - - #[cfg(test)] - let claiming_chan_funding_outpoint = hop_data.outpoint; - self.claim_funds_from_hop( - hop_data, + self.claim_funds_from_htlc_forward_hop( payment_preimage, - None, - Some(attribution_data), - |htlc_claim_value_msat, definitely_duplicate| { - let chan_to_release = Some(EventUnblockedChannel { - counterparty_node_id: next_channel_counterparty_node_id, - funding_txo: next_channel_outpoint, - channel_id: next_channel_id, - blocking_action: completed_blocker, - }); - - if definitely_duplicate && startup_replay { - // On startup we may get redundant claims which are related to - // monitor updates still in flight. In that case, we shouldn't - // immediately free, but instead let that monitor update complete - // in the background. - #[cfg(test)] - { - let per_peer_state = self.per_peer_state.deadlocking_read(); - // The channel we'd unblock should already be closed, or... 
- let channel_closed = per_peer_state - .get(&next_channel_counterparty_node_id) - .map(|lck| lck.deadlocking_lock()) - .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) - .unwrap_or(true); - let background_events = - self.pending_background_events.lock().unwrap(); - // there should be a `BackgroundEvent` pending... - let matching_bg_event = - background_events.iter().any(|ev| { - match ev { - // to apply a monitor update that blocked the claiming channel, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - funding_txo, update, .. - } => { - if *funding_txo == claiming_chan_funding_outpoint { - assert!(update.updates.iter().any(|upd| - if let ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: update_preimage, .. - } = upd { - payment_preimage == *update_preimage - } else { false } - ), "{:?}", update); - true - } else { false } - }, - // or the monitor update has completed and will unblock - // immediately once we get going. - BackgroundEvent::MonitorUpdatesComplete { - channel_id, .. - } => - *channel_id == prev_channel_id, - } - }); - assert!( - channel_closed || matching_bg_event, - "{:?}", - *background_events - ); - } - (None, None) - } else if definitely_duplicate { - if let Some(other_chan) = chan_to_release { - (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id: other_chan.counterparty_node_id, - downstream_channel_id: other_chan.channel_id, - blocking_action: other_chan.blocking_action, - }), None) - } else { - (None, None) - } - } else { - let total_fee_earned_msat = - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { - None - } - } else { - None - }; - debug_assert!( - skimmed_fee_msat <= total_fee_earned_msat, - "skimmed_fee_msat must always be included in total_fee_earned_msat" - ); - ( - Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { - event: Some(events::Event::PaymentForwarded { - prev_htlcs: vec![events::HTLCLocator { - channel_id: prev_channel_id, - user_channel_id: prev_user_channel_id, - node_id: prev_node_id, - }], - next_htlcs: vec![events::HTLCLocator { - channel_id: next_channel_id, - user_channel_id: next_user_channel_id, - node_id: Some(next_channel_counterparty_node_id), - }], - total_fee_earned_msat, - skimmed_fee_msat, - claim_from_onchain_tx: from_onchain, - outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }), - downstream_counterparty_and_funding_outpoint: chan_to_release, - }), - None, - ) - } - }, + forwarded_htlc_value_msat, + skimmed_fee_msat, + from_onchain, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + next_user_channel_id, + hop_data, + attribution_data, + send_timestamp, ); }, HTLCSource::TrampolineForward { .. } => todo!(), From 613fc99446ee7458a1bd6cd683dacf09e825d20d Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 6 Jan 2026 09:09:06 -0500 Subject: [PATCH 09/14] ln/refactor: pass closure to create PaymentForwarded event When we introduce trampoline forwards, we're going to want to provide two external pieces of information to create events: - When to emit an event: we only want to emit one trampoline event, even when we have multiple incoming htlcs. 
We need to make multiple calls to claim_funds_from_htlc_forward_hop to claim each individual htlc, which are not aware of each other, so we rely on the caller's closure to decide when to emit Some or None. - Forwarding fees: we will not be able to calculate the total fee for a trampoline forward when an individual outgoing HTLC is fulfilled, because there may be other outgoing htlcs that are not accounted for (we only get the htlc_claim_value_msat for the single htlc that was just fulfilled). In future, we'll be able to provide the total fee from the channelmanager's top level view. --- lightning/src/ln/channelmanager.rs | 102 ++++++++++++++++------------- 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 94a4ae8b994..4e067d84733 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -809,6 +809,16 @@ mod fuzzy_channelmanager { /// channel remains unconfirmed for too long. pub cltv_expiry: Option<u32>, } + + impl From<&HTLCPreviousHopData> for events::HTLCLocator { + fn from(value: &HTLCPreviousHopData) -> Self { + events::HTLCLocator { + channel_id: value.channel_id, + user_channel_id: value.user_channel_id, + node_id: value.counterparty_node_id, + } + } + } } #[cfg(fuzzing)] pub use self::fuzzy_channelmanager::*; @@ -9066,18 +9076,16 @@ where /// Claims funds for a forwarded HTLC where we are an intermediate hop. /// /// Processes attribution data, calculates fees earned, and emits an [`Event::PaymentForwarded`] - /// event upon successful claim. + /// event upon successful claim. `make_payment_forwarded_event` is responsible for creating a + /// single [`Event::PaymentForwarded`] event that represents the forward. fn claim_funds_from_htlc_forward_hop( - &self, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, - skimmed_fee_msat: Option<u64>, from_onchain: bool, startup_replay: bool, - next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint, - next_channel_id: ChannelId, next_user_channel_id: Option<u128>, - hop_data: HTLCPreviousHopData, attribution_data: Option<AttributionData>, - send_timestamp: Option<Duration>, + &self, payment_preimage: PaymentPreimage, + make_payment_forwarded_event: impl Fn(Option<u64>) -> Option<events::Event>, + startup_replay: bool, next_channel_counterparty_node_id: PublicKey, + next_channel_outpoint: OutPoint, next_channel_id: ChannelId, hop_data: HTLCPreviousHopData, + attribution_data: Option<AttributionData>, send_timestamp: Option<Duration>, ) { - let prev_channel_id = hop_data.channel_id; - let prev_user_channel_id = hop_data.user_channel_id; - let prev_node_id = hop_data.counterparty_node_id; + let _prev_channel_id = hop_data.channel_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); // Obtain hold time, if available. @@ -9153,7 +9161,7 @@ where // immediately once we get going. BackgroundEvent::MonitorUpdatesComplete { channel_id, ..
- } => *channel_id == prev_channel_id, + } => *channel_id == _prev_channel_id, } }); assert!(channel_closed || matching_bg_event, "{:?}", *background_events); } (None, None) } else if definitely_duplicate { if let Some(other_chan) = chan_to_release { ( Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: other_chan.counterparty_node_id, downstream_channel_id: other_chan.channel_id, blocking_action: other_chan.blocking_action, }), None, ) } else { (None, None) } } else { - let total_fee_earned_msat = - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { - None - } - } else { - None - }; - debug_assert!( - skimmed_fee_msat <= total_fee_earned_msat, - "skimmed_fee_msat must always be included in total_fee_earned_msat" - ); + let event = make_payment_forwarded_event(htlc_claim_value_msat); + if let Some(ref payment_forwarded) = event { + debug_assert!(matches!( + payment_forwarded, + &events::Event::PaymentForwarded { .. } + )); + } ( Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { - event: Some(events::Event::PaymentForwarded { - prev_htlcs: vec![events::HTLCLocator { - channel_id: prev_channel_id, - user_channel_id: prev_user_channel_id, - node_id: prev_node_id, - }], - next_htlcs: vec![events::HTLCLocator { - channel_id: next_channel_id, - user_channel_id: next_user_channel_id, - node_id: Some(next_channel_counterparty_node_id), - }], - total_fee_earned_msat, - skimmed_fee_msat, - claim_from_onchain_tx: from_onchain, - outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }), + event, downstream_counterparty_and_funding_outpoint: chan_to_release, }), None, ) } }, ); } HTLCSource::PreviousHopData(hop_data) => { + let prev_htlcs = vec![events::HTLCLocator::from(&hop_data)]; self.claim_funds_from_htlc_forward_hop( payment_preimage, + |htlc_claim_value_msat: Option<u64>| -> Option<events::Event> { + let total_fee_earned_msat = + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { + None + } + } else { + None + }; + debug_assert!( + skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat" + ); + + Some(events::Event::PaymentForwarded { + prev_htlcs: prev_htlcs.clone(), + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + total_fee_earned_msat, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }) + }, startup_replay, next_channel_counterparty_node_id, next_channel_outpoint, next_channel_id, hop_data, attribution_data, send_timestamp, From 9a23de4e75c4c855d00cb820037acddc462bfc0d Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Tue, 6 Jan 2026 09:42:37 -0500 Subject: [PATCH 10/14] ln: add trampoline routing payment claiming Implement payment claiming for `HTLCSource::TrampolineForward` by iterating through previous hop data and claiming funds for each HTLC. Co-authored-by: Arik Sosman Co-authored-by: Maurice Poirrier
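As a rough model of the claim loop this patch adds (simplified sketch only; the real code passes a per-hop closure into claim_funds_from_htlc_forward_hop), only the first previous hop's closure yields the single PaymentForwarded event:

    // Each incoming HTLC is claimed individually, but only the iteration for the
    // first previous hop produces Some(event); the rest return None so a
    // multi-HTLC trampoline forward is surfaced exactly once.
    fn events_for_claims<E>(num_prev_hops: usize, make_event: impl Fn() -> E) -> Vec<Option<E>> {
        (0..num_prev_hops).map(|i| if i == 0 { Some(make_event()) } else { None }).collect()
    }

    fn main() {
        let events = events_for_claims(3, || "PaymentForwarded");
        assert_eq!(events.iter().filter(|e| e.is_some()).count(), 1);
    }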
--- lightning/src/ln/channelmanager.rs | 42 +++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 4e067d84733..8acf80fd6ec 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9622,7 +9622,47 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ send_timestamp, ); }, - HTLCSource::TrampolineForward { .. } => todo!(), + HTLCSource::TrampolineForward { previous_hop_data, .. } => { + // Only emit a single event for trampoline claims. + let prev_htlcs: Vec<events::HTLCLocator> = + previous_hop_data.iter().map(Into::into).collect(); + for (i, current_previous_hop_data) in previous_hop_data.into_iter().enumerate() { + self.claim_funds_from_htlc_forward_hop( + payment_preimage, + |_: Option<u64>| -> Option<events::Event> { + if i == 0 { + Some(events::Event::PaymentForwarded { + prev_htlcs: prev_htlcs.clone(), + // TODO: When trampoline payments are tracked in our + // pending_outbound_payments, we'll be able to provide all the + // outgoing htlcs for this forward. + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + // TODO: When trampoline payments are tracked in our + // pending_outbound_payments, we'll be able to lookup our total + // fee earnings. + total_fee_earned_msat: None, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }) + } else { + None + } + }, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + current_previous_hop_data, + attribution_data.clone(), + send_timestamp, + ); + } + }, } } From 0dfa2fc98f40ee32600801ec688e37e50b2bfadc Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Nov 2025 11:04:59 -0500 Subject: [PATCH 11/14] ln/refactor: add blinded forwarding failure helper function We'll want this extracted when we need to handle trampoline and regular forwards. Co-authored-by: Arik Sosman Co-authored-by: Maurice Poirrier --- lightning/src/ln/channelmanager.rs | 100 ++++++++++++++++++----------- 1 file changed, 62 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8acf80fd6ec..65af2576391 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -8733,6 +8733,19 @@ where debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); } + let push_forward_htlcs_failure = + |prev_outbound_scid_alias: u64, failure: HTLCForwardInfo| { + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + match forward_htlcs.entry(prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(failure); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![failure]); + }, + } + }; + //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. We should make this async and move it into the forward HTLCs @@ -8791,45 +8804,19 @@ where if blinded_failure.is_some() { "blinded " } else { "" }, onion_error ); - // In case of trampoline + phantom we prioritize the trampoline failure over the phantom failure.
- // TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC.
- let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret);
- let failure = match blinded_failure {
- Some(BlindedFailure::FromIntroductionNode) => {
- let blinded_onion_error = HTLCFailReason::reason(
- LocalHTLCFailureReason::InvalidOnionBlinding,
- vec![0; 32],
- );
- let err_packet = blinded_onion_error.get_encrypted_failure_packet(
- incoming_packet_shared_secret,
- &secondary_shared_secret,
- );
- HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
- },
- Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC {
- htlc_id: *htlc_id,
- failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
- sha256_of_onion: [0; 32],
- },
- None => {
- let err_packet = onion_error.get_encrypted_failure_packet(
- incoming_packet_shared_secret,
- &secondary_shared_secret,
- );
- HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
- },
- };
- let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
- match forward_htlcs.entry(*prev_outbound_scid_alias) {
- hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(failure);
- },
- hash_map::Entry::Vacant(entry) => {
- entry.insert(vec![failure]);
- },
- }
- mem::drop(forward_htlcs);
+ push_forward_htlcs_failure(
+ *prev_outbound_scid_alias,
+ get_htlc_forward_failure(
+ blinded_failure,
+ onion_error,
+ incoming_packet_shared_secret,
+ trampoline_shared_secret,
+ phantom_shared_secret,
+ *htlc_id,
+ ),
+ );
+
 let mut pending_events = self.pending_events.lock().unwrap();
 pending_events.push_back((
 events::Event::HTLCHandlingFailed {
@@ -13090,6 +13077,43 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 }
 }

+/// Constructs an HTLC forward failure for sending back to the previous hop, converting to a blinded
+/// failure where appropriate.
+///
+/// When both trampoline and phantom secrets are present, the trampoline secret takes priority
+/// for error encryption.
+fn get_htlc_forward_failure(
+ blinded_failure: &Option<BlindedFailure>, onion_error: &HTLCFailReason,
+ incoming_packet_shared_secret: &[u8; 32], trampoline_shared_secret: &Option<[u8; 32]>,
+ phantom_shared_secret: &Option<[u8; 32]>, htlc_id: u64,
+) -> HTLCForwardInfo {
+ // TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC.
+ let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret);
+ match blinded_failure {
+ Some(BlindedFailure::FromIntroductionNode) => {
+ let blinded_onion_error =
+ HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]);
+ let err_packet = blinded_onion_error.get_encrypted_failure_packet(
+ incoming_packet_shared_secret,
+ &secondary_shared_secret,
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet }
+ },
+ Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC {
+ htlc_id,
+ failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
+ sha256_of_onion: [0; 32],
+ },
+ None => {
+ let err_packet = onion_error.get_encrypted_failure_packet(
+ incoming_packet_shared_secret,
+ &secondary_shared_secret,
+ );
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet }
+ },
+ }
+}
+
 /// Parameters used with [`create_bolt11_invoice`].
 ///
 /// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice

From 771488ab743b0675fe48a8b2548688a04c3826b5 Mon Sep 17 00:00:00 2001
From: Carla Kirk-Cohen
Date: Mon, 1 Dec 2025 15:50:46 -0500
Subject: [PATCH 12/14] ln: add trampoline routing failure handling

Implement failure propagation for `HTLCSource::TrampolineForward` by
iterating through previous hop data and failing each HTLC with
`TemporaryTrampolineFailure`.

Note that tests should be added once trampoline forwarding is complete.

Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 59 +++++++++++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 65af2576391..6e47bc05493 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -8827,7 +8827,64 @@ where
 None,
 ));
 },
- HTLCSource::TrampolineForward { .. } => todo!(),
+ HTLCSource::TrampolineForward {
+ previous_hop_data,
+ incoming_trampoline_shared_secret,
+ ..
+ } => {
+ // TODO: what do we want to do with this given we do not wish to propagate it directly?
+ let _decoded_onion_failure =
+ onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source);
+ let incoming_trampoline_shared_secret = Some(*incoming_trampoline_shared_secret);
+
+ // TODO: when we receive a failure from a single outgoing trampoline HTLC, we don't
+ // necessarily want to fail all of our incoming HTLCs back yet. We may have other
+ // outgoing HTLCs that need to resolve first. This will be tracked in our
+ // pending_outbound_payments in a followup.
+ for current_hop_data in previous_hop_data {
+ let incoming_packet_shared_secret =
+ &current_hop_data.incoming_packet_shared_secret;
+ let channel_id = &current_hop_data.channel_id;
+ let short_channel_id = &current_hop_data.prev_outbound_scid_alias;
+ let htlc_id = &current_hop_data.htlc_id;
+ let blinded_failure = &current_hop_data.blinded_failure;
+ log_trace!(
+ WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
+ "Failing {}HTLC with payment_hash {} backwards from us following Trampoline forwarding failure: {:?}",
+ if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
+ );
+ let onion_error = HTLCFailReason::reason(
+ LocalHTLCFailureReason::TemporaryTrampolineFailure,
+ Vec::new(),
+ );
+ push_forward_htlcs_failure(
+ *short_channel_id,
+ get_htlc_forward_failure(
+ blinded_failure,
+ &onion_error,
+ incoming_packet_shared_secret,
+ &incoming_trampoline_shared_secret,
+ &None,
+ *htlc_id,
+ ),
+ );
+ }
+
+ // We only want to emit a single event for trampoline failures, so we do it once
+ // we've failed back all of our incoming HTLCs.
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push_back((
+ events::Event::HTLCHandlingFailed {
+ prev_channel_ids: previous_hop_data
+ .iter()
+ .map(|prev| prev.channel_id)
+ .collect(),
+ failure_type,
+ failure_reason: Some(onion_error.into()),
+ },
+ None,
+ ));
+ },
 }
 }

From 56b1b792c7bd57437d9d0cab31bca379d806c0ed Mon Sep 17 00:00:00 2001
From: Maurice
Date: Mon, 25 Aug 2025 15:33:44 -0400
Subject: [PATCH 13/14] ln/refactor: extract channelmonitor recovery to external helper

Move recovery logic for `HTLCSource::PreviousHopData` into
`reconcile_pending_htlcs_with_monitor` to prepare for trampoline forward
reuse.
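The reuse this prepares for looks roughly as follows (an illustrative
sketch with hypothetical, heavily simplified types; the real helper takes
the full set of pending-HTLC maps rather than a single argument):

    // Hypothetical sketch: `HopData` and `Source` stand in for
    // `HTLCPreviousHopData` and `HTLCSource`. The helper reconciles one
    // previous hop per call, so the trampoline arm can simply loop.
    struct HopData;

    enum Source {
        PreviousHop(HopData),
        TrampolineForward(Vec<HopData>),
    }

    fn reconcile_pending_htlcs_with_monitor(hop: HopData) {
        let _ = hop; // dedup/prune logic elided in this sketch
    }

    fn recover(source: Source) {
        match source {
            Source::PreviousHop(hop) => reconcile_pending_htlcs_with_monitor(hop),
            // Added in the next patch: one reconcile call per previous hop.
            Source::TrampolineForward(hops) => {
                for hop in hops {
                    reconcile_pending_htlcs_with_monitor(hop);
                }
            },
        }
    }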
Co-authored-by: Arik Sosman
Co-authored-by: Maurice Poirrier
---
 lightning/src/ln/channelmanager.rs | 108 ++++++++++++++++++-----------
 1 file changed, 66 insertions(+), 42 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 6e47bc05493..36dd00f8b91 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -18250,51 +18250,17 @@ where
 let htlc_id = SentHTLCId::from_source(&htlc_source);
 match htlc_source {
 HTLCSource::PreviousHopData(prev_hop_data) => {
- let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
- info.prev_funding_outpoint == prev_hop_data.outpoint
- && info.prev_htlc_id == prev_hop_data.htlc_id
- };
- // The ChannelMonitor is now responsible for this HTLC's
- // failure/success and will let us know what its outcome is. If we
- // still have an entry for this HTLC in `forward_htlcs`,
- // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
- // persisted after the monitor was when forwarding the payment.
- dedup_decode_update_add_htlcs(
+ reconcile_pending_htlcs_with_monitor(
+ &mut forward_htlcs_legacy,
+ &mut pending_events_read,
+ &mut pending_intercepted_htlcs_legacy,
 &mut decode_update_add_htlcs,
- &prev_hop_data,
- "HTLC was forwarded to the closed channel",
- &args.logger,
- );
- dedup_decode_update_add_htlcs(
 &mut decode_update_add_htlcs_legacy,
- &prev_hop_data,
- "HTLC was forwarded to the closed channel",
- &args.logger,
+ prev_hop_data,
+ &logger,
+ htlc.payment_hash,
+ monitor.channel_id(),
 );
- forward_htlcs_legacy.retain(|_, forwards| {
- forwards.retain(|forward| {
- if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.channel_id());
- false
- } else { true }
- } else { true }
- });
- !forwards.is_empty()
- });
- pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| {
- if pending_forward_matches_htlc(&htlc_info) {
- log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.channel_id());
- pending_events_read.retain(|(event, _)| {
- if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
- intercepted_id != ev_id
- } else { true }
- });
- false
- } else { true }
- });
 },
 HTLCSource::TrampolineForward { .. } => todo!(),
 HTLCSource::OutboundRoute {
@@ -19211,6 +19177,64 @@ where
 }
 }

+/// Removes pending HTLC entries that the ChannelMonitor has already taken responsibility for,
+/// cleaning up state mismatches that can occur during restart.
+fn reconcile_pending_htlcs_with_monitor(
+ forward_htlcs_legacy: &mut HashMap<u64, Vec<HTLCForwardInfo>>,
+ pending_events_read: &mut VecDeque<(Event, Option<EventCompletionAction>)>,
+ pending_intercepted_htlcs_legacy: &mut HashMap<InterceptId, PendingAddHTLCInfo>,
+ decode_update_add_htlcs: &mut HashMap<u64, Vec<msgs::UpdateAddHTLC>>,
+ decode_update_add_htlcs_legacy: &mut HashMap<u64, Vec<msgs::UpdateAddHTLC>>,
+ prev_hop_data: HTLCPreviousHopData, logger: &impl Logger, payment_hash: PaymentHash,
+ channel_id: ChannelId,
+) {
+ let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+ info.prev_funding_outpoint == prev_hop_data.outpoint
+ && info.prev_htlc_id == prev_hop_data.htlc_id
+ };
+ // The ChannelMonitor is now responsible for this HTLC's
+ // failure/success and will let us know what its outcome is. If we
+ // still have an entry for this HTLC in `forward_htlcs`,
+ // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
+ // persisted after the monitor was when forwarding the payment.
+ dedup_decode_update_add_htlcs(
+ decode_update_add_htlcs,
+ &prev_hop_data,
+ "HTLC was forwarded to the closed channel",
+ &logger,
+ );
+ dedup_decode_update_add_htlcs(
+ decode_update_add_htlcs_legacy,
+ &prev_hop_data,
+ "HTLC was forwarded to the closed channel",
+ &logger,
+ );
+ forward_htlcs_legacy.retain(|_, forwards| {
+ forwards.retain(|forward| {
+ if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+ &payment_hash, &channel_id);
+ false
+ } else { true }
+ } else { true }
+ });
+ !forwards.is_empty()
+ });
+ pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| {
+ if pending_forward_matches_htlc(&htlc_info) {
+ log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+ &payment_hash, &channel_id);
+ pending_events_read.retain(|(event, _)| {
+ if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+ intercepted_id != ev_id
+ } else { true }
+ });
+ false
+ } else { true }
+ });
+}
+
 #[cfg(test)]
 mod tests {
 use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};

From b9a8b09f99d91599d46e336b10ffbeffeb094fc4 Mon Sep 17 00:00:00 2001
From: Maurice
Date: Mon, 25 Aug 2025 15:38:34 -0400
Subject: [PATCH 14/14] ln: add channel monitor recovery for trampoline forwards

Implement channel monitor recovery for trampoline forwards by iterating
over all hop data and updating pending forwards.
---
 lightning/src/ln/channelmanager.rs | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 36dd00f8b91..34e5c0ef917 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -18262,7 +18262,21 @@ where
 monitor.channel_id(),
 );
 },
- HTLCSource::TrampolineForward { .. } => todo!(),
+ HTLCSource::TrampolineForward { previous_hop_data, .. } => {
+ for prev_hop_data in previous_hop_data {
+ reconcile_pending_htlcs_with_monitor(
+ &mut forward_htlcs_legacy,
+ &mut pending_events_read,
+ &mut pending_intercepted_htlcs_legacy,
+ &mut decode_update_add_htlcs,
+ &mut decode_update_add_htlcs_legacy,
+ prev_hop_data,
+ &logger,
+ htlc.payment_hash,
+ monitor.channel_id(),
+ );
+ }
+ },
 HTLCSource::OutboundRoute {
 payment_id,
 session_priv,