diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 2b712073568..8ac02309981 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -233,6 +233,11 @@ impl_writeable_tlv_based_enum_legacy!(PaymentPurpose,
 /// Information about an HTLC that is part of a payment that can be claimed.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClaimedHTLC {
+	/// The counterparty of the channel.
+	///
+	/// This value will always be `None` for objects serialized with LDK versions prior to 0.2 and
+	/// `Some` otherwise.
+	pub counterparty_node_id: Option<PublicKey>,
 	/// The `channel_id` of the channel over which the HTLC was received.
 	pub channel_id: ChannelId,
 	/// The `user_channel_id` of the channel over which the HTLC was received. This is the value
@@ -263,6 +268,7 @@ impl_writeable_tlv_based!(ClaimedHTLC, {
 	(0, channel_id, required),
 	(1, counterparty_skimmed_fee_msat, (default_value, 0u64)),
 	(2, user_channel_id, required),
+	(3, counterparty_node_id, option),
 	(4, cltv_expiry, required),
 	(6, value_msat, required),
 });
diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs
index 225f58898c2..511bdce6d69 100644
--- a/lightning/src/ln/async_signer_tests.rs
+++ b/lightning/src/ln/async_signer_tests.rs
@@ -1395,6 +1395,7 @@ fn test_no_disconnect_while_async_commitment_signed_expecting_remote_revoke_and_
 	let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount);
 	nodes[1].node.claim_funds(preimage);
 	check_added_monitors(&nodes[1], 1);
+	expect_payment_claimed!(nodes[1], payment_hash, payment_amount);
 
 	// We'll disable signing counterparty commitments on the payment sender.
 	nodes[0].disable_channel_signer_op(&node_b_id, &chan_id, SignerOp::SignCounterpartyCommitment);
@@ -1403,6 +1404,7 @@ fn test_no_disconnect_while_async_commitment_signed_expecting_remote_revoke_and_
 	// the `commitment_signed` is no longer pending.
 	let mut update = get_htlc_update_msgs!(&nodes[1], node_a_id);
 	nodes[0].node.handle_update_fulfill_htlc(node_b_id, update.update_fulfill_htlcs.remove(0));
+	expect_payment_sent(&nodes[0], preimage, None, false, false);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &update.commitment_signed);
 	check_added_monitors(&nodes[0], 1);
 
@@ -1426,7 +1428,4 @@ fn test_no_disconnect_while_async_commitment_signed_expecting_remote_revoke_and_
 	};
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().into_iter().any(has_disconnect_event));
-
-	expect_payment_sent(&nodes[0], preimage, None, false, false);
-	expect_payment_claimed!(nodes[1], payment_hash, payment_amount);
 }
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 0a8f258935c..1302932a8a9 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -4694,6 +4694,23 @@ fn test_single_channel_multiple_mpp() {
 	// `update_fulfill_htlc`/`commitment_signed` pair to pass to our counterparty.
 	do_a_write.send(()).unwrap();
 
+	let event_node: &'static TestChannelManager<'static, 'static> =
+		unsafe { std::mem::transmute(nodes[8].node as &TestChannelManager) };
+	let thrd_event = std::thread::spawn(move || {
+		let mut have_event = false;
+		while !have_event {
+			let mut events = event_node.get_and_clear_pending_events();
+			assert!(events.len() == 1 || events.len() == 0);
+			if events.len() == 1 {
+				if let Event::PaymentClaimed { .. } = events[0] {
+				} else {
+					panic!("Unexpected event {events:?}");
+				}
+				have_event = true;
+			}
+		}
+	});
+
 	// Then fetch the `update_fulfill_htlc`/`commitment_signed`. Note that the
 	// `get_and_clear_pending_msg_events` will immediately hang trying to take a peer lock which
 	// `claim_funds` is holding. Thus, we release a second write after a small sleep in the
@@ -4713,7 +4730,11 @@ fn test_single_channel_multiple_mpp() {
 	});
 	block_thrd2.store(false, Ordering::Release);
 	let mut first_updates = get_htlc_update_msgs(&nodes[8], &node_h_id);
+
+	// Thread 2 could unblock first, or it could get blocked waiting on us to process a
+	// `PaymentClaimed` event. Either way, wait until both have finished.
 	thrd2.join().unwrap();
+	thrd_event.join().unwrap();
 
 	// Disconnect node 6 from all its peers so it doesn't bother to fail the HTLCs back
 	nodes[7].node.peer_disconnected(node_b_id);
@@ -4760,8 +4781,6 @@ fn test_single_channel_multiple_mpp() {
 	thrd4.join().unwrap();
 	thrd.join().unwrap();
 
-	expect_payment_claimed!(nodes[8], payment_hash, 50_000_000);
-
 	// At the end, we should have 7 ChannelMonitorUpdates - 6 for HTLC claims, and one for the
 	// above `revoke_and_ack`.
 	check_added_monitors(&nodes[8], 7);
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 2fc2ec3eba7..b5cf6f3ea9c 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -6401,7 +6401,7 @@ where
 		Ok((closing_transaction, total_fee_satoshis))
 	}
 
-	fn funding_outpoint(&self) -> OutPoint {
+	pub fn funding_outpoint(&self) -> OutPoint {
 		self.funding.channel_transaction_parameters.funding_outpoint.unwrap()
 	}
 
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 8fa34d7c349..1fd99f89451 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -450,7 +450,7 @@ pub(super) struct PendingAddHTLCInfo {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	prev_short_channel_id: u64,
 	prev_htlc_id: u64,
-	prev_counterparty_node_id: Option<PublicKey>,
+	prev_counterparty_node_id: PublicKey,
 	prev_channel_id: ChannelId,
 	prev_funding_outpoint: OutPoint,
 	prev_user_channel_id: u128,
@@ -512,6 +512,7 @@ struct ClaimableHTLC {
 impl From<&ClaimableHTLC> for events::ClaimedHTLC {
 	fn from(val: &ClaimableHTLC) -> Self {
 		events::ClaimedHTLC {
+			counterparty_node_id: val.prev_hop.counterparty_node_id,
 			channel_id: val.prev_hop.channel_id,
 			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
 			cltv_expiry: val.cltv_expiry,
@@ -670,7 +671,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 // (src_channel_id, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id)
 type PerSourcePendingForward =
-	(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
+	(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
 
 type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType);
 
@@ -933,9 +934,19 @@ struct ClaimingPayment {
 	sender_intended_value: Option<u64>,
 	onion_fields: Option<RecipientOnionFields>,
 	payment_id: Option<PaymentId>,
+	/// When we claim and generate a [`Event::PaymentClaimed`], we want to block any
+	/// payment-preimage-removing RAA [`ChannelMonitorUpdate`]s until the [`Event::PaymentClaimed`]
+	/// is handled, ensuring we can regenerate the event on restart. We pick a random channel to
+	/// block and store it here.
+ /// + /// Note that once we disallow downgrades to 0.1 we should be able to simply use + /// [`Self::htlcs`] to generate this rather than storing it here (as we won't need the funding + /// outpoint), allowing us to remove this field. + durable_preimage_channel: Option<(OutPoint, PublicKey, ChannelId)>, } impl_writeable_tlv_based!(ClaimingPayment, { (0, amount_msat, required), + (1, durable_preimage_channel, option), (2, payment_purpose, required), (4, receiver_node_id, required), (5, htlcs, optional_vec), @@ -1082,6 +1093,16 @@ impl ClaimablePayments { .or_insert_with(|| { let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat); + // Pick an "arbitrary" channel to block RAAs on until the `PaymentSent` + // event is processed, specifically the last channel to get claimed. + let durable_preimage_channel = payment.htlcs.last().map_or(None, |htlc| { + if let Some(node_id) = htlc.prev_hop.counterparty_node_id { + Some((htlc.prev_hop.outpoint, node_id, htlc.prev_hop.channel_id)) + } else { + None + } + }); + debug_assert!(durable_preimage_channel.is_some()); ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), payment_purpose: payment.purpose, @@ -1090,6 +1111,7 @@ impl ClaimablePayments { sender_intended_value, onion_fields: payment.onion_fields, payment_id: Some(payment_id), + durable_preimage_channel, } }).clone(); @@ -1201,7 +1223,6 @@ pub(crate) enum MonitorUpdateCompletionAction { /// stored for later processing. FreeOtherChannelImmediately { downstream_counterparty_node_id: PublicKey, - downstream_funding_outpoint: OutPoint, blocking_action: RAAMonitorUpdateBlockingAction, downstream_channel_id: ChannelId, }, @@ -1216,11 +1237,8 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, // *immediately*. However, for simplicity we implement read/write here. (1, FreeOtherChannelImmediately) => { (0, downstream_counterparty_node_id, required), - (2, downstream_funding_outpoint, required), (4, blocking_action, upgradable_required), - // Note that by the time we get past the required read above, downstream_funding_outpoint will be - // filled in, so we can safely unwrap it here. - (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))), + (5, downstream_channel_id, required), }, (2, EmitEventAndFreeOtherChannel) => { (0, event, upgradable_required), @@ -1237,17 +1255,21 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, pub(crate) enum EventCompletionAction { ReleaseRAAChannelMonitorUpdate { counterparty_node_id: PublicKey, - channel_funding_outpoint: OutPoint, + // Was required until LDK 0.2. Always filled in as `Some`. + channel_funding_outpoint: Option, channel_id: ChannelId, }, } impl_writeable_tlv_based_enum!(EventCompletionAction, (0, ReleaseRAAChannelMonitorUpdate) => { - (0, channel_funding_outpoint, required), + (0, channel_funding_outpoint, option), (2, counterparty_node_id, required), - // Note that by the time we get past the required read above, channel_funding_outpoint will be - // filled in, so we can safely unwrap it here. - (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))), + (3, channel_id, (default_value, { + if channel_funding_outpoint.is_none() { + Err(DecodeError::InvalidValue)? 
+ } + ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.unwrap()) + })), } ); @@ -1258,7 +1280,7 @@ impl_writeable_tlv_based_enum!(EventCompletionAction, /// drop this and merge the two, however doing so may break upgrades for nodes which have pending /// forwarded payments. struct HTLCClaimSource { - counterparty_node_id: Option, + counterparty_node_id: PublicKey, funding_txo: OutPoint, channel_id: ChannelId, htlc_id: u64, @@ -1267,7 +1289,7 @@ struct HTLCClaimSource { impl From<&MPPClaimHTLCSource> for HTLCClaimSource { fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource { HTLCClaimSource { - counterparty_node_id: Some(o.counterparty_node_id), + counterparty_node_id: o.counterparty_node_id, funding_txo: o.funding_txo, channel_id: o.channel_id, htlc_id: o.htlc_id, @@ -1277,8 +1299,8 @@ impl From<&MPPClaimHTLCSource> for HTLCClaimSource { #[derive(Debug)] pub(crate) struct PendingMPPClaim { - channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId)>, - channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>, + channels_without_preimage: Vec<(PublicKey, ChannelId)>, + channels_with_preimage: Vec<(PublicKey, ChannelId)>, } #[derive(Clone, Debug, Hash, PartialEq, Eq)] @@ -6149,7 +6171,7 @@ where user_channel_id: Some(payment.prev_user_channel_id), outpoint: payment.prev_funding_outpoint, channel_id: payment.prev_channel_id, - counterparty_node_id: payment.prev_counterparty_node_id, + counterparty_node_id: Some(payment.prev_counterparty_node_id), htlc_id: payment.prev_htlc_id, incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -6322,7 +6344,7 @@ where // proposed to as a batch. let pending_forwards = ( incoming_scid, - Some(incoming_counterparty_node_id), + incoming_counterparty_node_id, incoming_funding_txo, incoming_channel_id, incoming_user_channel_id, @@ -6511,7 +6533,7 @@ where user_channel_id: Some(prev_user_channel_id), channel_id: prev_channel_id, outpoint: prev_funding_outpoint, - counterparty_node_id: prev_counterparty_node_id, + counterparty_node_id: Some(prev_counterparty_node_id), htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret: phantom_ss, @@ -6724,7 +6746,7 @@ where let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), - counterparty_node_id: prev_counterparty_node_id, + counterparty_node_id: Some(prev_counterparty_node_id), channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, @@ -7035,7 +7057,7 @@ where prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), - counterparty_node_id: prev_counterparty_node_id, + counterparty_node_id: Some(prev_counterparty_node_id), channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, @@ -8065,7 +8087,7 @@ where let pending_mpp_claim_ptr_opt = if sources.len() > 1 { let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len()); for part in mpp_parts.iter() { - let chan = (part.counterparty_node_id, part.funding_txo, part.channel_id); + let chan = (part.counterparty_node_id, part.channel_id); if !channels_without_preimage.contains(&chan) { channels_without_preimage.push(chan); } @@ -8080,13 +8102,12 @@ where let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment }); for htlc in sources { let this_mpp_claim = - 
pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| { - if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { - let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); - Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) - } else { - None - } + pending_mpp_claim_ptr_opt.as_ref().map(|pending_mpp_claim| { + let counterparty_id = htlc.prev_hop.counterparty_node_id; + let counterparty_id = counterparty_id + .expect("Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least one claimable payment was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC by claiming the payment prior to upgrading."); + let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); + (counterparty_id, htlc.prev_hop.channel_id, claim_ptr) }); let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| { RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { @@ -8168,6 +8189,14 @@ where let short_to_chan_info = self.short_to_chan_info.read().unwrap(); short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id) }); + let counterparty_node_id = if let Some(node_id) = counterparty_node_id { + node_id + } else { + let payment_hash: PaymentHash = payment_preimage.into(); + panic!( + "Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {payment_hash} (preimage {payment_preimage}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.", + ); + }; let htlc_source = HTLCClaimSource { counterparty_node_id, @@ -8212,18 +8241,13 @@ where const MISSING_MON_ERROR: &'static str = "If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running."; - // Note here that `peer_state_opt` is always `Some` if `prev_hop.counterparty_node_id` is - // `Some`. This is relied on in the closed-channel case below. 
- let mut peer_state_opt = - prev_hop.counterparty_node_id.as_ref().map(|counterparty_node_id| { - per_peer_state - .get(counterparty_node_id) - .map(|peer_mutex| peer_mutex.lock().unwrap()) - .expect(MISSING_MON_ERROR) - }); + let mut peer_state_lock = per_peer_state + .get(&prev_hop.counterparty_node_id) + .map(|peer_mutex| peer_mutex.lock().unwrap()) + .expect(MISSING_MON_ERROR); - if let Some(peer_state_lock) = peer_state_opt.as_mut() { - let peer_state = &mut **peer_state_lock; + { + let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(chan_id) { @@ -8261,7 +8285,7 @@ where self, prev_hop.funding_txo, monitor_update, - peer_state_opt, + peer_state_lock, peer_state, per_peer_state, chan @@ -8312,13 +8336,12 @@ where return; } - mem::drop(peer_state_opt); + mem::drop(peer_state_lock); log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", chan_id, action); if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, - downstream_funding_outpoint: _, blocking_action: blocker, downstream_channel_id: channel_id, } = action @@ -8365,18 +8388,7 @@ where } } - if prev_hop.counterparty_node_id.is_none() { - let payment_hash: PaymentHash = payment_preimage.into(); - panic!( - "Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {} (preimage {}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.", - payment_hash, - payment_preimage, - ); - } - let counterparty_node_id = - prev_hop.counterparty_node_id.expect("Checked immediately above"); - let mut peer_state = peer_state_opt - .expect("peer_state_opt is always Some when the counterparty_node_id is Some"); + let peer_state = &mut *peer_state_lock; let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) @@ -8420,7 +8432,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let payment_hash = payment_preimage.into(); let logger = WithContext::from( &self.logger, - Some(counterparty_node_id), + Some(prev_hop.counterparty_node_id), Some(chan_id), Some(payment_hash), ); @@ -8443,10 +8455,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self, prev_hop.funding_txo, preimage_update, - peer_state, + peer_state_lock, peer_state, per_peer_state, - counterparty_node_id, + prev_hop.counterparty_node_id, chan_id, POST_CHANNEL_CLOSE ); @@ -8502,7 +8514,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "We don't support claim_htlc claims during startup - monitors may not be available yet"); debug_assert_eq!(next_channel_counterparty_node_id, path.hops[0].pubkey); let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint: next_channel_outpoint, + channel_funding_outpoint: Some(next_channel_outpoint), channel_id: next_channel_id, counterparty_node_id: path.hops[0].pubkey, }; @@ -8606,7 +8618,6 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(other_chan) = chan_to_release { (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: other_chan.counterparty_node_id, - downstream_funding_outpoint: other_chan.funding_txo, downstream_channel_id: other_chan.channel_id, blocking_action: other_chan.blocking_action, }), None) @@ -8680,17 +8691,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if *pending_claim == claim_ptr { let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); let pending_claim_state = &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain(|(cp, op, cid)| { + pending_claim_state.channels_without_preimage.retain(|(cp, cid)| { let this_claim = *cp == counterparty_node_id && *cid == chan_id; if this_claim { - pending_claim_state.channels_with_preimage.push((*cp, *op, *cid)); + pending_claim_state.channels_with_preimage.push((*cp, *cid)); false } else { true } }); if pending_claim_state.channels_without_preimage.is_empty() { - for (cp, op, cid) in pending_claim_state.channels_with_preimage.iter() { - let freed_chan = (*cp, *op, *cid, blocker.clone()); + for (cp, cid) in pending_claim_state.channels_with_preimage.iter() { + let freed_chan = (*cp, *cid, blocker.clone()); freed_channels.push(freed_chan); } } @@ -8714,6 +8725,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ sender_intended_value: sender_intended_total_msat, onion_fields, payment_id, + durable_preimage_channel, }) = payment { let event = events::Event::PaymentClaimed { payment_hash, @@ -8725,7 +8737,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ onion_fields, payment_id, }; - let event_action = (event, None); + let action = if let Some((outpoint, counterparty_node_id, channel_id)) + = durable_preimage_channel + { + Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: Some(outpoint), + counterparty_node_id, + channel_id, + }) + } else { + None + }; + let event_action = (event, action); let mut pending_events = self.pending_events.lock().unwrap(); // If we're replaying a claim on startup we may end up duplicating an event // that's already in our queue, so check before we push another one. The @@ -8742,17 +8765,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.pending_events.lock().unwrap().push_back((event, None)); if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { self.handle_monitor_update_release( - unblocked.counterparty_node_id, unblocked.funding_txo, - unblocked.channel_id, Some(unblocked.blocking_action), + unblocked.counterparty_node_id, + unblocked.channel_id, + Some(unblocked.blocking_action), ); } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action, + downstream_counterparty_node_id, downstream_channel_id, blocking_action, } => { self.handle_monitor_update_release( downstream_counterparty_node_id, - downstream_funding_outpoint, downstream_channel_id, Some(blocking_action), ); @@ -8760,8 +8783,8 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } - for (node_id, funding_outpoint, channel_id, blocker) in freed_channels { - self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker)); + for (node_id, channel_id, blocker) in freed_channels { + self.handle_monitor_update_release(node_id, channel_id, Some(blocker)); } } @@ -8775,7 +8798,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ funding_broadcastable: Option, channel_ready: Option, announcement_sigs: Option, tx_signatures: Option, tx_abort: Option, - ) -> (Option<(u64, Option, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { let logger = WithChannelContext::from(&self.logger, &channel.context, None); log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", &channel.context.channel_id(), @@ -8795,7 +8818,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut htlc_forwards = None; if !pending_forwards.is_empty() { htlc_forwards = Some(( - short_channel_id, Some(channel.context.get_counterparty_node_id()), + short_channel_id, channel.context.get_counterparty_node_id(), channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards )); @@ -10468,23 +10491,22 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid ); + let routing = &forward_info.routing; let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), - counterparty_node_id: prev_counterparty_node_id, + counterparty_node_id: Some( + prev_counterparty_node_id, + ), outpoint: prev_funding_outpoint, channel_id: prev_channel_id, htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info .incoming_shared_secret, phantom_shared_secret: None, - blinded_failure: forward_info - .routing - .blinded_failure(), - cltv_expiry: forward_info - .routing - .incoming_cltv_expiry(), + blinded_failure: routing.blinded_failure(), + cltv_expiry: routing.incoming_cltv_expiry(), }); let payment_hash = forward_info.payment_hash; @@ -10544,16 +10566,20 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ #[rustfmt::skip] fn raa_monitor_updates_held(&self, actions_blocking_raa_monitor_updates: &BTreeMap>, - channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey + channel_id: ChannelId, counterparty_node_id: PublicKey, ) -> bool { actions_blocking_raa_monitor_updates .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false) || self.pending_events.lock().unwrap().iter().any(|(_, action)| { - action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint, - channel_id, - counterparty_node_id, - }) + if let Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: _, + channel_id: ev_channel_id, + counterparty_node_id: ev_counterparty_node_id + }) = action { + *ev_channel_id == channel_id && *ev_counterparty_node_id == counterparty_node_id + } else { + false + } }) } @@ -10566,14 +10592,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut peer_state_lck = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lck; - if let Some(chan) = peer_state.channel_by_id.get(&channel_id) { - return self.raa_monitor_updates_held( - &peer_state.actions_blocking_raa_monitor_updates, - chan.funding().get_funding_txo().unwrap(), - channel_id, - counterparty_node_id, - ); - } + assert!(peer_state.channel_by_id.contains_key(&channel_id)); + return self.raa_monitor_updates_held( + &peer_state.actions_blocking_raa_monitor_updates, + channel_id, + counterparty_node_id, + ); } false } @@ -10593,11 +10617,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); let funding_txo_opt = chan.funding.get_funding_txo(); - let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { - self.raa_monitor_updates_held( - &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id, - *counterparty_node_id) - } else { false }; + let mon_update_blocked = self.raa_monitor_updates_held( + &peer_state.actions_blocking_raa_monitor_updates, msg.channel_id, + *counterparty_node_id); let (htlcs_to_fail, monitor_update_opt) = try_channel_entry!(self, peer_state, chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_entry); if let Some(monitor_update) = monitor_update_opt { @@ -12582,10 +12604,10 @@ where /// operation. It will double-check that nothing *else* is also blocking the same channel from /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly. #[rustfmt::skip] - fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, - channel_funding_outpoint: OutPoint, channel_id: ChannelId, - mut completed_blocker: Option) { - + fn handle_monitor_update_release( + &self, counterparty_node_id: PublicKey, channel_id: ChannelId, + mut completed_blocker: Option, + ) { let logger = WithContext::from( &self.logger, Some(counterparty_node_id), Some(channel_id), None ); @@ -12604,7 +12626,7 @@ where } if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - channel_funding_outpoint, channel_id, counterparty_node_id) { + channel_id, counterparty_node_id) { // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete. 
@@ -12616,7 +12638,7 @@ where
 		if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(
 			channel_id) {
 			if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
-				debug_assert_eq!(chan.funding.get_funding_txo().unwrap(), channel_funding_outpoint);
+				let channel_funding_outpoint = chan.funding_outpoint();
 				if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
 					log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
 						channel_id);
@@ -12646,16 +12668,11 @@ where
 		for action in actions {
 			match action {
 				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-					channel_funding_outpoint,
+					channel_funding_outpoint: _,
 					channel_id,
 					counterparty_node_id,
 				} => {
-					self.handle_monitor_update_release(
-						counterparty_node_id,
-						channel_funding_outpoint,
-						channel_id,
-						None,
-					);
+					self.handle_monitor_update_release(counterparty_node_id, channel_id, None);
 				},
 			}
 		}
@@ -13470,7 +13487,7 @@ where
 				htlc_id: htlc.prev_htlc_id,
 				incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
 				phantom_shared_secret: None,
-				counterparty_node_id: htlc.prev_counterparty_node_id,
+				counterparty_node_id: Some(htlc.prev_counterparty_node_id),
 				outpoint: htlc.prev_funding_outpoint,
 				channel_id: htlc.prev_channel_id,
 				blinded_failure: htlc.forward_info.routing.blinded_failure(),
@@ -14981,7 +14998,7 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
 	// Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
 	// filled in, so we can safely unwrap it here.
 	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
-	(9, prev_counterparty_node_id, option),
+	(9, prev_counterparty_node_id, required),
 });
 
 impl Writeable for HTLCForwardInfo {
@@ -16429,7 +16446,9 @@ where
 					// `ChannelMonitor` is removed.
 					let compl_action =
 						EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-							channel_funding_outpoint: monitor.get_funding_txo(),
+							channel_funding_outpoint: Some(
+								monitor.get_funding_txo(),
+							),
 							channel_id: monitor.channel_id(),
 							counterparty_node_id: path.hops[0].pubkey,
 						};
@@ -16932,13 +16951,7 @@ where
 				let mut channels_without_preimage = payment_claim
 					.mpp_parts
 					.iter()
-					.map(|htlc_info| {
-						(
-							htlc_info.counterparty_node_id,
-							htlc_info.funding_txo,
-							htlc_info.channel_id,
-						)
-					})
+					.map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id))
 					.collect::<Vec<_>>();
 				// If we have multiple MPP parts which were received over the same channel,
 				// we only track it once as once we get a preimage durably in the
@@ -17124,6 +17137,10 @@ where
 							onion_fields: payment.onion_fields,
 							payment_id: Some(payment_id),
 						},
+						// Note that we don't bother adding an EventCompletionAction here to
+						// ensure the `PaymentClaimed` event is durably processed as this
+						// should only be hit for particularly old channels and we don't have
+						// enough information to generate such an action.
 						None,
 					));
 				}
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 13b9301084b..c903424afa6 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -3211,3 +3211,65 @@ fn test_update_replay_panics() {
 	monitor.update_monitor(&updates[2], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
 	monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
 }
+
+#[test]
+fn test_claim_event_never_handled() {
+	// When a payment is claimed, the `ChannelMonitorUpdate` containing the payment preimage goes
+	// out and when it completes the `PaymentClaimed` event is generated. If the channel then
+	// progresses forward a few steps, the payment preimage will then eventually be removed from
+	// the channel. By that point, we have to make sure that the `PaymentClaimed` event has been
+	// handled (which ensures the user has marked the payment as received).
+	// Otherwise, it is possible that, on restart, we load with a stale `ChannelManager` which
+	// doesn't have the `PaymentClaimed` event and needs to rebuild it from the
+	// `ChannelMonitor`'s payment information and preimage.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let persister;
+	let new_chain_mon;
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes_1_reload;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	let init_node_ser = nodes[1].node.encode();
+
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	// Send the payment we'll ultimately test the PaymentClaimed event for.
+	let (preimage_a, payment_hash_a, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	nodes[1].node.claim_funds(preimage_a);
+	check_added_monitors(&nodes[1], 1);
+
+	let mut updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
+	nodes[0].node.handle_update_fulfill_htlc(node_b_id, updates.update_fulfill_htlcs.remove(0));
+	expect_payment_sent(&nodes[0], preimage_a, None, false, false);
+
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed);
+	check_added_monitors(&nodes[0], 1);
+
+	// Once the `PaymentClaimed` event is generated, further RAA `ChannelMonitorUpdate`s will be
+	// blocked until it is handled, ensuring we never get far enough to remove the preimage.
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &cs);
+	check_added_monitors(&nodes[1], 0);
+
+	// The last RAA here should be blocked waiting on us to handle the PaymentClaimed event before
+	// continuing. Otherwise, we'd be able to make enough progress that the payment preimage is
+	// removed from node B's `ChannelMonitor`. This leaves us unable to make further progress.
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Finally, reload node B with an empty `ChannelManager` and check that we get the
+	// `PaymentClaimed` event.
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan.2).encode(); + let mons = &[&chan_0_monitor_serialized[..]]; + reload_node!(nodes[1], &init_node_ser, mons, persister, new_chain_mon, nodes_1_reload); + + expect_payment_claimed!(nodes[1], payment_hash_a, 1_000_000); + // The reload logic spuriously generates a redundant payment preimage-containing + // `ChannelMonitorUpdate`. + check_added_monitors(&nodes[1], 2); +} diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 17b6535d95b..211e79adb6d 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -197,6 +197,7 @@ fn test_quiescence_waits_for_async_signer_and_monitor_update() { let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount); nodes[1].node.claim_funds(preimage); check_added_monitors(&nodes[1], 1); + expect_payment_claimed!(&nodes[1], payment_hash, payment_amount); let mut update = get_htlc_update_msgs!(&nodes[1], node_id_0); nodes[0].node.handle_update_fulfill_htlc(node_id_1, update.update_fulfill_htlcs.remove(0)); @@ -223,8 +224,6 @@ fn test_quiescence_waits_for_async_signer_and_monitor_update() { nodes[1].enable_channel_signer_op(&node_id_0, &chan_id, SignerOp::ReleaseCommitmentSecret); nodes[1].node.signer_unblocked(Some((node_id_0, chan_id))); - expect_payment_claimed!(&nodes[1], payment_hash, payment_amount); - macro_rules! find_msg { ($events: expr, $msg: ident) => {{ $events @@ -418,14 +417,11 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { if fail_htlc { nodes[0].node.handle_update_fail_htlc(node_id_1, &update.update_fail_htlcs[0]); } else { + expect_payment_claimed!(nodes[1], payment_hash2, payment_amount); nodes[0].node.handle_update_fulfill_htlc(node_id_1, update.update_fulfill_htlcs.remove(0)); } commitment_signed_dance!(&nodes[0], &nodes[1], update.commitment_signed, false); - if !fail_htlc { - expect_payment_claimed!(nodes[1], payment_hash2, payment_amount); - } - // The payment from nodes[0] should now be seen as failed/successful. let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -454,6 +450,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { if fail_htlc { nodes[1].node.handle_update_fail_htlc(node_id_0, &update.update_fail_htlcs[0]); } else { + expect_payment_claimed!(nodes[0], payment_hash1, payment_amount); nodes[1].node.handle_update_fulfill_htlc(node_id_0, update.update_fulfill_htlcs.remove(0)); } commitment_signed_dance!(&nodes[1], &nodes[0], update.commitment_signed, false); @@ -463,7 +460,6 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { let conditions = PaymentFailedConditions::new(); expect_payment_failed_conditions(&nodes[1], payment_hash1, true, conditions); } else { - expect_payment_claimed!(nodes[0], payment_hash1, payment_amount); expect_payment_sent(&nodes[1], payment_preimage1, None, true, true); } }
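
Usage sketch for the new `ClaimedHTLC::counterparty_node_id` field added in events/mod.rs above: a minimal `Event::PaymentClaimed` handler that reads the field. The event variant and struct fields come from the diff; the `log_claimed_htlcs` function, its printing, and the exact import path are illustrative assumptions, not part of this PR.

use lightning::events::Event;

// Illustrative only: inspect the claimed HTLCs attached to a PaymentClaimed event.
fn log_claimed_htlcs(event: &Event) {
	if let Event::PaymentClaimed { payment_hash, htlcs, .. } = event {
		for htlc in htlcs {
			// `counterparty_node_id` is `None` only for objects serialized by LDK
			// versions prior to 0.2 (see the doc comment added in events/mod.rs).
			match htlc.counterparty_node_id {
				Some(peer) => println!(
					"Claimed {} msat of payment {} over channel {} from peer {}",
					htlc.value_msat, payment_hash, htlc.channel_id, peer
				),
				None => println!(
					"Claimed {} msat of payment {} over channel {} (peer unknown, pre-0.2 data)",
					htlc.value_msat, payment_hash, htlc.channel_id
				),
			}
		}
	}
}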
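
The `EventCompletionAction` serialization change above keeps writing `channel_funding_outpoint` while reading it as an `Option`, and derives a missing `channel_id` from the outpoint (failing the read if both are absent). A standalone restatement of that decode-time fallback, written outside the `impl_writeable_tlv_based_enum!` macro; the helper function name and import paths are assumptions, while `ChannelId::v1_from_funding_outpoint` and `DecodeError::InvalidValue` are existing LDK items used in the diff itself.

use lightning::chain::transaction::OutPoint;
use lightning::ln::msgs::DecodeError;
use lightning::ln::types::ChannelId;

// Hypothetical helper mirroring the TLV read logic: older writers always included
// the funding outpoint, so a missing channel_id can be derived from it; if neither
// field is present the action is unreadable.
fn channel_id_for_read_action(
	channel_id: Option<ChannelId>, channel_funding_outpoint: Option<OutPoint>,
) -> Result<ChannelId, DecodeError> {
	match (channel_id, channel_funding_outpoint) {
		(Some(id), _) => Ok(id),
		(None, Some(outpoint)) => Ok(ChannelId::v1_from_funding_outpoint(outpoint)),
		(None, None) => Err(DecodeError::InvalidValue),
	}
}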
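
On the `raa_monitor_updates_held` change: pending event-completion actions are now matched by peer and channel id rather than by equality against a fully reconstructed action (which required the funding outpoint). A toy, self-contained restatement of just that matching rule with made-up types; the real code also consults `actions_blocking_raa_monitor_updates`, which is omitted here.

// Toy types standing in for LDK's PublicKey / ChannelId / EventCompletionAction.
#[derive(Clone, Copy, PartialEq, Eq)]
struct PeerId(u8);
#[derive(Clone, Copy, PartialEq, Eq)]
struct ChanId(u8);

struct PendingEventAction {
	counterparty_node_id: PeerId,
	channel_id: ChanId,
	// A funding outpoint may still be carried for serialization, but is ignored here.
}

fn raa_updates_held(pending_actions: &[PendingEventAction], chan: ChanId, peer: PeerId) -> bool {
	pending_actions
		.iter()
		.any(|a| a.channel_id == chan && a.counterparty_node_id == peer)
}

fn main() {
	let pending = [PendingEventAction { counterparty_node_id: PeerId(1), channel_id: ChanId(7) }];
	assert!(raa_updates_held(&pending, ChanId(7), PeerId(1)));
	assert!(!raa_updates_held(&pending, ChanId(7), PeerId(2)));
}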