diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 0595d6d99eb..1cb1c21340b 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -211,7 +211,9 @@ enum InboundHTLCState {
     /// channel (before it can then get forwarded and/or removed).
     /// Implies AwaitingRemoteRevoke.
     AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
-    Committed,
+    Committed {
+        update_add_htlc_opt: Option<msgs::UpdateAddHTLC>,
+    },
     /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
     /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
     /// we'll drop it.
@@ -234,7 +236,7 @@ impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
            Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
        InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
            Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
-       InboundHTLCState::Committed =>
+       InboundHTLCState::Committed { .. } =>
            Some(InboundHTLCStateDetails::Committed),
        InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
            Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
@@ -253,7 +255,7 @@ impl fmt::Display for InboundHTLCState {
            InboundHTLCState::RemoteAnnounced(_) => write!(f, "RemoteAnnounced"),
            InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => write!(f, "AwaitingRemoteRevokeToAnnounce"),
            InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => write!(f, "AwaitingAnnouncedRemoteRevoke"),
-           InboundHTLCState::Committed => write!(f, "Committed"),
+           InboundHTLCState::Committed { .. } => write!(f, "Committed"),
            InboundHTLCState::LocalRemoved(_) => write!(f, "LocalRemoved"),
        }
    }
@@ -265,7 +267,7 @@ impl InboundHTLCState {
            InboundHTLCState::RemoteAnnounced(_) => !generated_by_local,
            InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => !generated_by_local,
            InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => true,
-           InboundHTLCState::Committed => true,
+           InboundHTLCState::Committed { .. } => true,
            InboundHTLCState::LocalRemoved(_) => !generated_by_local,
        }
    }
@@ -293,7 +295,7 @@ impl InboundHTLCState {
                },
                InboundHTLCResolution::Resolved { .. } => false,
            },
-           InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false,
+           InboundHTLCState::Committed { .. } | InboundHTLCState::LocalRemoved(_) => false,
        }
    }
 }
@@ -4091,7 +4093,7 @@ where
        if self.pending_inbound_htlcs.iter()
            .any(|htlc| match htlc.state {
-               InboundHTLCState::Committed => false,
+               InboundHTLCState::Committed { .. } => false,
                // An HTLC removal from the local node is pending on the remote commitment.
                InboundHTLCState::LocalRemoved(_) => true,
                // An HTLC add from the remote node is pending on the local commitment.
@@ -4520,7 +4522,7 @@ where
                (InboundHTLCState::RemoteAnnounced(..), _) => true,
                (InboundHTLCState::AwaitingRemoteRevokeToAnnounce(..), _) => true,
                (InboundHTLCState::AwaitingAnnouncedRemoteRevoke(..), _) => true,
-               (InboundHTLCState::Committed, _) => true,
+               (InboundHTLCState::Committed { .. }, _) => true,
                (InboundHTLCState::LocalRemoved(..), true) => true,
                (InboundHTLCState::LocalRemoved(..), false) => false,
            })
@@ -7303,7 +7305,7 @@ where
            payment_preimage_arg
        );
        match htlc.state {
-           InboundHTLCState::Committed => {},
+           InboundHTLCState::Committed { .. } => {},
            InboundHTLCState::LocalRemoved(ref reason) => {
                if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason {
                } else {
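Editor's note on the variant change above: a minimal, self-contained sketch (stand-in types, not LDK's) of why switching the unit `Committed` variant to a struct variant stays source-compatible for existing match arms while letting new code recover the stored `update_add`:

```rust
// Simplified stand-in types; the real LDK enum and message type carry more data.
#[derive(Clone, Debug)]
struct UpdateAdd { htlc_id: u64 }

enum InboundState {
    // Struct-style variant: the original `update_add` is retained (None for HTLCs
    // committed before the upgrade or promoted through the legacy resolution path).
    Committed { update_add_opt: Option<UpdateAdd> },
    LocalRemoved,
}

fn is_committed(state: &InboundState) -> bool {
    // `{ .. }` ignores the payload, so call sites that only care about the state
    // keep compiling after the field is added.
    matches!(state, InboundState::Committed { .. })
}

fn committed_update_adds(states: &[InboundState]) -> Vec<UpdateAdd> {
    states
        .iter()
        .filter_map(|s| match s {
            InboundState::Committed { update_add_opt } => update_add_opt.clone(),
            _ => None,
        })
        .collect()
}

fn main() {
    let states = vec![
        InboundState::Committed { update_add_opt: Some(UpdateAdd { htlc_id: 7 }) },
        InboundState::Committed { update_add_opt: None },
        InboundState::LocalRemoved,
    ];
    assert!(is_committed(&states[0]));
    let adds = committed_update_adds(&states);
    assert_eq!(adds.len(), 1);
    assert_eq!(adds[0].htlc_id, 7);
}
```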
@@ -7396,7 +7398,7 @@ where
    {
        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-       if let InboundHTLCState::Committed = htlc.state {
+       if let InboundHTLCState::Committed { .. } = htlc.state {
        } else {
            debug_assert!(
                false,
@@ -7531,7 +7533,7 @@ where
        for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
            if htlc.htlc_id == htlc_id_arg {
                match htlc.state {
-                   InboundHTLCState::Committed => {},
+                   InboundHTLCState::Committed { .. } => {},
                    InboundHTLCState::LocalRemoved(_) => {
                        return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id)));
                    },
@@ -7754,6 +7756,20 @@ where
        Ok(())
    }
 
+   /// Useful for reconstructing forwarded HTLCs when deserializing the `ChannelManager`.
+   pub(super) fn get_inbound_committed_update_adds(&self) -> Vec<msgs::UpdateAddHTLC> {
+       self.context
+           .pending_inbound_htlcs
+           .iter()
+           .filter_map(|htlc| match htlc.state {
+               InboundHTLCState::Committed { ref update_add_htlc_opt } => {
+                   update_add_htlc_opt.clone()
+               },
+               _ => None,
+           })
+           .collect()
+   }
+
    /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
    #[inline]
    #[rustfmt::skip]
@@ -8690,7 +8706,7 @@ where
                false
            };
            if swap {
-               let mut state = InboundHTLCState::Committed;
+               let mut state = InboundHTLCState::Committed { update_add_htlc_opt: None };
                mem::swap(&mut state, &mut htlc.state);
 
                if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
@@ -8729,14 +8745,19 @@ where
                            PendingHTLCStatus::Forward(forward_info) => {
                                log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
                                to_forward_infos.push((forward_info, htlc.htlc_id));
-                               htlc.state = InboundHTLCState::Committed;
+                               // TODO: this is currently unreachable; is that okay, or will we lose a forward?
+                               htlc.state = InboundHTLCState::Committed {
+                                   update_add_htlc_opt: None,
+                               };
                            },
                        }
                    },
                    InboundHTLCResolution::Pending { update_add_htlc } => {
                        log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
-                       pending_update_adds.push(update_add_htlc);
-                       htlc.state = InboundHTLCState::Committed;
+                       pending_update_adds.push(update_add_htlc.clone());
+                       htlc.state = InboundHTLCState::Committed {
+                           update_add_htlc_opt: Some(update_add_htlc),
+                       };
                    },
                }
            }
@@ -9266,7 +9287,7 @@ where
                    // in response to it yet, so don't touch it.
                    true
                },
-               InboundHTLCState::Committed => true,
+               InboundHTLCState::Committed { .. } => true,
                InboundHTLCState::LocalRemoved(_) => {
                    // We (hopefully) sent a commitment_signed updating this HTLC (which we can
                    // re-transmit if needed) and they may have even sent a revoke_and_ack back
@@ -14467,6 +14488,7 @@ where
            }
        }
        let mut removed_htlc_attribution_data: Vec<&Option<AttributionData>> = Vec::new();
+       let mut inbound_committed_update_adds: Vec<Option<msgs::UpdateAddHTLC>> = Vec::new();
        (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
        for htlc in self.context.pending_inbound_htlcs.iter() {
            if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
@@ -14486,8 +14508,9 @@ where
                    2u8.write(writer)?;
                    htlc_resolution.write(writer)?;
                },
-               &InboundHTLCState::Committed => {
+               &InboundHTLCState::Committed { ref update_add_htlc_opt } => {
                    3u8.write(writer)?;
+                   inbound_committed_update_adds.push(update_add_htlc_opt.clone());
                },
                &InboundHTLCState::LocalRemoved(ref removal_reason) => {
                    4u8.write(writer)?;
@@ -14860,6 +14883,7 @@ where
            (69, holding_cell_held_htlc_flags, optional_vec), // Added in 0.2
            (71, holder_commitment_point_previous_revoked, option), // Added in 0.3
            (73, holder_commitment_point_last_revoked, option), // Added in 0.3
+           (75, inbound_committed_update_adds, optional_vec),
        });
 
        Ok(())
@@ -14943,7 +14967,7 @@ where
                };
                InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
            },
-           3 => InboundHTLCState::Committed,
+           3 => InboundHTLCState::Committed { update_add_htlc_opt: None },
            4 => {
                let reason = match <u8 as Readable>::read(reader)? {
                    0 => InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket {
@@ -15229,6 +15253,7 @@ where
    let mut pending_outbound_held_htlc_flags_opt: Option>> = None;
    let mut holding_cell_held_htlc_flags_opt: Option>> = None;
+   let mut inbound_committed_update_adds_opt: Option<Vec<Option<msgs::UpdateAddHTLC>>> = None;
 
    read_tlv_fields!(reader, {
        (0, announcement_sigs, option),
@@ -15278,6 +15303,7 @@
        (69, holding_cell_held_htlc_flags_opt, optional_vec), // Added in 0.2
        (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3
        (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3
+       (75, inbound_committed_update_adds_opt, optional_vec),
    });
 
    let holder_signer = signer_provider.derive_channel_signer(channel_keys_id);
@@ -15401,6 +15427,17 @@ where
            return Err(DecodeError::InvalidValue);
        }
    }
+   if let Some(update_adds) = inbound_committed_update_adds_opt {
+       let mut iter = update_adds.into_iter();
+       for htlc in pending_inbound_htlcs.iter_mut() {
+           if let InboundHTLCState::Committed { ref mut update_add_htlc_opt } = htlc.state {
+               *update_add_htlc_opt = iter.next().ok_or(DecodeError::InvalidValue)?;
+           }
+       }
+       if iter.next().is_some() {
+           return Err(DecodeError::InvalidValue);
+       }
+   }
 
    if let Some(attribution_data_list) = removed_htlc_attribution_data {
        let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| {
@@ -15985,7 +16022,7 @@ mod tests {
            amount_msat: htlc_amount_msat,
            payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
            cltv_expiry: 300000000,
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
@@ -16831,7 +16868,7 @@ mod tests {
            amount_msat: 1000000,
            cltv_expiry: 500,
            payment_hash: PaymentHash::from(payment_preimage_0),
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        let payment_preimage_1 =
@@ -16841,7 +16878,7 @@ mod tests {
            amount_msat: 2000000,
            cltv_expiry: 501,
            payment_hash: PaymentHash::from(payment_preimage_1),
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        let payment_preimage_2 =
@@ -16881,7 +16918,7 @@ mod tests {
            amount_msat: 4000000,
            cltv_expiry: 504,
            payment_hash: PaymentHash::from(payment_preimage_4),
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        // commitment tx with all five HTLCs untrimmed (minimum feerate)
@@ -17270,7 +17307,7 @@ mod tests {
            amount_msat: 2000000,
            cltv_expiry: 501,
            payment_hash: PaymentHash::from(payment_preimage_1),
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        chan.context.pending_outbound_htlcs.clear();
@@ -17521,7 +17558,7 @@ mod tests {
            amount_msat: 5000000,
            cltv_expiry: 920150,
            payment_hash: PaymentHash::from(htlc_in_preimage),
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        }));
 
        chan.context.pending_outbound_htlcs.extend(
@@ -17595,7 +17632,7 @@ mod tests {
            amount_msat: 100000,
            cltv_expiry: 920125,
            payment_hash: htlc_0_in_hash,
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        let htlc_1_in_preimage =
@@ -17613,7 +17650,7 @@ mod tests {
            amount_msat: 49900000,
            cltv_expiry: 920125,
            payment_hash: htlc_1_in_hash,
-           state: InboundHTLCState::Committed,
+           state: InboundHTLCState::Committed { update_add_htlc_opt: None },
        });
 
        chan.context.pending_outbound_htlcs.extend(
@@ -17721,7 +17758,7 @@ mod tests {
                amount_msat,
                cltv_expiry,
                payment_hash,
-               state: InboundHTLCState::Committed,
+               state: InboundHTLCState::Committed { update_add_htlc_opt: None },
            }),
        );
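Before the `channelmanager.rs` hunks, an aside on the invariant the new TLV (type 75) above relies on: one `Option` is written per `Committed` HTLC, in iteration order, and the read side re-attaches them in the same order, treating any count mismatch as a decode error. A simplified, runnable model of that re-association (stand-in types, not the LDK structs):

```rust
// Simplified model of the ordered re-association done on read: one Option per
// `Committed` entry, written in iteration order.
#[derive(Clone, Debug)]
struct UpdateAdd { htlc_id: u64 }

#[derive(Debug)]
enum State {
    Committed { update_add_opt: Option<UpdateAdd> },
    Other,
}

#[derive(Debug)]
struct DecodeError;

fn reattach(states: &mut [State], written: Vec<Option<UpdateAdd>>) -> Result<(), DecodeError> {
    let mut iter = written.into_iter();
    for state in states.iter_mut() {
        if let State::Committed { update_add_opt } = state {
            // Each `Committed` HTLC must have exactly one (possibly-None) entry.
            *update_add_opt = iter.next().ok_or(DecodeError)?;
        }
    }
    // Leftover entries mean the serialized vec and the HTLC list disagree.
    if iter.next().is_some() {
        return Err(DecodeError);
    }
    Ok(())
}

fn main() {
    let mut states = vec![
        State::Committed { update_add_opt: None },
        State::Other,
        State::Committed { update_add_opt: None },
    ];
    assert!(reattach(&mut states, vec![Some(UpdateAdd { htlc_id: 1 }), None]).is_ok());
    match &states[0] {
        State::Committed { update_add_opt } => {
            assert_eq!(update_add_opt.as_ref().map(|u| u.htlc_id), Some(1));
        },
        _ => unreachable!(),
    }
    // One entry too many for two `Committed` HTLCs -> decode error.
    assert!(reattach(&mut states, vec![None, None, None]).is_err());
}
```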
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 0a13d2312b2..900badf2521 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -11509,6 +11509,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
        if !new_intercept_events.is_empty() {
            let mut events = self.pending_events.lock().unwrap();
+           new_intercept_events.retain(|new_ev| !events.contains(new_ev));
            events.append(&mut new_intercept_events);
        }
    }
@@ -16777,6 +16778,33 @@ where
    }
 }
 
+// If the HTLC corresponding to `prev_hop_data` is present in `decode_update_add_htlcs`, remove it
+// from the map as it is already being stored and processed elsewhere.
+fn dedup_decode_update_add_htlcs<L: Deref>(
+   decode_update_add_htlcs: &mut HashMap<u64, Vec<msgs::UpdateAddHTLC>>,
+   prev_hop_data: &HTLCPreviousHopData, removal_reason: &'static str, logger: &L,
+) where
+   L::Target: Logger,
+{
+   decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| {
+       update_add_htlcs.retain(|update_add| {
+           let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias
+               && update_add.htlc_id == prev_hop_data.htlc_id;
+           if matches {
+               let logger = WithContext::from(
+                   logger,
+                   prev_hop_data.counterparty_node_id,
+                   Some(update_add.channel_id),
+                   Some(update_add.payment_hash),
+               );
+               log_info!(logger, "Removing pending to-decode HTLC: {}", removal_reason);
+           }
+           !matches
+       });
+       !update_add_htlcs.is_empty()
+   });
+}
+
 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
 // SipmleArcChannelManager type:
 impl<
@@ -17126,7 +17154,11 @@ where
        const MAX_ALLOC_SIZE: usize = 1024 * 64;
 
        let forward_htlcs_count: u64 = Readable::read(reader)?;
-       let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+       // This map is read but may no longer be used because we'll attempt to rebuild `forward_htlcs`
+       // from the `Channel{Monitor}`s instead, as a step towards getting rid of `ChannelManager`
+       // persistence.
+       let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> =
+           hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
        for _ in 0..forward_htlcs_count {
            let short_channel_id = Readable::read(reader)?;
            let pending_forwards_count: u64 = Readable::read(reader)?;
@@ -17137,7 +17169,7 @@ where
            for _ in 0..pending_forwards_count {
                pending_forwards.push(Readable::read(reader)?);
            }
-           forward_htlcs.insert(short_channel_id, pending_forwards);
+           forward_htlcs_legacy.insert(short_channel_id, pending_forwards);
        }
 
        let claimable_htlcs_count: u64 = Readable::read(reader)?;
@@ -17225,12 +17257,18 @@ where
            };
        }
 
+       // Some maps are read but may no longer be used because we attempt to rebuild pending HTLC
+       // forwards from the `Channel{Monitor}`s instead, as a step towards getting rid of
+       // `ChannelManager` persistence.
+       let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
+           Some(new_hash_map());
+       let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> =
+           None;
+
        // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
        let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
        let mut pending_outbound_payments = None;
-       let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
-           Some(new_hash_map());
        let mut received_network_pubkey: Option<PublicKey> = None;
        let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
        let mut probing_cookie_secret: Option<[u8; 32]> = None;
@@ -17248,13 +17286,12 @@ where
        let mut in_flight_monitor_updates: Option<
            HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>,
        > = None;
-       let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
        let mut inbound_payment_id_secret = None;
        let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
        let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new();
        read_tlv_fields!(reader, {
            (1, pending_outbound_payments_no_retry, option),
-           (2, pending_intercepted_htlcs, option),
+           (2, pending_intercepted_htlcs_legacy, option),
            (3, pending_outbound_payments, option),
            (4, pending_claiming_payments, option),
            (5, received_network_pubkey, option),
@@ -17265,13 +17302,15 @@
            (10, legacy_in_flight_monitor_updates, option),
            (11, probing_cookie_secret, option),
            (13, claimable_htlc_onion_fields, optional_vec),
-           (14, decode_update_add_htlcs, option),
+           (14, decode_update_add_htlcs_legacy, option),
            (15, inbound_payment_id_secret, option),
            (17, in_flight_monitor_updates, option),
            (19, peer_storage_dir, optional_vec),
            (21, async_receive_offer_cache, (default_value, async_receive_offer_cache)),
        });
-       let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
+       let mut decode_update_add_htlcs_legacy =
+           decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map());
+       let mut decode_update_add_htlcs = new_hash_map();
        let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
        if fake_scid_rand_bytes.is_none() {
            fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
        }
@@ -17582,7 +17621,25 @@ where
            if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
                let mut peer_state_lock = peer_state_mtx.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
+               is_channel_closed = match peer_state.channel_by_id.get(channel_id) {
+                   Some(chan) => {
+                       if let Some(funded_chan) = chan.as_funded() {
+                           let inbound_committed_update_adds =
+                               funded_chan.get_inbound_committed_update_adds();
+                           if !inbound_committed_update_adds.is_empty() {
+                               // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
+                               // `Channel`. We are moving away from writing the `decode_update_add_htlcs` map as
+                               // part of getting rid of `ChannelManager` persistence.
+                               decode_update_add_htlcs.insert(
+                                   funded_chan.context.outbound_scid_alias(),
+                                   inbound_committed_update_adds,
+                               );
+                           }
+                       }
+                       false
+                   },
+                   None => true,
+               };
            }
 
            if is_channel_closed {
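The next hunk swaps the open-coded `retain` for calls to the `dedup_decode_update_add_htlcs` helper introduced earlier in this file. A toy model of its semantics, using simplified stand-in types rather than LDK's maps:

```rust
use std::collections::HashMap;

// Toy model: the map is keyed by the previous hop's outbound SCID alias and each
// entry holds the HTLC ids still awaiting onion decode.
fn dedup(map: &mut HashMap<u64, Vec<u64>>, prev_scid_alias: u64, prev_htlc_id: u64) {
    map.retain(|scid_alias, htlc_ids| {
        htlc_ids.retain(|htlc_id| {
            // Drop the entry that is already tracked elsewhere (forwarded,
            // failed back, or claimable).
            !(*scid_alias == prev_scid_alias && *htlc_id == prev_htlc_id)
        });
        // Drop channels whose pending-decode list became empty.
        !htlc_ids.is_empty()
    });
}

fn main() {
    let mut map = HashMap::new();
    map.insert(42u64, vec![0u64, 1]);
    dedup(&mut map, 42, 0);
    assert_eq!(map.get(&42), Some(&vec![1u64]));
    dedup(&mut map, 42, 1);
    assert!(map.is_empty());
}
```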
@@ -17644,21 +17701,19 @@ where
                        // still have an entry for this HTLC in `forward_htlcs` or
                        // `pending_intercepted_htlcs`, we were apparently not persisted after
                        // the monitor was when forwarding the payment.
-                       decode_update_add_htlcs.retain(
-                           |src_outb_alias, update_add_htlcs| {
-                               update_add_htlcs.retain(|update_add_htlc| {
-                                   let matches = *src_outb_alias
-                                       == prev_hop_data.prev_outbound_scid_alias
-                                       && update_add_htlc.htlc_id == prev_hop_data.htlc_id;
-                                   if matches {
-                                       log_info!(logger, "Removing pending to-decode HTLC as it was forwarded to the closed channel");
-                                   }
-                                   !matches
-                               });
-                               !update_add_htlcs.is_empty()
-                           },
+                       dedup_decode_update_add_htlcs(
+                           &mut decode_update_add_htlcs,
+                           &prev_hop_data,
+                           "HTLC was forwarded to the closed channel",
+                           &args.logger,
+                       );
+                       dedup_decode_update_add_htlcs(
+                           &mut decode_update_add_htlcs_legacy,
+                           &prev_hop_data,
+                           "HTLC was forwarded to the closed channel",
+                           &args.logger,
                        );
-                       forward_htlcs.retain(|_, forwards| {
+                       forward_htlcs_legacy.retain(|_, forwards| {
                            forwards.retain(|forward| {
                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                    if pending_forward_matches_htlc(&htlc_info) {
@@ -17670,7 +17725,7 @@ where
                            });
                            !forwards.is_empty()
                        });
-                       pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+                       pending_intercepted_htlcs_legacy.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                            if pending_forward_matches_htlc(&htlc_info) {
                                log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
                                    &htlc.payment_hash, &monitor.channel_id());
@@ -17892,6 +17947,17 @@ where
            }
        }
 
+       for (src, _, _, _, _, _) in failed_htlcs.iter() {
+           if let HTLCSource::PreviousHopData(prev_hop_data) = src {
+               dedup_decode_update_add_htlcs(
+                   &mut decode_update_add_htlcs,
+                   prev_hop_data,
+                   "HTLC was failed backwards during manager read",
+                   &args.logger,
+               );
+           }
+       }
+
        let expanded_inbound_key = args.node_signer.get_expanded_key();
 
        let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
@@ -18142,6 +18208,17 @@ where
            }
        }
 
+       for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) {
+           for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) {
+               dedup_decode_update_add_htlcs(
+                   &mut decode_update_add_htlcs,
+                   prev_hop_data,
+                   "HTLC was already decoded and marked as a claimable payment",
+                   &args.logger,
+               );
+           }
+       }
+
        let best_block = BestBlock::new(best_block_hash, best_block_height);
 
        let flow = OffersMessageFlow::new(
            chain_hash,
@@ -18156,6 +18233,22 @@ where
        )
        .with_async_payments_offers_cache(async_receive_offer_cache);
 
+       // If we are reading from a `ChannelManager` that was last serialized on LDK 0.2 or earlier, we
+       // won't have been able to rebuild `decode_update_add_htlcs` from `Channel`s and should use
+       // the legacy serialized maps instead.
+       // TODO: if we read an upgraded channel but there just happened to be no committed update_adds
+       // present, we'll use the old maps here. Maybe that's fine but we might want to add a flag in
+       // the `Channel` that indicates it is upgraded and will serialize committed update_adds.
+       let (forward_htlcs, decode_update_add_htlcs, pending_intercepted_htlcs) =
+           if decode_update_add_htlcs.is_empty() {
+               (
+                   forward_htlcs_legacy,
+                   decode_update_add_htlcs_legacy,
+                   pending_intercepted_htlcs_legacy.unwrap(),
+               )
+           } else {
+               (new_hash_map(), decode_update_add_htlcs, new_hash_map())
+           };
        let channel_manager = ChannelManager {
            chain_hash,
            fee_estimator: bounded_fee_estimator,
@@ -18168,7 +18261,7 @@ where
            inbound_payment_key: expanded_inbound_key,
            pending_outbound_payments: pending_outbounds,
 
-           pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
+           pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs),
            forward_htlcs: Mutex::new(forward_htlcs),
            decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index e31630a4926..363221796cc 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1382,9 +1382,10 @@ macro_rules! reload_node {
        $node.onion_messenger.set_async_payments_handler(&$new_channelmanager);
    };
    ($node: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
+       let config = $node.node.get_current_config();
        reload_node!(
            $node,
-           test_default_channel_config(),
+           config,
            $chanman_encoded,
            $monitors_encoded,
            $persister,
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 6389f1da786..4a231fd1ddf 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -20,6 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
 use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder};
 use crate::ln::msgs;
+use crate::ln::outbound_payment::Retry;
 use crate::ln::types::ChannelId;
 use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent};
 use crate::util::test_channel_signer::TestChannelSigner;
@@ -508,7 +509,6 @@
 #[cfg(feature = "std")]
 fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
-   use crate::ln::channelmanager::Retry;
    use crate::types::string::UntrustedString;
    // When we get a data_loss_protect proving we're behind, we immediately panic as the
    // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
@@ -1173,6 +1173,93 @@ fn removed_payment_no_manager_persistence() {
    expect_payment_failed!(nodes[0], payment_hash, false);
 }
 
+#[test]
+fn manager_persisted_pre_htlc_forward_on_outbound_edge() {
+   do_manager_persisted_pre_htlc_forward_on_outbound_edge(false);
+}
+
+#[test]
+fn manager_persisted_pre_intercept_forward_on_outbound_edge() {
+   do_manager_persisted_pre_htlc_forward_on_outbound_edge(true);
+}
+
+fn do_manager_persisted_pre_htlc_forward_on_outbound_edge(intercept_htlc: bool) {
+   let chanmon_cfgs = create_chanmon_cfgs(3);
+   let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+   let persister;
+   let new_chain_monitor;
+   let mut intercept_forwards_config = test_default_channel_config();
+   intercept_forwards_config.accept_intercept_htlcs = true;
+   let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
+   let nodes_1_deserialized;
+   let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+   let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+   let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+   let intercept_scid = nodes[1].node.get_intercept_scid();
+
+   // Lock in the HTLC from node_a <> node_b.
+   let amt_msat = 5000;
+   let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
+   if intercept_htlc {
+       route.paths[0].hops[1].short_channel_id = intercept_scid;
+   }
+   nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+   check_added_monitors(&nodes[0], 1);
+   let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+   nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+   do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false);
+
+   // Decode the HTLC onion but don't forward it to the next hop, such that the HTLC ends up in
+   // `ChannelManager::forward_htlcs` or `ChannelManager::pending_intercepted_htlcs`.
+   nodes[1].node.process_pending_update_add_htlcs();
+
+   // Disconnect peers and reload the forwarding node_b.
+   nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+   nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
+
+   let node_b_encoded = nodes[1].node.encode();
+
+   let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+   let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+   reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+   reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
+   let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
+   args_b_c.send_channel_ready = (true, true);
+   args_b_c.send_announcement_sigs = (true, true);
+   reconnect_nodes(args_b_c);
+
+   // Forward the HTLC and ensure we can claim it post-reload.
+   nodes[1].node.process_pending_htlc_forwards();
+
+   if intercept_htlc {
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       let (intercept_id, expected_outbound_amt_msat) = match events[0] {
+           Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => {
+               (intercept_id, expected_outbound_amount_msat)
+           },
+           _ => panic!()
+       };
+       nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id_2,
+           nodes[2].node.get_our_node_id(), expected_outbound_amt_msat).unwrap();
+       nodes[1].node.process_pending_htlc_forwards();
+   }
+   check_added_monitors(&nodes[1], 1);
+
+   let updates = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id());
+   nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+   do_commitment_signed_dance(&nodes[2], &nodes[1], &updates.commitment_signed, false, false);
+   expect_and_process_pending_htlcs(&nodes[2], false);
+
+   expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
+   let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
+   do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
+   expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
 #[test]
 fn test_reload_partial_funding_batch() {
    let chanmon_cfgs = create_chanmon_cfgs(3);
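Recap of the fallback in the `ChannelManager` read path: the choice between the rebuilt and legacy maps is all-or-nothing. A condensed model using a hypothetical helper and simplified map types (the real code makes this choice inline over concrete LDK types):

```rust
use std::collections::HashMap;

// Hypothetical helper modelling the all-or-nothing fallback; names and
// signatures are illustrative only.
fn select_htlc_maps<F, D, I>(
    rebuilt_decode: HashMap<u64, Vec<D>>,
    legacy_forwards: HashMap<u64, Vec<F>>,
    legacy_decode: HashMap<u64, Vec<D>>,
    legacy_intercepts: HashMap<u64, I>,
) -> (HashMap<u64, Vec<F>>, HashMap<u64, Vec<D>>, HashMap<u64, I>) {
    if rebuilt_decode.is_empty() {
        // Pre-0.3 serialization (or, per the TODO above, an upgraded channel that
        // simply had no committed update_adds): keep the legacy maps.
        (legacy_forwards, legacy_decode, legacy_intercepts)
    } else {
        // The channels carried the update_adds; re-decode from scratch and drop
        // the legacy state.
        (HashMap::new(), rebuilt_decode, HashMap::new())
    }
}

fn main() {
    let rebuilt: HashMap<u64, Vec<&str>> = HashMap::new();
    let (forwards, decode, intercepts) = select_htlc_maps(
        rebuilt,
        HashMap::from([(1u64, vec![0u8])]),
        HashMap::new(),
        HashMap::from([(9u64, "intercept")]),
    );
    // Nothing was rebuilt, so the legacy maps win.
    assert_eq!(forwards.len(), 1);
    assert!(decode.is_empty());
    assert_eq!(intercepts.len(), 1);
}
```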