@@ -961,12 +961,6 @@ impl MsgHandleErrInternal {
 	}
 }
 
-/// We hold back HTLCs we intend to relay for a random interval greater than this (see
-/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
-/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
-/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
-pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
-
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
 /// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order
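The constant deleted above was the floor for `Event::PendingHTLCsForwardable::time_forwardable`. A minimal sketch of the consumer pattern it supported, assuming a tokio runtime (the callback name is illustrative, not an LDK API):

```rust
use std::time::Duration;

// Pre-change pattern (sketch): upon Event::PendingHTLCsForwardable, wait at
// least `time_forwardable` (>= 100ms, per the removed constant) before
// processing, giving relayed HTLCs a small privacy-motivated delay.
fn handle_pending_htlcs_forwardable(
	time_forwardable: Duration, process_forwards: impl Fn() + Send + 'static,
) {
	tokio::spawn(async move {
		tokio::time::sleep(time_forwardable).await;
		process_forwards();
	});
}
```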
@@ -6328,8 +6322,7 @@ where
 
 	/// Processes HTLCs which are pending waiting on random forward delay.
 	///
-	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
-	/// Will likely generate further events.
+	/// Will regularly be called by the background processor.
 	pub fn process_pending_htlc_forwards(&self) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
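With no event to react to, the new contract is a plain periodic call. A minimal sketch of the driver loop implied by the updated doc comment, assuming a tokio runtime (the 100ms cadence is illustrative; `lightning-background-processor` owns the real schedule):

```rust
use std::time::Duration;

// Post-change pattern (sketch): poll on a fixed timer. The call is expected
// to be cheap when nothing is pending, so polling replaces event scheduling.
async fn forwarding_loop(process_pending_htlc_forwards: impl Fn()) {
	loop {
		process_pending_htlc_forwards();
		tokio::time::sleep(Duration::from_millis(100)).await;
	}
}
```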
@@ -7667,23 +7660,20 @@ where
 		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
 		destination: HTLCHandlingFailureType,
 	) {
-		let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(
+		self.fail_htlc_backwards_internal_without_forward_event(
 			source,
 			payment_hash,
 			onion_error,
 			destination,
 		);
-		if push_forward_event {
-			self.push_pending_forwards_ev();
-		}
 	}
 
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal_without_forward_event(
 		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
 		failure_type: HTLCHandlingFailureType,
-	) -> bool {
+	) {
 		// Ensure that no peer state channel storage lock is held when calling this function.
 		// This ensures that future code doesn't introduce a lock-order requirement for
 		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -7701,10 +7691,9 @@ where
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// from block_connected which may run during initialization prior to the chain_monitor
 		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
-		let mut push_forward_event;
 		match source {
 			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
-				push_forward_event = self.pending_outbound_payments.fail_htlc(
+				self.pending_outbound_payments.fail_htlc(
 					source,
 					payment_hash,
 					onion_error,
@@ -7760,9 +7749,7 @@
 					},
 				};
 
-				push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
 				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-				push_forward_event &= forward_htlcs.is_empty();
 				match forward_htlcs.entry(*short_channel_id) {
 					hash_map::Entry::Occupied(mut entry) => {
 						entry.get_mut().push(failure);
@@ -7783,7 +7770,6 @@
 				));
 			},
 		}
-		push_forward_event
 	}
 
 	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -9923,9 +9909,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
-		let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
 		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
-		push_forward_event &= decode_update_add_htlcs.is_empty();
 		let scid = update_add_htlcs.0;
 		match decode_update_add_htlcs.entry(scid) {
 			hash_map::Entry::Occupied(mut e) => {
@@ -9935,25 +9919,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				e.insert(update_add_htlcs.1);
 			},
 		}
-		if push_forward_event {
-			self.push_pending_forwards_ev();
-		}
 	}
 
 	#[inline]
 	fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
-		let push_forward_event =
-			self.forward_htlcs_without_forward_event(per_source_pending_forwards);
-		if push_forward_event {
-			self.push_pending_forwards_ev()
-		}
+		self.forward_htlcs_without_forward_event(per_source_pending_forwards);
 	}
 
 	#[inline]
 	fn forward_htlcs_without_forward_event(
 		&self, per_source_pending_forwards: &mut [PerSourcePendingForward],
-	) -> bool {
-		let mut push_forward_event = false;
+	) {
 		for &mut (
 			prev_short_channel_id,
 			prev_counterparty_node_id,
@@ -9976,10 +9952,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				// Pull this now to avoid introducing a lock order with `forward_htlcs`.
 				let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
 
-				let decode_update_add_htlcs_empty =
-					self.decode_update_add_htlcs.lock().unwrap().is_empty();
 				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-				let forward_htlcs_empty = forward_htlcs.is_empty();
 				match forward_htlcs.entry(scid) {
 					hash_map::Entry::Occupied(mut entry) => {
 						entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
@@ -10079,10 +10052,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 								},
 							}
 						} else {
-							// We don't want to generate a PendingHTLCsForwardable event if only intercepted
-							// payments are being processed.
-							push_forward_event |=
-								forward_htlcs_empty && decode_update_add_htlcs_empty;
 							entry.insert(vec![HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 								prev_short_channel_id,
 								prev_counterparty_node_id,
@@ -10101,7 +10070,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			for (htlc_source, payment_hash, failure_reason, destination) in
 				failed_intercept_forwards.drain(..)
 			{
-				push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(
+				self.fail_htlc_backwards_internal_without_forward_event(
 					&htlc_source,
 					&payment_hash,
 					&failure_reason,
@@ -10114,30 +10083,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				events.append(&mut new_intercept_events);
 			}
 		}
-		push_forward_event
-	}
-
-	fn push_pending_forwards_ev(&self) {
-		let mut pending_events = self.pending_events.lock().unwrap();
-		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
-		let num_forward_events = pending_events
-			.iter()
-			.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
-			.count();
-		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
-		// events is done in batches and they are not removed until we're done processing each
-		// batch. Since handling a `PendingHTLCsForwardable` event will call back into the
-		// `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
-		// payments will need an additional forwarding event before being claimed to make them look
-		// real by taking more time.
-		if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
-			pending_events.push_back((
-				Event::PendingHTLCsForwardable {
-					time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
-				},
-				None,
-			));
-		}
 	}
 
 	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
@@ -10714,15 +10659,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		has_pending_monitor_events
 	}
 
-	/// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
-	/// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
-	/// update events as a separate process method here.
-	#[cfg(fuzzing)]
-	pub fn process_monitor_events(&self) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-		self.process_pending_monitor_events();
-	}
-
 	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
 	/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
 	/// update was applied.
@@ -16061,21 +15997,6 @@ where
 			}
 		}
 
-		if !forward_htlcs.is_empty()
-			|| !decode_update_add_htlcs.is_empty()
-			|| pending_outbounds.needs_abandon()
-		{
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push_back((
-				events::Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) },
-				None,
-			));
-		}
-
 		let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
 
 		let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
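This last hunk is the restart counterpart of the changes above: with forwarding no longer event-driven, there is no synthetic `PendingHTLCsForwardable` to enqueue on deserialization; whatever sat in `forward_htlcs` at shutdown is simply picked up by the first periodic pass after reload. A sketch of the resulting startup ordering (all three closures are stand-ins, not LDK APIs):

```rust
// Illustrative only: `reload`, `reconnect_peers`, and `background_tick` are
// hypothetical stand-ins. The point is the ordering: no event replay step.
fn startup(reload: impl Fn(), reconnect_peers: impl Fn(), background_tick: impl Fn()) {
	reload();
	reconnect_peers();
	// The first periodic tick (which includes process_pending_htlc_forwards())
	// handles any forwards that were persisted across the restart.
	background_tick();
}
```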