@@ -961,12 +961,6 @@ impl MsgHandleErrInternal {
961961 }
962962}
963963
964- /// We hold back HTLCs we intend to relay for a random interval greater than this (see
965- /// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
966- /// This provides some limited amount of privacy. Ideally this would range from somewhere like one
967- /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
968- pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
969-
970964/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
971965/// be sent in the order they appear in the return value, however sometimes the order needs to be
972966/// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order
@@ -6329,8 +6323,7 @@ where
63296323
63306324 /// Processes HTLCs which are pending waiting on random forward delay.
63316325 ///
6332- /// Should only really ever be called in response to a PendingHTLCsForwardable event.
6333- /// Will likely generate further events.
6326+ /// Will regularly be called by the background processor.
63346327 pub fn process_pending_htlc_forwards(&self) {
63356328 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
63366329
@@ -7668,23 +7661,20 @@ where
76687661 &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
76697662 destination: HTLCHandlingFailureType,
76707663 ) {
7671- let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(
7664+ self.fail_htlc_backwards_internal_without_forward_event(
76727665 source,
76737666 payment_hash,
76747667 onion_error,
76757668 destination,
76767669 );
7677- if push_forward_event {
7678- self.push_pending_forwards_ev();
7679- }
76807670 }
76817671
76827672 /// Fails an HTLC backwards to the sender of it to us.
76837673 /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
76847674 fn fail_htlc_backwards_internal_without_forward_event(
76857675 &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
76867676 failure_type: HTLCHandlingFailureType,
7687- ) -> bool {
7677+ ) {
76887678 // Ensure that no peer state channel storage lock is held when calling this function.
76897679 // This ensures that future code doesn't introduce a lock-order requirement for
76907680 // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -7702,10 +7692,9 @@ where
77027692 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
77037693 // from block_connected which may run during initialization prior to the chain_monitor
77047694 // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
7705- let mut push_forward_event;
77067695 match source {
77077696 HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
7708- push_forward_event = self.pending_outbound_payments.fail_htlc(
7697+ self.pending_outbound_payments.fail_htlc(
77097698 source,
77107699 payment_hash,
77117700 onion_error,
@@ -7761,9 +7750,7 @@ where
77617750 },
77627751 };
77637752
7764- push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
77657753 let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
7766- push_forward_event &= forward_htlcs.is_empty();
77677754 match forward_htlcs.entry(*short_channel_id) {
77687755 hash_map::Entry::Occupied(mut entry) => {
77697756 entry.get_mut().push(failure);
@@ -7784,7 +7771,6 @@ where
77847771 ));
77857772 },
77867773 }
7787- push_forward_event
77887774 }
77897775
77907776 /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -9924,9 +9910,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
99249910 }
99259911
99269912 fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
9927- let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
99289913 let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
9929- push_forward_event &= decode_update_add_htlcs.is_empty();
99309914 let scid = update_add_htlcs.0;
99319915 match decode_update_add_htlcs.entry(scid) {
99329916 hash_map::Entry::Occupied(mut e) => {
@@ -9936,25 +9920,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
99369920 e.insert(update_add_htlcs.1);
99379921 },
99389922 }
9939- if push_forward_event {
9940- self.push_pending_forwards_ev();
9941- }
99429923 }
99439924
99449925 #[inline]
99459926 fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
9946- let push_forward_event =
9947- self.forward_htlcs_without_forward_event(per_source_pending_forwards);
9948- if push_forward_event {
9949- self.push_pending_forwards_ev()
9950- }
9927+ self.forward_htlcs_without_forward_event(per_source_pending_forwards);
99519928 }
99529929
99539930 #[inline]
99549931 fn forward_htlcs_without_forward_event(
99559932 &self, per_source_pending_forwards: &mut [PerSourcePendingForward],
9956- ) -> bool {
9957- let mut push_forward_event = false;
9933+ ) {
99589934 for &mut (
99599935 prev_short_channel_id,
99609936 prev_counterparty_node_id,
@@ -9977,10 +9953,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
99779953 // Pull this now to avoid introducing a lock order with `forward_htlcs`.
99789954 let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
99799955
9980- let decode_update_add_htlcs_empty =
9981- self.decode_update_add_htlcs.lock().unwrap().is_empty();
99829956 let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
9983- let forward_htlcs_empty = forward_htlcs.is_empty();
99849957 match forward_htlcs.entry(scid) {
99859958 hash_map::Entry::Occupied(mut entry) => {
99869959 entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
@@ -10080,10 +10053,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1008010053 },
1008110054 }
1008210055 } else {
10083- // We don't want to generate a PendingHTLCsForwardable event if only intercepted
10084- // payments are being processed.
10085- push_forward_event |=
10086- forward_htlcs_empty && decode_update_add_htlcs_empty;
1008710056 entry.insert(vec![HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
1008810057 prev_short_channel_id,
1008910058 prev_counterparty_node_id,
@@ -10102,7 +10071,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1010210071 for (htlc_source, payment_hash, failure_reason, destination) in
1010310072 failed_intercept_forwards.drain(..)
1010410073 {
10105- push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(
10074+ self.fail_htlc_backwards_internal_without_forward_event(
1010610075 &htlc_source,
1010710076 &payment_hash,
1010810077 &failure_reason,
@@ -10115,30 +10084,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1011510084 events.append(&mut new_intercept_events);
1011610085 }
1011710086 }
10118- push_forward_event
10119- }
10120-
10121- fn push_pending_forwards_ev(&self) {
10122- let mut pending_events = self.pending_events.lock().unwrap();
10123- let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
10124- let num_forward_events = pending_events
10125- .iter()
10126- .filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
10127- .count();
10128- // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
10129- // events is done in batches and they are not removed until we're done processing each
10130- // batch. Since handling a `PendingHTLCsForwardable` event will call back into the
10131- // `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
10132- // payments will need an additional forwarding event before being claimed to make them look
10133- // real by taking more time.
10134- if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
10135- pending_events.push_back((
10136- Event::PendingHTLCsForwardable {
10137- time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
10138- },
10139- None,
10140- ));
10141- }
1014210087 }
1014310088
1014410089 /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
@@ -10715,15 +10660,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1071510660 has_pending_monitor_events
1071610661 }
1071710662
10718- /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
10719- /// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
10720- /// update events as a separate process method here.
10721- #[cfg(fuzzing)]
10722- pub fn process_monitor_events(&self) {
10723- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
10724- self.process_pending_monitor_events();
10725- }
10726-
1072710663 /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
1072810664 /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
1072910665 /// update was applied.
@@ -16062,21 +15998,6 @@ where
1606215998 }
1606315999 }
1606416000
16065- if !forward_htlcs.is_empty()
16066- || !decode_update_add_htlcs.is_empty()
16067- || pending_outbounds.needs_abandon()
16068- {
16069- // If we have pending HTLCs to forward, assume we either dropped a
16070- // `PendingHTLCsForwardable` or the user received it but never processed it as they
16071- // shut down before the timer hit. Either way, set the time_forwardable to a small
16072- // constant as enough time has likely passed that we should simply handle the forwards
16073- // now, or at least after the user gets a chance to reconnect to our peers.
16074- pending_events_read.push_back((
16075- events::Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) },
16076- None,
16077- ));
16078- }
16079-
1608016001 let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
1608116002
1608216003 let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());