@@ -962,12 +962,6 @@ impl MsgHandleErrInternal {
 	}
 }
 
-/// We hold back HTLCs we intend to relay for a random interval greater than this (see
-/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
-/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
-/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
-pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
-
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
 /// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order
@@ -6330,8 +6324,7 @@ where
 
 	/// Processes HTLCs which are pending waiting on random forward delay.
 	///
-	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
-	/// Will likely generate further events.
+	/// Will regularly be called by the background processor.
 	pub fn process_pending_htlc_forwards(&self) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
@@ -7650,23 +7643,20 @@ where
 		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
 		destination: HTLCHandlingFailureType,
 	) {
-		let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(
+		self.fail_htlc_backwards_internal_without_forward_event(
 			source,
 			payment_hash,
 			onion_error,
 			destination,
 		);
-		if push_forward_event {
-			self.push_pending_forwards_ev();
-		}
 	}
 
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal_without_forward_event(
 		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
 		failure_type: HTLCHandlingFailureType,
-	) -> bool {
+	) {
 		// Ensure that no peer state channel storage lock is held when calling this function.
 		// This ensures that future code doesn't introduce a lock-order requirement for
 		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -7684,10 +7674,9 @@ where
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// from block_connected which may run during initialization prior to the chain_monitor
 		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
-		let mut push_forward_event;
 		match source {
 			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
-				push_forward_event = self.pending_outbound_payments.fail_htlc(
+				self.pending_outbound_payments.fail_htlc(
 					source,
 					payment_hash,
 					onion_error,
@@ -7743,9 +7732,7 @@ where
 			},
 		};
 
-		push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
 		let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-		push_forward_event &= forward_htlcs.is_empty();
 		match forward_htlcs.entry(*short_channel_id) {
 			hash_map::Entry::Occupied(mut entry) => {
 				entry.get_mut().push(failure);
@@ -7766,7 +7753,6 @@ where
 				));
 			},
 		}
-		push_forward_event
 	}
 
 	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -10046,9 +10032,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
-		let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
 		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
-		push_forward_event &= decode_update_add_htlcs.is_empty();
 		let scid = update_add_htlcs.0;
 		match decode_update_add_htlcs.entry(scid) {
 			hash_map::Entry::Occupied(mut e) => {
@@ -10058,25 +10042,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				e.insert(update_add_htlcs.1);
 			},
 		}
-		if push_forward_event {
-			self.push_pending_forwards_ev();
-		}
 	}
 
 	#[inline]
 	fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
-		let push_forward_event =
-			self.forward_htlcs_without_forward_event(per_source_pending_forwards);
-		if push_forward_event {
-			self.push_pending_forwards_ev()
-		}
+		self.forward_htlcs_without_forward_event(per_source_pending_forwards);
 	}
 
 	#[inline]
 	fn forward_htlcs_without_forward_event(
 		&self, per_source_pending_forwards: &mut [PerSourcePendingForward],
-	) -> bool {
-		let mut push_forward_event = false;
+	) {
 		for &mut (
 			prev_short_channel_id,
 			prev_counterparty_node_id,
@@ -10099,10 +10075,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			// Pull this now to avoid introducing a lock order with `forward_htlcs`.
 			let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
 
-			let decode_update_add_htlcs_empty =
-				self.decode_update_add_htlcs.lock().unwrap().is_empty();
 			let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-			let forward_htlcs_empty = forward_htlcs.is_empty();
 			match forward_htlcs.entry(scid) {
 				hash_map::Entry::Occupied(mut entry) => {
 					entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
@@ -10202,10 +10175,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 						},
 					}
 				} else {
-					// We don't want to generate a PendingHTLCsForwardable event if only intercepted
-					// payments are being processed.
-					push_forward_event |=
-						forward_htlcs_empty && decode_update_add_htlcs_empty;
 					entry.insert(vec![HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 						prev_short_channel_id,
 						prev_counterparty_node_id,
@@ -10224,7 +10193,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			for (htlc_source, payment_hash, failure_reason, destination) in
 				failed_intercept_forwards.drain(..)
 			{
-				push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(
+				self.fail_htlc_backwards_internal_without_forward_event(
 					&htlc_source,
 					&payment_hash,
 					&failure_reason,
@@ -10237,30 +10206,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				events.append(&mut new_intercept_events);
 			}
 		}
-		push_forward_event
-	}
-
-	fn push_pending_forwards_ev(&self) {
-		let mut pending_events = self.pending_events.lock().unwrap();
-		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
-		let num_forward_events = pending_events
-			.iter()
-			.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
-			.count();
-		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
-		// events is done in batches and they are not removed until we're done processing each
-		// batch. Since handling a `PendingHTLCsForwardable` event will call back into the
-		// `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
-		// payments will need an additional forwarding event before being claimed to make them look
-		// real by taking more time.
-		if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
-			pending_events.push_back((
-				Event::PendingHTLCsForwardable {
-					time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
-				},
-				None,
-			));
-		}
 	}
 
 	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
@@ -10883,15 +10828,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		has_pending_monitor_events
 	}
 
-	/// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
-	/// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
-	/// update events as a separate process method here.
-	#[cfg(fuzzing)]
-	pub fn process_monitor_events(&self) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-		self.process_pending_monitor_events();
-	}
-
 	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
 	/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
 	/// update was applied.
@@ -16226,21 +16162,6 @@ where
 			}
 		}
 
-		if !forward_htlcs.is_empty()
-			|| !decode_update_add_htlcs.is_empty()
-			|| pending_outbounds.needs_abandon()
-		{
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push_back((
-				events::Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) },
-				None,
-			));
-		}
-
 		let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
 
 		let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
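
For context on the caller-side migration this patch implies (not part of the diff itself): previously, ChannelManager queued an Event::PendingHTLCsForwardable and expected the event handler to wait at least time_forwardable before calling process_pending_htlc_forwards; after this change no event is generated and the method is simply called on a timer, in practice by lightning-background-processor. The sketch below contrasts the two patterns. The stand-in ChannelManager and Event types, the handle_event/drive_forwards helpers, and the 100ms interval are illustrative assumptions, not LDK APIs beyond the names shown in the diff.

use std::sync::Arc;
use std::thread;
use std::time::Duration;

// Stand-ins so this sketch compiles on its own; in real code these are
// lightning's `ChannelManager` (with its many generic parameters elided)
// and `lightning::events::Event`.
struct ChannelManager;
impl ChannelManager {
	fn process_pending_htlc_forwards(&self) { /* forward pending HTLCs */ }
}
enum Event {
	PendingHTLCsForwardable { time_forwardable: Duration },
	Other,
}

// Old pattern (before this patch): react to the forwardable event, waiting
// at least `time_forwardable` as the event's docs required.
fn handle_event(channel_manager: &Arc<ChannelManager>, event: Event) {
	match event {
		Event::PendingHTLCsForwardable { time_forwardable } => {
			let cm = Arc::clone(channel_manager);
			thread::spawn(move || {
				thread::sleep(time_forwardable);
				cm.process_pending_htlc_forwards();
			});
		},
		Event::Other => { /* handle other events */ },
	}
}

// New pattern (after this patch): no event fires; a periodic driver, in
// practice the background processor's timer loop, calls the method directly.
// HTLCs pending at restart are picked up the same way, which is why the
// deserialization-time event replay removed above is no longer needed.
fn drive_forwards(channel_manager: Arc<ChannelManager>) {
	loop {
		thread::sleep(Duration::from_millis(100)); // interval is illustrative
		channel_manager.process_pending_htlc_forwards();
	}
}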