@@ -964,12 +964,6 @@ impl MsgHandleErrInternal {
 	}
 }
 
-/// We hold back HTLCs we intend to relay for a random interval greater than this (see
-/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
-/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
-/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
-pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
-
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
 /// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order
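
The constant removed above fed the `time_forwardable` value behind the old delay guideline: relay HTLCs only after "a random interval greater than this". A minimal sketch of the jitter a handler was expected to apply, assuming an upper bound of five times the floor; the bound and the `forward_delay` helper are illustrative, not LDK API:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hasher};
use std::time::Duration;

// Illustrative only: picks a delay in [min, 5 * min), mirroring the removed
// guideline of waiting a random interval of at least the 100ms floor before
// calling process_pending_htlc_forwards(). Not cryptographic randomness.
fn forward_delay(time_forwardable: Duration) -> Duration {
	let min = time_forwardable.as_millis() as u64; // e.g. the old 100ms floor
	let jitter = RandomState::new().build_hasher().finish() % (4 * min);
	Duration::from_millis(min + jitter)
}
```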
@@ -6360,8 +6354,7 @@ where
 
 	/// Processes HTLCs which are pending waiting on random forward delay.
 	///
-	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
-	/// Will likely generate further events.
+	/// Will regularly be called by the background processor.
 	pub fn process_pending_htlc_forwards(&self) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
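
With `Event::PendingHTLCsForwardable` gone, nothing prompts users to call `process_pending_htlc_forwards`; per the new doc comment it is driven by the background processor. A minimal sketch of an equivalent hand-rolled driver for users not running the background processor, assuming the call is cheap when there is nothing to forward; the 100ms cadence is illustrative:

```rust
use std::time::Duration;

// Stand-in for the background processor's periodic call. The closure would
// typically capture an Arc<ChannelManager<...>>, whose many type parameters
// are elided here.
fn drive_htlc_forwards(process_pending_htlc_forwards: impl Fn()) {
	loop {
		// The polling cadence now provides the batching delay that the removed
		// MIN_HTLC_RELAY_HOLDING_CELL_MILLIS constant used to describe.
		std::thread::sleep(Duration::from_millis(100));
		process_pending_htlc_forwards();
	}
}
```

Spawned on a dedicated thread as `drive_htlc_forwards(|| channel_manager.process_pending_htlc_forwards())`, this reproduces the old event-driven behavior without the event.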
@@ -7776,23 +7769,20 @@ where
 		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
 		destination: HTLCHandlingFailureType,
 	) {
-		let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(
+		self.fail_htlc_backwards_internal_without_forward_event(
 			source,
 			payment_hash,
 			onion_error,
 			destination,
 		);
-		if push_forward_event {
-			self.push_pending_forwards_ev();
-		}
 	}
 
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal_without_forward_event(
 		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
 		failure_type: HTLCHandlingFailureType,
-	) -> bool {
+	) {
 		// Ensure that no peer state channel storage lock is held when calling this function.
 		// This ensures that future code doesn't introduce a lock-order requirement for
 		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -7810,10 +7800,9 @@ where
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// from block_connected which may run during initialization prior to the chain_monitor
 		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
-		let mut push_forward_event;
 		match source {
 			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
-				push_forward_event = self.pending_outbound_payments.fail_htlc(
+				self.pending_outbound_payments.fail_htlc(
 					source,
 					payment_hash,
 					onion_error,
@@ -7869,9 +7858,7 @@ where
 			},
 		};
 
-		push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
 		let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-		push_forward_event &= forward_htlcs.is_empty();
 		match forward_htlcs.entry(*short_channel_id) {
 			hash_map::Entry::Occupied(mut entry) => {
 				entry.get_mut().push(failure);
@@ -7892,7 +7879,6 @@ where
 				));
 			},
 		}
-		push_forward_event
 	}
 
 	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -10033,9 +10019,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
-		let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
 		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
-		push_forward_event &= decode_update_add_htlcs.is_empty();
 		let scid = update_add_htlcs.0;
 		match decode_update_add_htlcs.entry(scid) {
 			hash_map::Entry::Occupied(mut e) => {
@@ -10045,25 +10029,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				e.insert(update_add_htlcs.1);
 			},
 		}
-		if push_forward_event {
-			self.push_pending_forwards_ev();
-		}
 	}
 
 	#[inline]
 	fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
-		let push_forward_event =
-			self.forward_htlcs_without_forward_event(per_source_pending_forwards);
-		if push_forward_event {
-			self.push_pending_forwards_ev()
-		}
+		self.forward_htlcs_without_forward_event(per_source_pending_forwards);
 	}
 
 	#[inline]
 	fn forward_htlcs_without_forward_event(
 		&self, per_source_pending_forwards: &mut [PerSourcePendingForward],
-	) -> bool {
-		let mut push_forward_event = false;
+	) {
 		for &mut (
 			prev_short_channel_id,
 			prev_counterparty_node_id,
@@ -10086,10 +10062,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			// Pull this now to avoid introducing a lock order with `forward_htlcs`.
 			let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
 
-			let decode_update_add_htlcs_empty =
-				self.decode_update_add_htlcs.lock().unwrap().is_empty();
 			let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-			let forward_htlcs_empty = forward_htlcs.is_empty();
 			match forward_htlcs.entry(scid) {
 				hash_map::Entry::Occupied(mut entry) => {
 					entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
@@ -10189,10 +10162,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					},
 				}
 			} else {
-				// We don't want to generate a PendingHTLCsForwardable event if only intercepted
-				// payments are being processed.
-				push_forward_event |=
-					forward_htlcs_empty && decode_update_add_htlcs_empty;
 				entry.insert(vec![HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 					prev_short_channel_id,
 					prev_counterparty_node_id,
@@ -10211,7 +10180,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		for (htlc_source, payment_hash, failure_reason, destination) in
 			failed_intercept_forwards.drain(..)
 		{
-			push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(
+			self.fail_htlc_backwards_internal_without_forward_event(
 				&htlc_source,
 				&payment_hash,
 				&failure_reason,
@@ -10224,30 +10193,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				events.append(&mut new_intercept_events);
 			}
 		}
-		push_forward_event
-	}
-
-	fn push_pending_forwards_ev(&self) {
-		let mut pending_events = self.pending_events.lock().unwrap();
-		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
-		let num_forward_events = pending_events
-			.iter()
-			.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
-			.count();
-		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
-		// events is done in batches and they are not removed until we're done processing each
-		// batch. Since handling a `PendingHTLCsForwardable` event will call back into the
-		// `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
-		// payments will need an additional forwarding event before being claimed to make them look
-		// real by taking more time.
-		if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
-			pending_events.push_back((
-				Event::PendingHTLCsForwardable {
-					time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
-				},
-				None,
-			));
-		}
 	}
 
 	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
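
The dedup condition in the removed `push_pending_forwards_ev` is easy to misread, so here is a standalone toy restatement of just that predicate, under the assumption of a simple event queue; `Ev` and `should_push_forwardable` are illustrative names, not LDK API:

```rust
enum Ev {
	PendingHtlcsForwardable,
	Other,
}

// Toy restatement of the removed rule: while the queue is being processed the
// event currently in flight is still visible (batches are only removed once
// fully handled), so one queued forwardable event is tolerated; otherwise we
// push only when none is queued.
fn should_push_forwardable(queue: &[Ev], is_processing_events: bool) -> bool {
	let num_forward_events =
		queue.iter().filter(|ev| matches!(ev, Ev::PendingHtlcsForwardable)).count();
	(is_processing_events && num_forward_events <= 1) || num_forward_events < 1
}

fn main() {
	let queue = vec![Ev::Other, Ev::PendingHtlcsForwardable];
	assert!(!should_push_forwardable(&queue, false)); // one is already queued
	assert!(should_push_forwardable(&queue, true)); // in-flight copy not yet removed
}
```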
@@ -10838,15 +10783,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		has_pending_monitor_events
 	}
 
-	/// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
-	/// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
-	/// update events as a separate process method here.
-	#[cfg(fuzzing)]
-	pub fn process_monitor_events(&self) {
-		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-		self.process_pending_monitor_events();
-	}
-
 	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
 	/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
 	/// update was applied.
@@ -16195,21 +16131,6 @@ where
 			}
 		}
 
-		if !forward_htlcs.is_empty()
-			|| !decode_update_add_htlcs.is_empty()
-			|| pending_outbounds.needs_abandon()
-		{
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push_back((
-				events::Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) },
-				None,
-			));
-		}
-
 		let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
 
 		let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
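
The removed block above papered over restarts by synthesizing a 2-second `PendingHTLCsForwardable`. Under the polling model the same effect falls out of the driver: pending forwards survive in the serialized `forward_htlcs` / `decode_update_add_htlcs` maps and are picked up by the next pass. A sketch of a restart-aware variant of the driver shown earlier, with illustrative timings:

```rust
use std::time::Duration;

// After deserialization, forwards queued before shutdown are already present
// in ChannelManager state; an early pass (after a short grace period for
// peers to reconnect, echoing the removed 2s constant) handles them, after
// which the regular cadence takes over.
fn drive_after_restart(process_pending_htlc_forwards: impl Fn()) {
	std::thread::sleep(Duration::from_secs(2));
	process_pending_htlc_forwards();
	loop {
		std::thread::sleep(Duration::from_millis(100));
		process_pending_htlc_forwards();
	}
}
```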