@@ -960,12 +960,6 @@ impl MsgHandleErrInternal {
960960 }
961961}
962962
963- /// We hold back HTLCs we intend to relay for a random interval greater than this (see
964- /// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
965- /// This provides some limited amount of privacy. Ideally this would range from somewhere like one
966- /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
967- pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
968-
969963/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
970964/// be sent in the order they appear in the return value, however sometimes the order needs to be
971965/// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order
@@ -6315,8 +6309,7 @@ where
63156309
63166310 /// Processes HTLCs which are pending waiting on random forward delay.
63176311 ///
6318- /// Should only really ever be called in response to a PendingHTLCsForwardable event.
6319- /// Will likely generate further events.
6312+ /// Will regularly be called by the background processor.
63206313 pub fn process_pending_htlc_forwards(&self) {
63216314 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
63226315
@@ -7721,23 +7714,20 @@ where
77217714 &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
77227715 destination: HTLCHandlingFailureType,
77237716 ) {
7724- let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(
7717+ self.fail_htlc_backwards_internal_without_forward_event(
77257718 source,
77267719 payment_hash,
77277720 onion_error,
77287721 destination,
77297722 );
7730- if push_forward_event {
7731- self.push_pending_forwards_ev();
7732- }
77337723 }
77347724
77357725 /// Fails an HTLC backwards to the sender of it to us.
77367726 /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
77377727 fn fail_htlc_backwards_internal_without_forward_event(
77387728 &self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
77397729 failure_type: HTLCHandlingFailureType,
7740- ) -> bool {
7730+ ) {
77417731 // Ensure that no peer state channel storage lock is held when calling this function.
77427732 // This ensures that future code doesn't introduce a lock-order requirement for
77437733 // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -7755,10 +7745,9 @@ where
77557745 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
77567746 // from block_connected which may run during initialization prior to the chain_monitor
77577747 // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
7758- let mut push_forward_event;
77597748 match source {
77607749 HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
7761- push_forward_event = self.pending_outbound_payments.fail_htlc(
7750+ self.pending_outbound_payments.fail_htlc(
77627751 source,
77637752 payment_hash,
77647753 onion_error,
@@ -7814,9 +7803,7 @@ where
78147803 },
78157804 };
78167805
7817- push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
78187806 let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
7819- push_forward_event &= forward_htlcs.is_empty();
78207807 match forward_htlcs.entry(*short_channel_id) {
78217808 hash_map::Entry::Occupied(mut entry) => {
78227809 entry.get_mut().push(failure);
@@ -7837,7 +7824,6 @@ where
78377824 ));
78387825 },
78397826 }
7840- push_forward_event
78417827 }
78427828
78437829 /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -9978,9 +9964,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
99789964 }
99799965
99809966 fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
9981- let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
99829967 let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
9983- push_forward_event &= decode_update_add_htlcs.is_empty();
99849968 let scid = update_add_htlcs.0;
99859969 match decode_update_add_htlcs.entry(scid) {
99869970 hash_map::Entry::Occupied(mut e) => {
@@ -9990,25 +9974,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
99909974 e.insert(update_add_htlcs.1);
99919975 },
99929976 }
9993- if push_forward_event {
9994- self.push_pending_forwards_ev();
9995- }
99969977 }
99979978
99989979 #[inline]
99999980 fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
10000- let push_forward_event =
10001- self.forward_htlcs_without_forward_event(per_source_pending_forwards);
10002- if push_forward_event {
10003- self.push_pending_forwards_ev()
10004- }
9981+ self.forward_htlcs_without_forward_event(per_source_pending_forwards);
100059982 }
100069983
100079984 #[inline]
100089985 fn forward_htlcs_without_forward_event(
100099986 &self, per_source_pending_forwards: &mut [PerSourcePendingForward],
10010- ) -> bool {
10011- let mut push_forward_event = false;
9987+ ) {
100129988 for &mut (
100139989 prev_short_channel_id,
100149990 prev_counterparty_node_id,
@@ -10031,10 +10007,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1003110007 // Pull this now to avoid introducing a lock order with `forward_htlcs`.
1003210008 let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
1003310009
10034- let decode_update_add_htlcs_empty =
10035- self.decode_update_add_htlcs.lock().unwrap().is_empty();
1003610010 let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
10037- let forward_htlcs_empty = forward_htlcs.is_empty();
1003810011 match forward_htlcs.entry(scid) {
1003910012 hash_map::Entry::Occupied(mut entry) => {
1004010013 entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
@@ -10130,10 +10103,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1013010103 },
1013110104 }
1013210105 } else {
10133- // We don't want to generate a PendingHTLCsForwardable event if only intercepted
10134- // payments are being processed.
10135- push_forward_event |=
10136- forward_htlcs_empty && decode_update_add_htlcs_empty;
1013710106 entry.insert(vec![HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
1013810107 prev_short_channel_id,
1013910108 prev_counterparty_node_id,
@@ -10152,7 +10121,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1015210121 for (htlc_source, payment_hash, failure_reason, destination) in
1015310122 failed_intercept_forwards.drain(..)
1015410123 {
10155- push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(
10124+ self.fail_htlc_backwards_internal_without_forward_event(
1015610125 &htlc_source,
1015710126 &payment_hash,
1015810127 &failure_reason,
@@ -10165,30 +10134,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1016510134 events.append(&mut new_intercept_events);
1016610135 }
1016710136 }
10168- push_forward_event
10169- }
10170-
10171- fn push_pending_forwards_ev(&self) {
10172- let mut pending_events = self.pending_events.lock().unwrap();
10173- let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
10174- let num_forward_events = pending_events
10175- .iter()
10176- .filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
10177- .count();
10178- // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
10179- // events is done in batches and they are not removed until we're done processing each
10180- // batch. Since handling a `PendingHTLCsForwardable` event will call back into the
10181- // `ChannelManager`, we'll still see the original forwarding event not removed. Phantom
10182- // payments will need an additional forwarding event before being claimed to make them look
10183- // real by taking more time.
10184- if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
10185- pending_events.push_back((
10186- Event::PendingHTLCsForwardable {
10187- time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
10188- },
10189- None,
10190- ));
10191- }
1019210137 }
1019310138
1019410139 /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
@@ -10779,15 +10724,6 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1077910724 has_pending_monitor_events
1078010725 }
1078110726
10782- /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
10783- /// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
10784- /// update events as a separate process method here.
10785- #[cfg(fuzzing)]
10786- pub fn process_monitor_events(&self) {
10787- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
10788- self.process_pending_monitor_events();
10789- }
10790-
1079110727 /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
1079210728 /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
1079310729 /// update was applied.
@@ -15951,21 +15887,6 @@ where
1595115887 }
1595215888 }
1595315889
15954- if !forward_htlcs.is_empty()
15955- || !decode_update_add_htlcs.is_empty()
15956- || pending_outbounds.needs_abandon()
15957- {
15958- // If we have pending HTLCs to forward, assume we either dropped a
15959- // `PendingHTLCsForwardable` or the user received it but never processed it as they
15960- // shut down before the timer hit. Either way, set the time_forwardable to a small
15961- // constant as enough time has likely passed that we should simply handle the forwards
15962- // now, or at least after the user gets a chance to reconnect to our peers.
15963- pending_events_read.push_back((
15964- events::Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) },
15965- None,
15966- ));
15967- }
15968-
1596915890 let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
1597015891
1597115892 let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());