Commit 677312b

Check pruned HTLCs were resolved on startup
In the last commit, we added support for pruning an inbound HTLC's persisted onion once the HTLC has been irrevocably forwarded to the outbound edge. Here, we add a check on startup that those inbound HTLCs were actually handled. Specifically, we check that each such inbound HTLC is either (a) currently present in the outbound edge or (b) was removed via claim. If neither of those is true, we infer that the HTLC was removed from the outbound edge via fail, and we fail the inbound HTLC backwards.

Tests for this code are added in a follow-up PR that will be merged in 0.5. We can't test this code right now because the reconstruct_manager_from_monitors logic is needed, and whether it runs during tests is chosen randomly.
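As a minimal sketch of the startup check described above: the names ForwardedHtlc, Resolution, and resolve_on_startup below are hypothetical and not LDK APIs; the actual logic lives in the ChannelManager deserialization changes in this commit.

// Hypothetical, self-contained illustration of the startup resolution check.
#[derive(Debug, Clone, PartialEq)]
struct ForwardedHtlc {
	htlc_id: u64,
	prev_outbound_scid_alias: u64,
}

#[derive(Debug, PartialEq)]
enum Resolution {
	/// Still pending in the outbound edge: nothing to do yet.
	StillPending,
	/// Removed from the outbound edge with a preimage: claim backwards.
	ClaimBackwards,
	/// Neither pending nor claimed: infer it was failed, so fail backwards.
	FailBackwards,
}

fn resolve_on_startup(
	htlc: &ForwardedHtlc, outbound_pending: &[ForwardedHtlc], claimed: &[ForwardedHtlc],
) -> Resolution {
	if outbound_pending.contains(htlc) {
		Resolution::StillPending
	} else if claimed.contains(htlc) {
		Resolution::ClaimBackwards
	} else {
		Resolution::FailBackwards
	}
}

fn main() {
	let htlc = ForwardedHtlc { htlc_id: 42, prev_outbound_scid_alias: 7 };
	// No matching HTLC in the outbound edge and no claim recorded, so fail it backwards.
	assert_eq!(resolve_on_startup(&htlc, &[], &[]), Resolution::FailBackwards);
}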
1 parent 62c99aa commit 677312b

File tree: 2 files changed, +114 -21 lines changed

lightning/src/ln/channel.rs

Lines changed: 7 additions & 7 deletions

@@ -315,8 +315,8 @@ impl InboundHTLCState {
 /// `ChannelManager` persist.
 ///
 /// Useful for reconstructing the pending HTLC set on startup.
-#[derive(Debug)]
-enum InboundUpdateAdd {
+#[derive(Debug, Clone)]
+pub(super) enum InboundUpdateAdd {
 	/// The inbound committed HTLC's update_add_htlc message.
 	WithOnion { update_add_htlc: msgs::UpdateAddHTLC },
 	/// This inbound HTLC is a forward that was irrevocably committed to the outbound edge, allowing
@@ -7828,7 +7828,9 @@ where
 	}

 	/// Useful for reconstructing the set of pending HTLCs when deserializing the `ChannelManager`.
-	pub(super) fn inbound_committed_unresolved_htlcs(&self) -> Vec<msgs::UpdateAddHTLC> {
+	pub(super) fn inbound_committed_unresolved_htlcs(
+		&self,
+	) -> Vec<(PaymentHash, InboundUpdateAdd)> {
 		// We don't want to return an HTLC as needing processing if it already has a resolution that's
 		// pending in the holding cell.
 		let htlc_resolution_in_holding_cell = |id: u64| -> bool {
@@ -7846,13 +7848,11 @@ where
 			.pending_inbound_htlcs
 			.iter()
 			.filter_map(|htlc| match &htlc.state {
-				InboundHTLCState::Committed {
-					update_add_htlc: InboundUpdateAdd::WithOnion { update_add_htlc },
-				} => {
+				InboundHTLCState::Committed { update_add_htlc } => {
 					if htlc_resolution_in_holding_cell(htlc.htlc_id) {
 						return None;
 					}
-					Some(update_add_htlc.clone())
+					Some((htlc.payment_hash, update_add_htlc.clone()))
 				},
 				_ => None,
 			})

lightning/src/ln/channelmanager.rs

Lines changed: 107 additions & 14 deletions

@@ -58,9 +58,9 @@ use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight;
 use crate::ln::channel::QuiescentAction;
 use crate::ln::channel::{
 	self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult,
-	FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel,
-	ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch,
-	WithChannelContext,
+	FundedChannel, FundingTxSigned, InboundUpdateAdd, InboundV1Channel, OutboundV1Channel,
+	PendingV2Channel, ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse,
+	UpdateFulfillCommitFetch, WithChannelContext,
 };
 use crate::ln::channel_state::ChannelDetails;
 use crate::ln::funding::SpliceContribution;
@@ -10100,7 +10100,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state = per_peer_state.get(&cp_id).map(|state| state.lock().unwrap()).unwrap();
 		let chan = peer_state.channel_by_id.get(&chan_id).and_then(|c| c.as_funded()).unwrap();
-		chan.inbound_committed_unresolved_htlcs().len()
+		chan.inbound_committed_unresolved_htlcs()
+			.iter()
+			.filter(|(_, htlc)| matches!(htlc, InboundUpdateAdd::WithOnion { .. }))
+			.count()
 	}

 	/// Completes channel resumption after locks have been released.
@@ -18087,7 +18090,7 @@ where
 			decode_update_add_htlcs_legacy.unwrap_or_else(|| new_hash_map());
 		let mut pending_intercepted_htlcs_legacy =
 			pending_intercepted_htlcs_legacy.unwrap_or_else(|| new_hash_map());
-		let mut decode_update_add_htlcs = new_hash_map();
+		let mut decode_update_add_htlcs: HashMap<u64, Vec<msgs::UpdateAddHTLC>> = new_hash_map();
 		let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
 		if fake_scid_rand_bytes.is_none() {
 			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
@@ -18410,6 +18413,22 @@ where
 		// have a fully-constructed `ChannelManager` at the end.
 		let mut pending_claims_to_replay = Vec::new();

+		// If we find an inbound HTLC that claims to already be forwarded to the outbound edge, we
+		// store an identifier for it here and verify that it is either (a) present in the outbound
+		// edge or (b) removed from the outbound edge via claim. If it's in neither of these states, we
+		// infer that it was removed from the outbound edge via fail, and fail it backwards to ensure
+		// that it is handled.
+		let mut already_forwarded_htlcs = Vec::new();
+		let prune_forwarded_htlc =
+			|already_forwarded_htlcs: &mut Vec<(PaymentHash, HTLCPreviousHopData, u64)>,
+			 prev_hop: &HTLCPreviousHopData| {
+				if let Some(idx) = already_forwarded_htlcs.iter().position(|(_, htlc, _)| {
+					prev_hop.htlc_id == htlc.htlc_id
+						&& prev_hop.prev_outbound_scid_alias == htlc.prev_outbound_scid_alias
+				}) {
+					already_forwarded_htlcs.swap_remove(idx);
+				}
+			};
 		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authorative state
@@ -18432,16 +18451,38 @@ where
 				if reconstruct_manager_from_monitors {
 					if let Some(chan) = peer_state.channel_by_id.get(channel_id) {
 						if let Some(funded_chan) = chan.as_funded() {
+							let scid_alias = funded_chan.context.outbound_scid_alias();
 							let inbound_committed_update_adds =
 								funded_chan.inbound_committed_unresolved_htlcs();
-							if !inbound_committed_update_adds.is_empty() {
-								// Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
-								// `Channel`, as part of removing the requirement to regularly persist the
-								// `ChannelManager`.
-								decode_update_add_htlcs.insert(
-									funded_chan.context.outbound_scid_alias(),
-									inbound_committed_update_adds,
-								);
+							for (payment_hash, htlc) in inbound_committed_update_adds {
+								match htlc {
+									InboundUpdateAdd::WithOnion { update_add_htlc } => {
+										// Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
+										// `Channel` as part of removing the requirement to regularly persist the
+										// `ChannelManager`.
+										match decode_update_add_htlcs.entry(scid_alias) {
+											hash_map::Entry::Occupied(mut entry) => {
+												entry.get_mut().push(update_add_htlc);
+											},
+											hash_map::Entry::Vacant(entry) => {
+												entry.insert(vec![update_add_htlc]);
+											},
+										}
+									},
+									InboundUpdateAdd::Forwarded {
+										hop_data,
+										outbound_amt_msat,
+									} => {
+										already_forwarded_htlcs.push((
+											payment_hash,
+											hop_data,
+											outbound_amt_msat,
+										));
+									},
+									InboundUpdateAdd::Legacy => {
+										return Err(DecodeError::InvalidValue)
+									},
+								}
 							}
 						}
 					}
@@ -18494,6 +18535,7 @@ where
 							"HTLC already forwarded to the outbound edge",
 							&args.logger,
 						);
+						prune_forwarded_htlc(&mut already_forwarded_htlcs, &prev_hop);
 					}
 				}
 			}
@@ -18522,6 +18564,7 @@ where
 							"HTLC already forwarded to the outbound edge",
 							&args.logger,
 						);
+						prune_forwarded_htlc(&mut already_forwarded_htlcs, &prev_hop_data);
 					}

 					if !is_channel_closed || reconstruct_manager_from_monitors {
@@ -19045,6 +19088,7 @@ where
 						"HTLC was failed backwards during manager read",
 						&args.logger,
 					);
+					prune_forwarded_htlc(&mut already_forwarded_htlcs, prev_hop_data);
 				}
 			}

@@ -19164,9 +19208,47 @@ where
 		};

 		let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
-		for (_, monitor) in args.channel_monitors.iter() {
+		for (channel_id, monitor) in args.channel_monitors.iter() {
 			for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages()
 			{
+				// If we have unresolved inbound committed HTLCs that were already forwarded to the
+				// outbound edge and removed via claim, we need to make sure to claim them backwards via
+				// adding them to `pending_claims_to_replay`.
+				for (hash, hop_data, outbound_amt_msat) in
+					mem::take(&mut already_forwarded_htlcs).drain(..)
+				{
+					if hash != payment_hash {
+						already_forwarded_htlcs.push((hash, hop_data, outbound_amt_msat));
+						continue;
+					}
+					let new_pending_claim = !pending_claims_to_replay.iter().any(|(src, _, _, _, _, _, _)| {
+						matches!(src, HTLCSource::PreviousHopData(hop) if hop.htlc_id == hop_data.htlc_id && hop.prev_outbound_scid_alias == hop_data.prev_outbound_scid_alias)
+					});
+					if new_pending_claim {
+						let counterparty_node_id = monitor.get_counterparty_node_id();
+						let is_channel_closed = channel_manager
+							.per_peer_state
+							.read()
+							.unwrap()
+							.get(&counterparty_node_id)
+							.map_or(true, |peer_state_mtx| {
+								!peer_state_mtx
+									.lock()
+									.unwrap()
+									.channel_by_id
+									.contains_key(channel_id)
+							});
+						pending_claims_to_replay.push((
+							HTLCSource::PreviousHopData(hop_data),
+							payment_preimage,
+							outbound_amt_msat,
+							is_channel_closed,
+							counterparty_node_id,
+							monitor.get_funding_txo(),
+							monitor.channel_id(),
+						));
+					}
+				}
 				if !payment_claims.is_empty() {
 					for payment_claim in payment_claims {
 						if processed_claims.contains(&payment_claim.mpp_parts) {
@@ -19408,6 +19490,17 @@ where
 			channel_manager
 				.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action);
 		}
+		for (hash, htlc, _) in already_forwarded_htlcs {
+			let channel_id = htlc.channel_id;
+			let node_id = htlc.counterparty_node_id;
+			let source = HTLCSource::PreviousHopData(htlc);
+			let reason =
+				HTLCFailReason::reason(LocalHTLCFailureReason::TemporaryNodeFailure, Vec::new());
+			let receiver = HTLCHandlingFailureType::Forward { node_id, channel_id };
+			// The event completion action is only relevant for HTLCs that originate from our node, not
+			// forwarded HTLCs.
+			channel_manager.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None);
+		}

 		for (
 			source,
