Skip to content

Commit 3f383b0

Browse files
Don't double-forward HTLCs in rebuilt update_adds map
We recently began reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map if a given HTLC had already been forwarded to the outbound edge (we pruned correctly if the outbound edge was a closed channel, but not otherwise). Here we fix this bug, which would have caused us to double-forward inbound HTLCs.
1 parent 62c5849 commit 3f383b0

File tree

2 files changed

+170
-103
lines changed

2 files changed

+170
-103
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 114 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -18015,33 +18015,32 @@ where
1801518015
is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
1801618016
}
1801718017

18018-
if is_channel_closed {
18019-
for (htlc_source, (htlc, preimage_opt)) in
18020-
monitor.get_all_current_outbound_htlcs()
18021-
{
18022-
let logger = WithChannelMonitor::from(
18023-
&args.logger,
18024-
monitor,
18025-
Some(htlc.payment_hash),
18026-
);
18027-
let htlc_id = SentHTLCId::from_source(&htlc_source);
18028-
match htlc_source {
18029-
HTLCSource::PreviousHopData(prev_hop_data) => {
18030-
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
18031-
info.prev_funding_outpoint == prev_hop_data.outpoint
18032-
&& info.prev_htlc_id == prev_hop_data.htlc_id
18033-
};
18018+
for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs()
18019+
{
18020+
let logger =
18021+
WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
18022+
let htlc_id = SentHTLCId::from_source(&htlc_source);
18023+
match htlc_source {
18024+
HTLCSource::PreviousHopData(prev_hop_data) => {
18025+
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
18026+
info.prev_funding_outpoint == prev_hop_data.outpoint
18027+
&& info.prev_htlc_id == prev_hop_data.htlc_id
18028+
};
18029+
// We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above
18030+
// loop, but we need to prune from those added HTLCs if they were already forwarded to
18031+
// the outbound edge. Otherwise, we'll double-forward.
18032+
dedup_decode_update_add_htlcs(
18033+
&mut decode_update_add_htlcs,
18034+
&prev_hop_data,
18035+
"HTLC was forwarded to the closed channel",
18036+
&args.logger,
18037+
);
18038+
if is_channel_closed {
1803418039
// The ChannelMonitor is now responsible for this HTLC's
1803518040
// failure/success and will let us know what its outcome is. If we
1803618041
// still have an entry for this HTLC in `forward_htlcs`,
1803718042
// `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
1803818043
// persisted after the monitor was when forwarding the payment.
18039-
dedup_decode_update_add_htlcs(
18040-
&mut decode_update_add_htlcs,
18041-
&prev_hop_data,
18042-
"HTLC was forwarded to the closed channel",
18043-
&args.logger,
18044-
);
1804518044
dedup_decode_update_add_htlcs(
1804618045
&mut decode_update_add_htlcs_legacy,
1804718046
&prev_hop_data,
@@ -18072,99 +18071,111 @@ where
1807218071
false
1807318072
} else { true }
1807418073
});
18075-
},
18076-
HTLCSource::OutboundRoute {
18077-
payment_id,
18078-
session_priv,
18079-
path,
18080-
bolt12_invoice,
18081-
..
18082-
} => {
18083-
if let Some(preimage) = preimage_opt {
18084-
let pending_events = Mutex::new(pending_events_read);
18085-
let update = PaymentCompleteUpdate {
18086-
counterparty_node_id: monitor.get_counterparty_node_id(),
18087-
channel_funding_outpoint: monitor.get_funding_txo(),
18088-
channel_id: monitor.channel_id(),
18089-
htlc_id,
18090-
};
18091-
let mut compl_action = Some(
18074+
}
18075+
},
18076+
HTLCSource::OutboundRoute {
18077+
payment_id,
18078+
session_priv,
18079+
path,
18080+
bolt12_invoice,
18081+
..
18082+
} => {
18083+
if !is_channel_closed {
18084+
continue;
18085+
}
18086+
if let Some(preimage) = preimage_opt {
18087+
let pending_events = Mutex::new(pending_events_read);
18088+
let update = PaymentCompleteUpdate {
18089+
counterparty_node_id: monitor.get_counterparty_node_id(),
18090+
channel_funding_outpoint: monitor.get_funding_txo(),
18091+
channel_id: monitor.channel_id(),
18092+
htlc_id,
18093+
};
18094+
let mut compl_action = Some(
1809218095
EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update)
1809318096
);
18094-
pending_outbounds.claim_htlc(
18095-
payment_id,
18096-
preimage,
18097-
bolt12_invoice,
18098-
session_priv,
18099-
path,
18100-
true,
18101-
&mut compl_action,
18102-
&pending_events,
18103-
);
18104-
// If the completion action was not consumed, then there was no
18105-
// payment to claim, and we need to tell the `ChannelMonitor`
18106-
// we don't need to hear about the HTLC again, at least as long
18107-
// as the PaymentSent event isn't still sitting around in our
18108-
// event queue.
18109-
let have_action = if compl_action.is_some() {
18110-
let pending_events = pending_events.lock().unwrap();
18111-
pending_events.iter().any(|(_, act)| *act == compl_action)
18112-
} else {
18113-
false
18114-
};
18115-
if !have_action && compl_action.is_some() {
18116-
let mut peer_state = per_peer_state
18117-
.get(&counterparty_node_id)
18118-
.map(|state| state.lock().unwrap())
18119-
.expect("Channels originating a preimage must have peer state");
18120-
let update_id = peer_state
18121-
.closed_channel_monitor_update_ids
18122-
.get_mut(channel_id)
18123-
.expect("Channels originating a preimage must have a monitor");
18124-
// Note that for channels closed pre-0.1, the latest
18125-
// update_id is `u64::MAX`.
18126-
*update_id = update_id.saturating_add(1);
18127-
18128-
pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
18129-
counterparty_node_id: monitor.get_counterparty_node_id(),
18097+
pending_outbounds.claim_htlc(
18098+
payment_id,
18099+
preimage,
18100+
bolt12_invoice,
18101+
session_priv,
18102+
path,
18103+
true,
18104+
&mut compl_action,
18105+
&pending_events,
18106+
);
18107+
// If the completion action was not consumed, then there was no
18108+
// payment to claim, and we need to tell the `ChannelMonitor`
18109+
// we don't need to hear about the HTLC again, at least as long
18110+
// as the PaymentSent event isn't still sitting around in our
18111+
// event queue.
18112+
let have_action = if compl_action.is_some() {
18113+
let pending_events = pending_events.lock().unwrap();
18114+
pending_events.iter().any(|(_, act)| *act == compl_action)
18115+
} else {
18116+
false
18117+
};
18118+
if !have_action && compl_action.is_some() {
18119+
let mut peer_state = per_peer_state
18120+
.get(&counterparty_node_id)
18121+
.map(|state| state.lock().unwrap())
18122+
.expect(
18123+
"Channels originating a preimage must have peer state",
18124+
);
18125+
let update_id = peer_state
18126+
.closed_channel_monitor_update_ids
18127+
.get_mut(channel_id)
18128+
.expect(
18129+
"Channels originating a preimage must have a monitor",
18130+
);
18131+
// Note that for channels closed pre-0.1, the latest
18132+
// update_id is `u64::MAX`.
18133+
*update_id = update_id.saturating_add(1);
18134+
18135+
pending_background_events.push(
18136+
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
18137+
counterparty_node_id: monitor
18138+
.get_counterparty_node_id(),
1813018139
funding_txo: monitor.get_funding_txo(),
1813118140
channel_id: monitor.channel_id(),
1813218141
update: ChannelMonitorUpdate {
1813318142
update_id: *update_id,
1813418143
channel_id: Some(monitor.channel_id()),
18135-
updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
18144+
updates: vec![
18145+
ChannelMonitorUpdateStep::ReleasePaymentComplete {
1813618146
htlc: htlc_id,
18137-
}],
18147+
},
18148+
],
1813818149
},
18139-
});
18140-
}
18141-
pending_events_read = pending_events.into_inner().unwrap();
18150+
},
18151+
);
1814218152
}
18143-
},
18144-
}
18153+
pending_events_read = pending_events.into_inner().unwrap();
18154+
}
18155+
},
1814518156
}
18146-
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
18147-
log_info!(
18148-
args.logger,
18149-
"Failing HTLC with payment hash {} as it was resolved on-chain.",
18150-
payment_hash
18151-
);
18152-
let completion_action = Some(PaymentCompleteUpdate {
18153-
counterparty_node_id: monitor.get_counterparty_node_id(),
18154-
channel_funding_outpoint: monitor.get_funding_txo(),
18155-
channel_id: monitor.channel_id(),
18156-
htlc_id: SentHTLCId::from_source(&htlc_source),
18157-
});
18157+
}
18158+
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
18159+
log_info!(
18160+
args.logger,
18161+
"Failing HTLC with payment hash {} as it was resolved on-chain.",
18162+
payment_hash
18163+
);
18164+
let completion_action = Some(PaymentCompleteUpdate {
18165+
counterparty_node_id: monitor.get_counterparty_node_id(),
18166+
channel_funding_outpoint: monitor.get_funding_txo(),
18167+
channel_id: monitor.channel_id(),
18168+
htlc_id: SentHTLCId::from_source(&htlc_source),
18169+
});
1815818170

18159-
failed_htlcs.push((
18160-
htlc_source,
18161-
payment_hash,
18162-
monitor.get_counterparty_node_id(),
18163-
monitor.channel_id(),
18164-
LocalHTLCFailureReason::OnChainTimeout,
18165-
completion_action,
18166-
));
18167-
}
18171+
failed_htlcs.push((
18172+
htlc_source,
18173+
payment_hash,
18174+
monitor.get_counterparty_node_id(),
18175+
monitor.channel_id(),
18176+
LocalHTLCFailureReason::OnChainTimeout,
18177+
completion_action,
18178+
));
1816818179
}
1816918180

1817018181
// Whether the downstream channel was closed or not, try to re-apply any payment

lightning/src/ln/reload_tests.rs

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1258,6 +1258,62 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) {
12581258
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
12591259
}
12601260

1261+
#[test]
1262+
fn test_manager_persisted_post_outbound_edge_forward() {
1263+
let chanmon_cfgs = create_chanmon_cfgs(3);
1264+
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1265+
let persister;
1266+
let new_chain_monitor;
1267+
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1268+
let nodes_1_deserialized;
1269+
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1270+
1271+
let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1272+
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
1273+
1274+
// Lock in the HTLC from node_a <> node_b.
1275+
let amt_msat = 5000;
1276+
let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
1277+
nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1278+
check_added_monitors(&nodes[0], 1);
1279+
let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
1280+
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
1281+
do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false);
1282+
1283+
// Add the HTLC to the outbound edge, node_b <> node_c.
1284+
nodes[1].node.process_pending_htlc_forwards();
1285+
check_added_monitors(&nodes[1], 1);
1286+
1287+
// Disconnect peers and reload the forwarding node_b.
1288+
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
1289+
nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
1290+
1291+
let node_b_encoded = nodes[1].node.encode();
1292+
let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
1293+
let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
1294+
reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
1295+
1296+
reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
1297+
let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
1298+
args_b_c.send_channel_ready = (true, true);
1299+
args_b_c.send_announcement_sigs = (true, true);
1300+
args_b_c.pending_htlc_adds = (0, 1);
1301+
// While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c
1302+
// channel.
1303+
reconnect_nodes(args_b_c);
1304+
1305+
// Ensure node_b won't double-forward the outbound HTLC (this was previously broken).
1306+
nodes[1].node.process_pending_htlc_forwards();
1307+
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1308+
1309+
// Claim the HTLC backwards to node_a.
1310+
expect_and_process_pending_htlcs(&nodes[2], false);
1311+
expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
1312+
let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
1313+
do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
1314+
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
1315+
}
1316+
12611317
#[test]
12621318
fn test_reload_partial_funding_batch() {
12631319
let chanmon_cfgs = create_chanmon_cfgs(3);

0 commit comments

Comments
 (0)