Skip to content

Commit e41ee6d

Browse files
Don't double-forward HTLCs in rebuilt update_adds map
We recently began reconstructing ChannelManager::decode_update_add_htlcs on startup, using data present in the Channels. However, we failed to prune HTLCs from this rebuilt map when a given HTLC had already been forwarded to the outbound edge (we pruned correctly if the outbound edge was a closed channel, but not otherwise). Here we fix this bug, which would have caused us to double-forward inbound HTLCs.
1 parent 593d6aa commit e41ee6d

File tree

2 files changed

+170
-103
lines changed

2 files changed

+170
-103
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 114 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -18018,33 +18018,32 @@ where
1801818018
is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
1801918019
}
1802018020

18021-
if is_channel_closed {
18022-
for (htlc_source, (htlc, preimage_opt)) in
18023-
monitor.get_all_current_outbound_htlcs()
18024-
{
18025-
let logger = WithChannelMonitor::from(
18026-
&args.logger,
18027-
monitor,
18028-
Some(htlc.payment_hash),
18029-
);
18030-
let htlc_id = SentHTLCId::from_source(&htlc_source);
18031-
match htlc_source {
18032-
HTLCSource::PreviousHopData(prev_hop_data) => {
18033-
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
18034-
info.prev_funding_outpoint == prev_hop_data.outpoint
18035-
&& info.prev_htlc_id == prev_hop_data.htlc_id
18036-
};
18021+
for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs()
18022+
{
18023+
let logger =
18024+
WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
18025+
let htlc_id = SentHTLCId::from_source(&htlc_source);
18026+
match htlc_source {
18027+
HTLCSource::PreviousHopData(prev_hop_data) => {
18028+
let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
18029+
info.prev_funding_outpoint == prev_hop_data.outpoint
18030+
&& info.prev_htlc_id == prev_hop_data.htlc_id
18031+
};
18032+
// We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above
18033+
// loop, but we need to prune from those added HTLCs if they were already forwarded to
18034+
// the outbound edge. Otherwise, we'll double-forward.
18035+
dedup_decode_update_add_htlcs(
18036+
&mut decode_update_add_htlcs,
18037+
&prev_hop_data,
18038+
"HTLC was forwarded to the closed channel",
18039+
&args.logger,
18040+
);
18041+
if is_channel_closed {
1803718042
// The ChannelMonitor is now responsible for this HTLC's
1803818043
// failure/success and will let us know what its outcome is. If we
1803918044
// still have an entry for this HTLC in `forward_htlcs`,
1804018045
// `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
1804118046
// persisted after the monitor was when forwarding the payment.
18042-
dedup_decode_update_add_htlcs(
18043-
&mut decode_update_add_htlcs,
18044-
&prev_hop_data,
18045-
"HTLC was forwarded to the closed channel",
18046-
&args.logger,
18047-
);
1804818047
dedup_decode_update_add_htlcs(
1804918048
&mut decode_update_add_htlcs_legacy,
1805018049
&prev_hop_data,
@@ -18075,99 +18074,111 @@ where
1807518074
false
1807618075
} else { true }
1807718076
});
18078-
},
18079-
HTLCSource::OutboundRoute {
18080-
payment_id,
18081-
session_priv,
18082-
path,
18083-
bolt12_invoice,
18084-
..
18085-
} => {
18086-
if let Some(preimage) = preimage_opt {
18087-
let pending_events = Mutex::new(pending_events_read);
18088-
let update = PaymentCompleteUpdate {
18089-
counterparty_node_id: monitor.get_counterparty_node_id(),
18090-
channel_funding_outpoint: monitor.get_funding_txo(),
18091-
channel_id: monitor.channel_id(),
18092-
htlc_id,
18093-
};
18094-
let mut compl_action = Some(
18077+
}
18078+
},
18079+
HTLCSource::OutboundRoute {
18080+
payment_id,
18081+
session_priv,
18082+
path,
18083+
bolt12_invoice,
18084+
..
18085+
} => {
18086+
if !is_channel_closed {
18087+
continue;
18088+
}
18089+
if let Some(preimage) = preimage_opt {
18090+
let pending_events = Mutex::new(pending_events_read);
18091+
let update = PaymentCompleteUpdate {
18092+
counterparty_node_id: monitor.get_counterparty_node_id(),
18093+
channel_funding_outpoint: monitor.get_funding_txo(),
18094+
channel_id: monitor.channel_id(),
18095+
htlc_id,
18096+
};
18097+
let mut compl_action = Some(
1809518098
EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update)
1809618099
);
18097-
pending_outbounds.claim_htlc(
18098-
payment_id,
18099-
preimage,
18100-
bolt12_invoice,
18101-
session_priv,
18102-
path,
18103-
true,
18104-
&mut compl_action,
18105-
&pending_events,
18106-
);
18107-
// If the completion action was not consumed, then there was no
18108-
// payment to claim, and we need to tell the `ChannelMonitor`
18109-
// we don't need to hear about the HTLC again, at least as long
18110-
// as the PaymentSent event isn't still sitting around in our
18111-
// event queue.
18112-
let have_action = if compl_action.is_some() {
18113-
let pending_events = pending_events.lock().unwrap();
18114-
pending_events.iter().any(|(_, act)| *act == compl_action)
18115-
} else {
18116-
false
18117-
};
18118-
if !have_action && compl_action.is_some() {
18119-
let mut peer_state = per_peer_state
18120-
.get(&counterparty_node_id)
18121-
.map(|state| state.lock().unwrap())
18122-
.expect("Channels originating a preimage must have peer state");
18123-
let update_id = peer_state
18124-
.closed_channel_monitor_update_ids
18125-
.get_mut(channel_id)
18126-
.expect("Channels originating a preimage must have a monitor");
18127-
// Note that for channels closed pre-0.1, the latest
18128-
// update_id is `u64::MAX`.
18129-
*update_id = update_id.saturating_add(1);
18130-
18131-
pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
18132-
counterparty_node_id: monitor.get_counterparty_node_id(),
18100+
pending_outbounds.claim_htlc(
18101+
payment_id,
18102+
preimage,
18103+
bolt12_invoice,
18104+
session_priv,
18105+
path,
18106+
true,
18107+
&mut compl_action,
18108+
&pending_events,
18109+
);
18110+
// If the completion action was not consumed, then there was no
18111+
// payment to claim, and we need to tell the `ChannelMonitor`
18112+
// we don't need to hear about the HTLC again, at least as long
18113+
// as the PaymentSent event isn't still sitting around in our
18114+
// event queue.
18115+
let have_action = if compl_action.is_some() {
18116+
let pending_events = pending_events.lock().unwrap();
18117+
pending_events.iter().any(|(_, act)| *act == compl_action)
18118+
} else {
18119+
false
18120+
};
18121+
if !have_action && compl_action.is_some() {
18122+
let mut peer_state = per_peer_state
18123+
.get(&counterparty_node_id)
18124+
.map(|state| state.lock().unwrap())
18125+
.expect(
18126+
"Channels originating a preimage must have peer state",
18127+
);
18128+
let update_id = peer_state
18129+
.closed_channel_monitor_update_ids
18130+
.get_mut(channel_id)
18131+
.expect(
18132+
"Channels originating a preimage must have a monitor",
18133+
);
18134+
// Note that for channels closed pre-0.1, the latest
18135+
// update_id is `u64::MAX`.
18136+
*update_id = update_id.saturating_add(1);
18137+
18138+
pending_background_events.push(
18139+
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
18140+
counterparty_node_id: monitor
18141+
.get_counterparty_node_id(),
1813318142
funding_txo: monitor.get_funding_txo(),
1813418143
channel_id: monitor.channel_id(),
1813518144
update: ChannelMonitorUpdate {
1813618145
update_id: *update_id,
1813718146
channel_id: Some(monitor.channel_id()),
18138-
updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
18147+
updates: vec![
18148+
ChannelMonitorUpdateStep::ReleasePaymentComplete {
1813918149
htlc: htlc_id,
18140-
}],
18150+
},
18151+
],
1814118152
},
18142-
});
18143-
}
18144-
pending_events_read = pending_events.into_inner().unwrap();
18153+
},
18154+
);
1814518155
}
18146-
},
18147-
}
18156+
pending_events_read = pending_events.into_inner().unwrap();
18157+
}
18158+
},
1814818159
}
18149-
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
18150-
log_info!(
18151-
args.logger,
18152-
"Failing HTLC with payment hash {} as it was resolved on-chain.",
18153-
payment_hash
18154-
);
18155-
let completion_action = Some(PaymentCompleteUpdate {
18156-
counterparty_node_id: monitor.get_counterparty_node_id(),
18157-
channel_funding_outpoint: monitor.get_funding_txo(),
18158-
channel_id: monitor.channel_id(),
18159-
htlc_id: SentHTLCId::from_source(&htlc_source),
18160-
});
18160+
}
18161+
for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
18162+
log_info!(
18163+
args.logger,
18164+
"Failing HTLC with payment hash {} as it was resolved on-chain.",
18165+
payment_hash
18166+
);
18167+
let completion_action = Some(PaymentCompleteUpdate {
18168+
counterparty_node_id: monitor.get_counterparty_node_id(),
18169+
channel_funding_outpoint: monitor.get_funding_txo(),
18170+
channel_id: monitor.channel_id(),
18171+
htlc_id: SentHTLCId::from_source(&htlc_source),
18172+
});
1816118173

18162-
failed_htlcs.push((
18163-
htlc_source,
18164-
payment_hash,
18165-
monitor.get_counterparty_node_id(),
18166-
monitor.channel_id(),
18167-
LocalHTLCFailureReason::OnChainTimeout,
18168-
completion_action,
18169-
));
18170-
}
18174+
failed_htlcs.push((
18175+
htlc_source,
18176+
payment_hash,
18177+
monitor.get_counterparty_node_id(),
18178+
monitor.channel_id(),
18179+
LocalHTLCFailureReason::OnChainTimeout,
18180+
completion_action,
18181+
));
1817118182
}
1817218183

1817318184
// Whether the downstream channel was closed or not, try to re-apply any payment

lightning/src/ln/reload_tests.rs

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1258,6 +1258,62 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) {
12581258
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
12591259
}
12601260

1261+
#[test]
1262+
fn test_manager_persisted_post_outbound_edge_forward() {
1263+
let chanmon_cfgs = create_chanmon_cfgs(3);
1264+
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1265+
let persister;
1266+
let new_chain_monitor;
1267+
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1268+
let nodes_1_deserialized;
1269+
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1270+
1271+
let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1272+
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
1273+
1274+
// Lock in the HTLC from node_a <> node_b.
1275+
let amt_msat = 5000;
1276+
let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
1277+
nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
1278+
check_added_monitors(&nodes[0], 1);
1279+
let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
1280+
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
1281+
do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false);
1282+
1283+
// Add the HTLC to the outbound edge, node_b <> node_c.
1284+
nodes[1].node.process_pending_htlc_forwards();
1285+
check_added_monitors(&nodes[1], 1);
1286+
1287+
// Disconnect peers and reload the forwarding node_b.
1288+
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
1289+
nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
1290+
1291+
let node_b_encoded = nodes[1].node.encode();
1292+
let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
1293+
let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
1294+
reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
1295+
1296+
reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0]));
1297+
let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]);
1298+
args_b_c.send_channel_ready = (true, true);
1299+
args_b_c.send_announcement_sigs = (true, true);
1300+
args_b_c.pending_htlc_adds = (0, 1);
1301+
// While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c
1302+
// channel.
1303+
reconnect_nodes(args_b_c);
1304+
1305+
// Ensure node_b won't double-forward the outbound HTLC (this was previously broken).
1306+
nodes[1].node.process_pending_htlc_forwards();
1307+
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1308+
1309+
// Claim the HTLC backwards to node_a.
1310+
expect_and_process_pending_htlcs(&nodes[2], false);
1311+
expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id());
1312+
let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]];
1313+
do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage));
1314+
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
1315+
}
1316+
12611317
#[test]
12621318
fn test_reload_partial_funding_batch() {
12631319
let chanmon_cfgs = create_chanmon_cfgs(3);

0 commit comments

Comments
 (0)