diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6f411273aab..22dd2a6b099 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -15695,15 +15695,28 @@ where }, None, )); - for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { - let mut found_htlc = false; - for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { - if *channel_htlc_source == monitor_htlc_source { - found_htlc = true; - break; - } - } - if !found_htlc { + // Collecting all outbound HTLC sources into a HashSet allows for efficient, + // O(1) average-time lookups. This also avoids a slow, nested O(n*m) loop later on. + let monitor_htlc_sources: HashSet<HTLCSource> = monitor + .get_all_current_outbound_htlcs() + .into_iter() + .map(|(source, _)| source) + .collect(); + + // Grouping all HTLCs from the `ChannelManager` by their + // `payment_hash` ensures that we treat all identical HTLCs + // as a single logical unit, enabling us to fail them all immediately. + let mut htlcs_by_hash: HashMap<PaymentHash, Vec<HTLCSource>> = new_hash_map(); + for (source, payment_hash) in channel.inflight_htlc_sources() { + htlcs_by_hash.entry(*payment_hash).or_default().push(source.clone()); + } + + for (payment_hash, htlc_sources) in htlcs_by_hash { + let is_any_htlc_missing = htlc_sources + .iter() + .any(|source| !monitor_htlc_sources.contains(source)); + + if is_any_htlc_missing { // If we have some HTLCs in the channel which are not present in the newer // ChannelMonitor, they have been removed and should be failed back to // ensure we don't forget them entirely. 
Note that if the missing HTLC(s) @@ -15714,17 +15727,24 @@ where let logger = WithChannelContext::from( &args.logger, &channel.context, - Some(*payment_hash), + Some(payment_hash), ); log_info!(logger, - "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", - &channel.context.channel_id(), &payment_hash); - failed_htlcs.push(( - channel_htlc_source.clone(), - *payment_hash, - channel.context.get_counterparty_node_id(), - channel.context.channel_id(), - )); + "Failing ALL HTLCs with hash {} as at least one of them is missing in the ChannelMonitor for channel {} but they were present in the (stale) ChannelManager", + &payment_hash, &channel.context.channel_id()); + + // Since at least one HTLC was missing, we now + // iterate through the entire group we collected and add each one to the + // `failed_htlcs` list. This guarantees that all identical HTLCs are + // failed back at the same time, not just the single one we detected was missing. + for source in htlc_sources { + failed_htlcs.push(( + source, + payment_hash, + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + )); + } } } } else { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 388db6d174f..5aa07f6ca5d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -11717,3 +11717,78 @@ pub fn test_funding_signed_event() { nodes[0].node.get_and_clear_pending_msg_events(); nodes[1].node.get_and_clear_pending_msg_events(); } + +#[xtest(feature = "_externalize_tests")] +pub fn test_stale_force_close_with_identical_htlcs() { + // This tests that if a peer force-closes with a stale + // state after we've received two identical HTLCs, we must fail back both + // of them immediately. 
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_a_b = create_announced_chan_between_nodes(&nodes, 0, 1); + let _chan_b_c = create_announced_chan_between_nodes(&nodes, 1, 2); + + let stale_tx = get_local_commitment_txn!(nodes[0], chan_a_b.2)[0].clone(); + + let (payment_preimage, payment_hash, _payment_secret, _payment_id_1) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000); + + *nodes[0].network_payment_count.borrow_mut() -= 1; + let (payment_preimage_2, payment_hash_2, _payment_secret_2, _payment_id_2) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000); + + assert_eq!(payment_hash, payment_hash_2, "Both HTLCs should have identical payment hashes"); + assert_eq!(payment_preimage, payment_preimage_2, "Both HTLCs should have identical preimages"); + + mine_transaction(&nodes[1], &stale_tx); + check_added_monitors!(nodes[1], 1); + + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); + + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + nodes[2] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + let mut fail_htlc_count = 0; + for event in &events { + match event { + MessageSendEvent::UpdateHTLCs { updates, .. 
} => { + fail_htlc_count += updates.update_fail_htlcs.len(); + }, + _ => {}, + } + } + + assert_eq!( + fail_htlc_count, 2, + "Force-close immediately fails 2 committed HTLCs (expected). Got: {}", + fail_htlc_count + ); +}