diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 0de372813b3..973692d1033 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -1490,8 +1490,8 @@ mod tests {
 		assert_eq!(close_tx.len(), 1);
 
 		mine_transaction(&nodes[2], &close_tx[0]);
-		check_added_monitors(&nodes[2], 1);
 		check_closed_broadcast(&nodes[2], 1, true);
+		check_added_monitors(&nodes[2], 1);
 		let closure_reason = ClosureReason::CommitmentTxConfirmed;
 		check_closed_event!(&nodes[2], 1, closure_reason, false, [node_a_id], 1000000);
 
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index f72eea850d5..eea72618168 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -205,6 +205,10 @@ pub enum MonitorEvent {
 	/// channel.
 	HolderForceClosed(OutPoint),
 
+	/// Indicates that we've detected a commitment transaction (either the holder's or the
+	/// counterparty's) being included in a block, and that the channel should be considered closed.
+	CommitmentTxConfirmed(()),
+
 	/// Indicates a [`ChannelMonitor`] update has completed. See
 	/// [`ChannelMonitorUpdateStatus::InProgress`] for more information on how this is used.
 	///
@@ -236,6 +240,7 @@ impl_writeable_tlv_based_enum_upgradable_legacy!(MonitorEvent,
 		(4, channel_id, required),
 	},
 ;
+	(1, CommitmentTxConfirmed),
 	(2, HTLCEvent),
 	(4, HolderForceClosed),
 	// 6 was `UpdateFailed` until LDK 0.0.117
@@ -5088,6 +5093,9 @@ impl ChannelMonitorImpl {
 			);
 			log_info!(logger, "Channel {} closed by funding output spend in txid {txid}", self.channel_id());
+			if !self.funding_spend_seen {
+				self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(()));
+			}
 			self.funding_spend_seen = true;
 
 			let mut balance_spendable_csv = None;
@@ -6371,6 +6379,7 @@ mod tests {
 	use bitcoin::{Sequence, Witness};
 
 	use crate::chain::chaininterface::LowerBoundedFeeEstimator;
+	use crate::events::ClosureReason;
 
 	use super::ChannelMonitorUpdateStep;
 	use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
@@ -6468,22 +6477,26 @@
 		// Build a new ChannelMonitorUpdate which contains both the failing commitment tx update
 		// and provides the claim preimages for the two pending HTLCs. The first update generates
 		// an error, but the point of this test is to ensure the later updates are still applied.
-		let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
-		let mut replay_update = monitor_updates.get(&channel.2).unwrap().iter().next_back().unwrap().clone();
-		assert_eq!(replay_update.updates.len(), 1);
-		if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } = replay_update.updates[0] {
-		} else { panic!(); }
-		replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
-			payment_preimage: payment_preimage_1, payment_info: None,
-		});
-		replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
-			payment_preimage: payment_preimage_2, payment_info: None,
-		});
+		let replay_update = {
+			let monitor_updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
+			let mut replay_update = monitor_updates.get(&channel.2).unwrap().iter().next_back().unwrap().clone();
+			assert_eq!(replay_update.updates.len(), 1);
+			if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } = replay_update.updates[0] {
+			} else { panic!(); }
+			replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
+				payment_preimage: payment_preimage_1, payment_info: None,
+			});
+			replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
+				payment_preimage: payment_preimage_2, payment_info: None,
+			});
+			replay_update
+		};
 
 		let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
 		assert!(
 			pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
 			.is_err());
+
 		// Even though we error'd on the first update, we should still have generated an HTLC claim
 		// transaction
@@ -6495,7 +6508,12 @@
 		assert_eq!(htlc_txn.len(), 2);
 		check_spends!(htlc_txn[0], broadcast_tx);
 		check_spends!(htlc_txn[1], broadcast_tx);
+
+		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+		check_added_monitors(&nodes[1], 1);
 	}
+
 	#[test]
 	fn test_funding_spend_refuses_updates() {
 		do_test_funding_spend_refuses_updates(true);
diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs
index d66ba79cc81..00696515724 100644
--- a/lightning/src/ln/async_signer_tests.rs
+++ b/lightning/src/ln/async_signer_tests.rs
@@ -1010,7 +1010,7 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) {
 
 	// Route an HTLC and set the signer as unavailable.
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
-	route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+	let (_, payment_hash, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
 	if remote_commitment {
 		let message = "Channel force-closed".to_owned();
@@ -1086,16 +1086,14 @@
 	nodes[0].disable_channel_signer_op(&node_b_id, &chan_id, sign_htlc_op);
 
 	mine_transaction(&nodes[0], &commitment_tx);
-	check_added_monitors(&nodes[0], 1);
 	check_closed_broadcast(&nodes[0], 1, true);
-	check_closed_event(
-		&nodes[0],
-		1,
-		ClosureReason::CommitmentTxConfirmed,
-		false,
-		&[node_b_id],
-		100_000,
-	);
+	check_added_monitors(&nodes[0], 1);
+	let closure_reason = if remote_commitment {
+		ClosureReason::CommitmentTxConfirmed
+	} else {
+		ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }
+	};
+	check_closed_event(&nodes[0], 1, closure_reason, false, &[node_b_id], 100_000);
 
 	// If the counterparty broadcast its latest commitment, we need to mine enough blocks for the
 	// HTLC timeout.
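The `CommitmentTxConfirmed` plumbing above moves funding-spend detection from `Channel` into the `ChannelMonitor`: the monitor queues the event the first time it sees the funding output spent, and `ChannelManager` later turns it into a `ClosureReason::CommitmentTxConfirmed` channel closure (see the `channelmanager.rs` hunk further down). Below is a minimal sketch of the one-shot gating on `funding_spend_seen`, using simplified stand-in types rather than LDK's actual `ChannelMonitorImpl` internals:

```rust
// Simplified model of the dedup logic in the channelmonitor.rs hunk above.
// `MonitorState` is a hypothetical stand-in; only the `funding_spend_seen`
// gating mirrors the patch.

#[derive(Debug, PartialEq)]
enum MonitorEvent {
    CommitmentTxConfirmed(()),
}

struct MonitorState {
    funding_spend_seen: bool,
    pending_monitor_events: Vec<MonitorEvent>,
}

impl MonitorState {
    /// Called whenever a confirmed transaction spending the funding output is seen.
    /// Only the first sighting queues an event, so replays (e.g. the same spend
    /// reappearing after a reorg) don't ask the manager to close the channel twice.
    fn on_funding_spend(&mut self) {
        if !self.funding_spend_seen {
            self.pending_monitor_events.push(MonitorEvent::CommitmentTxConfirmed(()));
        }
        self.funding_spend_seen = true;
    }
}

fn main() {
    let mut state =
        MonitorState { funding_spend_seen: false, pending_monitor_events: Vec::new() };
    state.on_funding_spend();
    state.on_funding_spend(); // duplicate sighting: no second event queued
    assert_eq!(state.pending_monitor_events, vec![MonitorEvent::CommitmentTxConfirmed(())]);
}
```

This also appears to be why the test churn throughout the rest of the patch mostly reorders `check_closed_broadcast` before `check_added_monitors`: the close is now driven by monitor-event processing rather than by block connection inside `Channel`, which shifts the point at which tests observe the broadcast and the monitor update.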
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 1302932a8a9..4aa587ab111 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -3895,6 +3895,11 @@ fn do_test_durable_preimages_on_closed_channel(
 			persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 		}
 	}
+	if !close_chans_before_reload {
+		check_closed_broadcast(&nodes[1], 1, true);
+		let reason = ClosureReason::CommitmentTxConfirmed;
+		check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
+	}
 
 	nodes[1].node.timer_tick_occurred();
 	check_added_monitors(&nodes[1], mons_added);
@@ -3907,12 +3912,6 @@
 		.unwrap();
 	check_spends!(bs_preimage_tx, as_closing_tx[0]);
 
-	if !close_chans_before_reload {
-		check_closed_broadcast(&nodes[1], 1, true);
-		let reason = ClosureReason::CommitmentTxConfirmed;
-		check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
-	}
-
 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
 	check_closed_broadcast(&nodes[0], 1, true);
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
@@ -4048,7 +4047,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 	let mut events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
 	expect_payment_forwarded(
-		events.pop().unwrap(),
+		events.remove(0),
 		&nodes[1],
 		&nodes[0],
 		&nodes[2],
@@ -4491,9 +4490,9 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
 	assert_eq!(as_commit_tx.len(), 1);
 
 	mine_transaction(&nodes[1], &as_commit_tx[0]);
+	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
-	check_closed_broadcast!(nodes[1], true);
 
 	// Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim
 	// the payment on C and give B the preimage for it.
@@ -4567,9 +4566,9 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
 	assert_eq!(as_commit_tx.len(), 1);
 
 	mine_transaction(&nodes[1], &as_commit_tx[0]);
+	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
-	check_closed_broadcast!(nodes[1], true);
 
 	// Now that B has a pending payment with the inbound HTLC on a closed channel, claim the
 	// payment on disk, but don't let the `ChannelMonitorUpdate` complete. This should prevent the
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 56d38d5545c..c72559c366b 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -5775,35 +5775,6 @@ where
 		Ok(false)
 	}
 
-	#[rustfmt::skip]
-	fn check_for_funding_tx_spent(
-		&mut self, funding: &FundingScope, tx: &Transaction, logger: &L,
-	) -> Result<(), ClosureReason>
-	where
-		L::Target: Logger,
-	{
-		let funding_txo = match funding.get_funding_txo() {
-			Some(funding_txo) => funding_txo,
-			None => {
-				debug_assert!(false);
-				return Ok(());
-			},
-		};
-
-		for input in tx.input.iter() {
-			if input.previous_output == funding_txo.into_bitcoin_outpoint() {
-				log_info!(
-					logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}",
-					tx.compute_txid(), input.previous_output.txid, input.previous_output.vout,
-					&self.channel_id(),
-				);
-				return Err(ClosureReason::CommitmentTxConfirmed);
-			}
-		}
-
-		Ok(())
-	}
-
 	/// Returns SCIDs that have been associated with the channel's funding transactions.
 	pub fn historical_scids(&self) -> &[u64] {
 		&self.historical_scids[..]
@@ -10005,12 +9976,6 @@ where
 		}
 
 		if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
-			for &(idx, tx) in txdata.iter() {
-				if idx > index_in_block {
-					self.context.check_for_funding_tx_spent(&self.funding, tx, logger)?;
-				}
-			}
-
 			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
 			let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
 			return Ok((Some(FundingConfirmedMessage::Establishment(channel_ready)), announcement_sigs));
@@ -10051,12 +10016,6 @@ where
 				let funding = self.pending_funding.get(confirmed_funding_index).unwrap();
 				if let Some(splice_locked) = pending_splice.check_get_splice_locked(&self.context, funding, height) {
-					for &(idx, tx) in txdata.iter() {
-						if idx > index_in_block {
-							self.context.check_for_funding_tx_spent(funding, tx, logger)?;
-						}
-					}
-
 					log_info!(
 						logger,
 						"Sending splice_locked txid {} to our peer for channel {}",
@@ -10076,13 +10035,6 @@ where
 					return Ok((Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update)), announcement_sigs));
 				}
 			}
-
-			self.context.check_for_funding_tx_spent(&self.funding, tx, logger)?;
-			#[cfg(splicing)]
-			for funding in self.pending_funding.iter() {
-				self.context.check_for_funding_tx_spent(funding, tx, logger)?;
-			}
-		}
 
 		Ok((None, None))
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index e4614351ee9..cc2110a9785 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -11316,6 +11316,22 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 						}
 					}
 				},
+				MonitorEvent::CommitmentTxConfirmed(_) => {
+					let per_peer_state = self.per_peer_state.read().unwrap();
+					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+						let peer_state = &mut *peer_state_lock;
+						if let hash_map::Entry::Occupied(chan_entry) =
+							peer_state.channel_by_id.entry(channel_id)
+						{
+							let reason = ClosureReason::CommitmentTxConfirmed;
+							let err = ChannelError::Close((reason.to_string(), reason));
+							let mut chan = chan_entry.remove();
+							let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
+							failed_channels.push((Err(e), counterparty_node_id));
+						}
+					}
+				},
 				MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
 					self.channel_monitor_updated(
 						&channel_id,
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 8c09a960f48..a0258d32664 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -713,6 +713,39 @@ pub fn test_duplicate_htlc_different_direction_onchain() {
 	assert_eq!(has_both_htlcs, 2);
 
 	mine_transaction(&nodes[0], &remote_txn[0]);
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 3);
+	for e in events {
+		match e {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			MessageSendEvent::HandleError {
+				node_id,
+				action: msgs::ErrorAction::SendErrorMessage { ref msg },
+			} => {
+				assert_eq!(node_id, node_b_id);
+				assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
+			},
+			MessageSendEvent::UpdateHTLCs {
+				ref node_id,
+				updates:
+					msgs::CommitmentUpdate {
+						ref update_add_htlcs,
+						ref update_fulfill_htlcs,
+						ref update_fail_htlcs,
+						ref update_fail_malformed_htlcs,
+						..
+					},
+				..
+			} => {
+				assert!(update_add_htlcs.is_empty());
+				assert!(update_fail_htlcs.is_empty());
+				assert_eq!(update_fulfill_htlcs.len(), 1);
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert_eq!(node_b_id, *node_id);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
 	check_added_monitors(&nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
@@ -752,40 +785,6 @@ pub fn test_duplicate_htlc_different_direction_onchain() {
 		remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(),
 		900
 	);
-
-	let events = nodes[0].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 3);
-	for e in events {
-		match e {
-			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-			MessageSendEvent::HandleError {
-				node_id,
-				action: msgs::ErrorAction::SendErrorMessage { ref msg },
-			} => {
-				assert_eq!(node_id, node_b_id);
-				assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
-			},
-			MessageSendEvent::UpdateHTLCs {
-				ref node_id,
-				updates:
-					msgs::CommitmentUpdate {
-						ref update_add_htlcs,
-						ref update_fulfill_htlcs,
-						ref update_fail_htlcs,
-						ref update_fail_malformed_htlcs,
-						..
-					},
-				..
-			} => {
-				assert!(update_add_htlcs.is_empty());
-				assert!(update_fail_htlcs.is_empty());
-				assert_eq!(update_fulfill_htlcs.len(), 1);
-				assert!(update_fail_malformed_htlcs.is_empty());
-				assert_eq!(node_b_id, *node_id);
-			},
-			_ => panic!("Unexpected event"),
-		}
-	}
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -1003,10 +1002,10 @@ pub fn channel_monitor_network_test() {
 		}
 
 		mine_transaction(&nodes[0], &node_txn[0]);
+		check_closed_broadcast(&nodes[0], 1, true);
 		check_added_monitors(&nodes[0], 1);
 		test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE);
 	}
-	check_closed_broadcast!(nodes[0], true);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
@@ -1032,10 +1031,10 @@ pub fn channel_monitor_network_test() {
 		);
 		test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
 		mine_transaction(&nodes[2], &node_txn[0]);
+		check_closed_broadcast(&nodes[2], 1, true);
 		check_added_monitors(&nodes[2], 1);
 		test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE);
 	}
-	check_closed_broadcast!(nodes[2], true);
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 	assert_eq!(nodes[2].node.list_channels().len(), 1);
 	let node_b_reason =
@@ -1086,10 +1085,10 @@ pub fn channel_monitor_network_test() {
 		// Claim the payment on nodes[3], giving it knowledge of the preimage
 		claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1);
 		mine_transaction(&nodes[3], &node_txn[0]);
+		check_closed_broadcast(&nodes[3], 1, true);
 		check_added_monitors(&nodes[3], 1);
 		check_preimage_claim(&nodes[3], &node_txn);
 	}
-	check_closed_broadcast!(nodes[3], true);
 	assert_eq!(nodes[2].node.list_channels().len(), 0);
 	assert_eq!(nodes[3].node.list_channels().len(), 1);
 	let node_c_reason =
@@ -1241,8 +1240,8 @@ pub fn test_justice_tx_htlc_timeout() {
 		assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output);
 		node_txn.clear();
 	}
-	check_added_monitors(&nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
+	check_added_monitors(&nodes[1], 1);
 	test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
 	mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -1255,8 +1254,8 @@
 		Some(revoked_local_txn[0].clone()),
 		HTLCType::TIMEOUT,
 	);
-	check_added_monitors(&nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
+	check_added_monitors(&nodes[0], 1);
 	// Broadcast revoked HTLC-timeout on node 1
 	mine_transaction(&nodes[1], &node_txn[1]);
 	test_revoked_htlc_claim_txn_broadcast(
@@ -1317,10 +1316,12 @@ pub fn test_justice_tx_htlc_success() {
 		check_spends!(node_txn[0], revoked_local_txn[0]);
 		node_txn.swap_remove(0);
 	}
+	check_closed_broadcast(&nodes[0], 1, true);
 	check_added_monitors(&nodes[0], 1);
 	test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
+	check_closed_broadcast(&nodes[1], 1, true);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 	let node_txn = test_txn_broadcast(
 		&nodes[1],
@@ -1337,7 +1338,6 @@
 		revoked_local_txn[0].clone(),
 	);
 	}
-	get_announce_close_broadcast_events(&nodes, 0, 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }
@@ -1365,8 +1365,8 @@ pub fn revoked_output_claim() {
 	// Inform nodes[1] that nodes[0] broadcast a stale tx
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
-	check_added_monitors(&nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
+	check_added_monitors(&nodes[1], 1);
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
@@ -1427,10 +1427,10 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
 	mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]);
 	mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
+	get_announce_close_broadcast_events(&nodes, 1, 0);
 	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100_000);
-	get_announce_close_broadcast_events(&nodes, 1, 0);
 
 	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
@@ -1498,9 +1498,11 @@ pub fn claim_htlc_outputs() {
 	{
 		mine_transaction(&nodes[0], &revoked_local_txn[0]);
+		check_closed_broadcast(&nodes[0], 1, true);
 		check_added_monitors(&nodes[0], 1);
 		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		mine_transaction(&nodes[1], &revoked_local_txn[0]);
+		check_closed_broadcast(&nodes[1], 1, true);
 		check_added_monitors(&nodes[1], 1);
 		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
@@ -1533,7 +1535,6 @@
 		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 		expect_payment_failed!(nodes[1], payment_hash_2, false);
 	}
-	get_announce_close_broadcast_events(&nodes, 0, 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 }
@@ -1879,20 +1880,10 @@ pub fn test_htlc_on_chain_success() {
 	let txn = vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()];
 	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn));
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
-	{
-		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-		assert_eq!(added_monitors.len(), 1);
-		assert_eq!(added_monitors[0].0, chan_2.2);
-		added_monitors.clear();
-	}
 	let forwarded_events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(forwarded_events.len(), 3);
-	match forwarded_events[0] {
-		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
-		_ => panic!("Unexpected event"),
-	}
 	let chan_id = Some(chan_1.2);
-	match forwarded_events[1] {
+	match forwarded_events[0] {
 		Event::PaymentForwarded {
 			total_fee_earned_msat,
 			prev_channel_id,
@@ -1909,7 +1900,7 @@
 		},
 		_ => panic!(),
 	}
-	match forwarded_events[2] {
+	match forwarded_events[1] {
 		Event::PaymentForwarded {
 			total_fee_earned_msat,
 			prev_channel_id,
@@ -1926,12 +1917,17 @@
 		},
 		_ => panic!(),
 	}
+	match forwarded_events[2] {
+		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
+		_ => panic!("Unexpected event"),
+	}
 	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 	{
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-		assert_eq!(added_monitors.len(), 2);
-		assert_eq!(added_monitors[0].0, chan_1.2);
+		assert_eq!(added_monitors.len(), 3);
+		assert_eq!(added_monitors[0].0, chan_2.2);
 		assert_eq!(added_monitors[1].0, chan_1.2);
+		assert_eq!(added_monitors[2].0, chan_1.2);
 		added_monitors.clear();
 	}
 	assert_eq!(events.len(), 3);
@@ -2433,7 +2429,6 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
-	check_added_monitors(&nodes[1], 1);
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
 	let events = nodes[1].node.get_and_clear_pending_events();
@@ -2452,7 +2447,7 @@
 	)));
 	nodes[1].node.process_pending_htlc_forwards();
-	check_added_monitors(&nodes[1], 1);
+	check_added_monitors(&nodes[1], 2);
 
 	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
@@ -3973,7 +3968,6 @@ pub fn test_static_spendable_outputs_preimage_tx() {
 	expect_payment_claimed!(nodes[1], payment_hash, 3_000_000);
 	check_added_monitors(&nodes[1], 1);
 	mine_transaction(&nodes[1], &commitment_tx[0]);
-	check_added_monitors(&nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	match events[0] {
 		MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -3983,6 +3977,7 @@
 		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 		_ => panic!("Unexepected event"),
 	}
+	check_added_monitors(&nodes[1], 1);
 
 	// Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: preimage tx
@@ -4022,12 +4017,12 @@ pub fn test_static_spendable_outputs_timeout_tx() {
 
 	// Settle A's commitment tx on B' chain
 	mine_transaction(&nodes[1], &commitment_tx[0]);
-	check_added_monitors(&nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	match events[1] {
 		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 		_ => panic!("Unexpected event"),
 	}
+	check_added_monitors(&nodes[1], 1);
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
 	// Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
@@ -4326,14 +4321,9 @@ pub fn test_onchain_to_onchain_claim() {
 	// So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
 	let txn = vec![commitment_tx[0].clone(), c_txn[0].clone()];
 	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn));
-	check_added_monitors(&nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	match events[0] {
-		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
-		_ => panic!("Unexpected event"),
-	}
-	match events[1] {
 		Event::PaymentForwarded {
 			total_fee_earned_msat,
 			prev_channel_id,
@@ -4350,7 +4340,11 @@
 		},
 		_ => panic!("Unexpected event"),
 	}
-	check_added_monitors(&nodes[1], 1);
+	match events[1] {
+		Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	check_added_monitors(&nodes[1], 2);
 	let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 3);
 	let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut msg_events);
@@ -4518,9 +4512,9 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() {
 	// generate (note that the ChannelMonitor doesn't differentiate between HTLCs once it has the
 	// preimage).
 	mine_transaction(&nodes[2], &commitment_txn[0]);
+	check_closed_broadcast(&nodes[2], 1, true);
 	check_added_monitors(&nodes[2], 1);
 	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
-	check_closed_broadcast(&nodes[2], 1, true);
 
 	let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs)
@@ -4617,8 +4611,6 @@ pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 	check_added_monitors(&nodes[1], 1);
 	mine_transaction(&nodes[1], &local_txn[0]);
-	check_added_monitors(&nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	match events[0] {
 		MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4628,6 +4620,8 @@
 		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 		_ => panic!("Unexepected event"),
 	}
+	check_added_monitors(&nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 	let node_tx = {
 		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 1);
@@ -6739,6 +6733,7 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() {
 	// Actually revoke tx by claiming a HTLC
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
 	connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
+	check_closed_broadcast(&nodes[1], 1, true);
 	check_added_monitors(&nodes[1], 1);
 
 	macro_rules! check_broadcasted_txn {
@@ -6874,7 +6869,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 		&nodes[1],
 		&create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]),
 	);
-	check_closed_broadcast!(nodes[1], true);
+	check_closed_broadcast(&nodes[1], 1, true);
 	check_added_monitors(&nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
 	connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
@@ -6899,6 +6894,8 @@
 	let hash_128 = connect_blocks(&nodes[0], 40);
 	let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]);
 	connect_block(&nodes[0], &block_11);
+	check_closed_broadcast(&nodes[0], 1, true);
+	check_added_monitors(&nodes[0], 1);
 	let block_129 = create_dummy_block(
 		block_11.block_hash(),
 		42,
@@ -7005,8 +7002,6 @@
 		assert_eq!(node_txn.len(), 0);
 		node_txn.clear();
 	}
-	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors(&nodes[0], 1);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7042,7 +7037,9 @@ pub fn test_bump_penalty_txn_on_remote_commitment() {
 	// Claim a HTLC without revocation (provide B monitor with preimage)
 	nodes[1].node.claim_funds(payment_preimage);
 	expect_payment_claimed!(nodes[1], payment_hash, htlc_value_a_msats);
+	let _ = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	mine_transaction(&nodes[1], &remote_txn[0]);
+	check_closed_broadcast(&nodes[1], 1, true);
 	check_added_monitors(&nodes[1], 2);
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
@@ -11558,12 +11555,33 @@ pub fn test_funding_and_commitment_tx_confirm_same_block() {
 	mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
 	mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);
 
-	check_closed_broadcast(&nodes[0], 1, true);
+	let check_msg_events = |node: &Node| {
+		let mut msg_events = node.node.get_and_clear_pending_msg_events();
+		assert_eq!(msg_events.len(), 3, "{msg_events:?}");
+		if let MessageSendEvent::SendChannelReady { .. } = msg_events.remove(0) {
+		} else {
+			panic!();
+		}
+		if let MessageSendEvent::HandleError {
+			action: msgs::ErrorAction::SendErrorMessage { .. },
+			node_id: _,
+		} = msg_events.remove(0)
+		{
+		} else {
+			panic!();
+		}
+		if let MessageSendEvent::BroadcastChannelUpdate { ref msg } = msg_events.remove(0) {
+			assert_eq!(msg.contents.channel_flags & 2, 2);
+		} else {
+			panic!();
+		}
+	};
+	check_msg_events(&nodes[0]);
 	check_added_monitors(&nodes[0], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 1_000_000);
-	check_closed_broadcast(&nodes[1], 1, true);
+	check_msg_events(&nodes[1]);
 	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 1_000_000);
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 11dd13317e0..417f892a079 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -82,8 +82,8 @@ fn chanmon_fail_from_stale_commitment() {
 	// Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment
 	// transaction for nodes[1].
 	mine_transaction(&nodes[1], &bs_txn[0]);
-	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -138,9 +138,9 @@ fn revoked_output_htlc_resolution_timing() {
 	// Confirm the revoked commitment transaction, closing the channel.
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
+	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
-	check_closed_broadcast!(nodes[1], true);
 
 	// Two justice transactions will be broadcast, one on the unpinnable, revoked to_self output,
 	// and one on the pinnable revoked HTLC output.
@@ -658,14 +658,14 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
 	assert_eq!(remote_txn[0].output[b_broadcast_txn[0].input[0].previous_output.vout as usize].value.to_sat(), 3_000);
 	assert_eq!(remote_txn[0].output[b_broadcast_txn[1].input[0].previous_output.vout as usize].value.to_sat(), 4_000);
 
-	assert!(nodes[0].node.list_channels().is_empty());
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
-	assert!(nodes[1].node.list_channels().is_empty());
+	assert!(nodes[0].node.list_channels().is_empty());
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	assert!(nodes[1].node.list_channels().is_empty());
 
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -944,8 +944,8 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) {
 	// Get nodes[1]'s HTLC claim tx for the second HTLC
 	mine_transaction(&nodes[1], &commitment_tx);
-	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
 	let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_htlc_claim_txn.len(), 1);
@@ -1162,16 +1162,16 @@ fn test_no_preimage_inbound_htlc_balances() {
 	mine_transaction(&nodes[0], &as_txn[0]);
 	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-	check_added_monitors!(nodes[0], 1);
 	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
 
 	assert_eq!(as_pre_spend_claims,
 		sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances()));
 
 	mine_transaction(&nodes[1], &as_txn[0]);
-	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
 
 	let node_b_commitment_claimable = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
@@ -2743,8 +2743,8 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 	for node in &nodes {
 		mine_transactions(node, &[&revoked_commitment_txs[0], &anchor_txs[0], &revoked_commitment_txs[1], &anchor_txs[1]]);
 	}
-	check_added_monitors!(&nodes[0], 2);
 	check_closed_broadcast(&nodes[0], 2, true);
+	check_added_monitors!(&nodes[0], 2);
 	check_closed_event!(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id(); 2], 1000000);
 
 	// Alice should detect the confirmed revoked commitments, and attempt to claim all of the
@@ -2989,6 +2989,7 @@ fn do_test_anchors_monitor_fixes_counterparty_payment_script_on_reload(confirm_c
 		// with the incorrect P2WPKH script but reading it with the correct P2WSH script.
 		*nodes[1].chain_monitor.expect_monitor_round_trip_fail.lock().unwrap() = Some(chan_id);
 		let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
+		check_closed_broadcast(&nodes[1], 1, true);
 		let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
 		reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
 		commitment_tx_conf_height
@@ -2996,8 +2997,8 @@
 		let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
 		reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
 		let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
-		check_added_monitors(&nodes[1], 1);
 		check_closed_broadcast(&nodes[1], 1, true);
+		check_added_monitors(&nodes[1], 1);
 		commitment_tx_conf_height
 	};
 	check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
@@ -3071,13 +3072,14 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
 	};
 
 	mine_transaction(closing_node, &commitment_tx);
-	check_added_monitors!(closing_node, 1);
 	check_closed_broadcast!(closing_node, true);
-	check_closed_event!(closing_node, 1, ClosureReason::CommitmentTxConfirmed, [other_node.node.get_our_node_id()], 1_000_000);
+	check_added_monitors!(closing_node, 1);
+	let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string();
+	check_closed_event!(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, [other_node.node.get_our_node_id()], 1_000_000);
 
 	mine_transaction(other_node, &commitment_tx);
-	check_added_monitors!(other_node, 1);
 	check_closed_broadcast!(other_node, true);
+	check_added_monitors!(other_node, 1);
 	check_closed_event!(other_node, 1, ClosureReason::CommitmentTxConfirmed, [closing_node.node.get_our_node_id()], 1_000_000);
 
 	// If we update the best block to the new height before providing the confirmed transactions,
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 4c63bfa4f8e..67c7599a6c0 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -4270,8 +4270,8 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
 	assert_eq!(bs_tx.len(), 1);
 
 	mine_transaction(&nodes[3], &bs_tx[0]);
-	check_added_monitors(&nodes[3], 1);
 	check_closed_broadcast(&nodes[3], 1, true);
+	check_added_monitors(&nodes[3], 1);
 	let reason = ClosureReason::CommitmentTxConfirmed;
 	check_closed_event!(&nodes[3], 1, reason, false, [node_b_id], 1000000);
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 93b2a050776..d85c95c0a1d 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -78,8 +78,8 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 	// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
 	connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone()));
-	check_added_monitors!(nodes[2], 1);
 	check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
+	check_added_monitors!(nodes[2], 1);
 	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 	let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim
@@ -112,8 +112,8 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 		// ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
 		vec![node_2_commitment_txn.pop().unwrap()]
 	};
-	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
+	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 	// Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
@@ -210,9 +210,9 @@ fn test_counterparty_revoked_reorg() {
 	// Now mine A's old commitment transaction, which should close the channel, but take no action
 	// on any of the HTLCs, at least until we get six confirmations (which we won't get).
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
+	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
-	check_closed_broadcast!(nodes[1], true);
 
 	// Connect up to one block before the revoked transaction would be considered final, then do a
 	// reorg that disconnects the full chain and goes up to the height at which the revoked
@@ -569,12 +569,12 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
 	mine_transaction(&nodes[0], &remote_txn_a[0]);
 	mine_transaction(&nodes[1], &remote_txn_a[0]);
 
-	assert!(nodes[0].node.list_channels().is_empty());
 	check_closed_broadcast!(nodes[0], true);
+	assert!(nodes[0].node.list_channels().is_empty());
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
-	assert!(nodes[1].node.list_channels().is_empty());
 	check_closed_broadcast!(nodes[1], true);
+	assert!(nodes[1].node.list_channels().is_empty());
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index dc89981c0e4..054842abd9b 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -370,11 +370,11 @@ fn expect_channel_shutdown_state_with_force_closure() {
 	assert_eq!(node_txn.len(), 1);
 	check_spends!(node_txn[0], chan_1.3);
 	mine_transaction(&nodes[0], &node_txn[0]);
+	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[1].node.list_channels().is_empty());
-	check_closed_broadcast!(nodes[0], true);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 	let reason_b = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
 	check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000);
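A serialization note on the `(1, CommitmentTxConfirmed)` registration in the `channelmonitor.rs` hunk: the TLV type is odd, so under the usual lightning "it's OK to be odd" convention (which LDK's upgradable-enum serialization macros follow) an old deserializer that has never heard of the variant can skip it instead of failing, keeping downgrades safe. A toy illustration of the skip-odd/fail-even rule — the (type, length, value) byte format here is hypothetical, not LDK's actual `MonitorEvent` encoding:

```rust
// Hypothetical TLV-style stream reader demonstrating the odd/even rule:
// unknown odd types are skipped, unknown even types are a hard error.

fn read_events(mut buf: &[u8]) -> Result<Vec<u64>, &'static str> {
    let mut known = Vec::new();
    while !buf.is_empty() {
        if buf.len() < 2 {
            return Err("truncated record header");
        }
        let (typ, len) = (buf[0] as u64, buf[1] as usize);
        if buf.len() < 2 + len {
            return Err("truncated record value");
        }
        match typ {
            // Types this (old) reader understands, e.g. HTLCEvent/HolderForceClosed.
            2 | 4 => known.push(typ),
            // Unknown odd type, e.g. the new `1` for CommitmentTxConfirmed: ignore it.
            t if t % 2 == 1 => {},
            // Unknown even type: the stream is unreadable for this version.
            _ => return Err("unknown even type"),
        }
        buf = &buf[2 + len..];
    }
    Ok(known)
}

fn main() {
    // An old reader hits type 1 (unknown to it), skips it, and still reads type 2.
    let stream = [1u8, 0, 2, 1, 0xaa];
    assert_eq!(read_events(&stream), Ok(vec![2]));
}
```

Had the new variant taken an even type instead, pre-upgrade nodes would fail to read any pending monitor events written by a newer version, which is why additive, safely-ignorable events get odd types.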