diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs
index d56670f4d67..3212f748f6c 100644
--- a/lightning/src/ln/async_payments_tests.rs
+++ b/lightning/src/ln/async_payments_tests.rs
@@ -2167,8 +2167,7 @@ fn offer_cache_round_trip_ser() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let payee_node_deserialized;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-	let chan_id =
-		create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0).0.channel_id;
+	create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
 
 	let server = &nodes[0];
 	let recipient = &nodes[1];
@@ -2188,12 +2187,10 @@ fn offer_cache_round_trip_ser() {
 	// offers.
 	let cached_offers_pre_ser = recipient.node.flow.test_get_async_receive_offers();
 	let config = test_default_channel_config();
-	let serialized_monitor = get_monitor!(recipient, chan_id).encode();
-	reload_node!(
+	reload_node_and_monitors!(
 		nodes[1],
 		config,
 		recipient.node.encode(),
-		&[&serialized_monitor],
 		persister,
 		chain_monitor,
 		payee_node_deserialized
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 1bc1bfbc2ff..d5e0e1c6ebd 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -3503,11 +3503,7 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	if completion_mode == BlockedUpdateComplMode::AtReload {
 		let node_ser = nodes[1].node.encode();
-		let chan_mon_0 = get_monitor!(nodes[1], chan_id_1).encode();
-		let chan_mon_1 = get_monitor!(nodes[1], chan_id_2).encode();
-
-		let mons = &[&chan_mon_0[..], &chan_mon_1[..]];
-		reload_node!(nodes[1], &node_ser, mons, persister, new_chain_mon, nodes_1_reload);
+		reload_node_and_monitors!(nodes[1], &node_ser, persister, new_chain_mon, nodes_1_reload);
 
 		nodes[0].node.peer_disconnected(node_b_id);
 		nodes[2].node.peer_disconnected(node_b_id);
@@ -4036,10 +4032,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 	// Finally, reload node B and check that after we call `process_pending_events` once we realize
 	// we've completed the A<->B preimage-including monitor update and so can release the B<->C
 	// preimage-removing monitor update.
-	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
-	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
 	let manager_b = nodes[1].node.encode();
-	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload);
+	reload_node_and_monitors!(nodes[1], &manager_b, persister, chain_mon, node_b_reload);
 
 	let msg = "Channel force-closed".to_owned();
 	if close_during_reload {
@@ -4391,10 +4385,13 @@ fn do_test_partial_claim_mon_update_compl_actions(reload_a: bool, reload_b: bool
 		// reload once the HTLCs for the first payment have been removed and the monitors
 		// completed.
 		let node_ser = nodes[3].node.encode();
-		let chan_3_monitor_serialized = get_monitor!(nodes[3], chan_3_id).encode();
-		let chan_4_monitor_serialized = get_monitor!(nodes[3], chan_4_id).encode();
-		let mons = &[&chan_3_monitor_serialized[..], &chan_4_monitor_serialized[..]];
-		reload_node!(nodes[3], &node_ser, mons, persister_2, new_chain_mon_2, nodes_3_reload_2);
+		reload_node_and_monitors!(
+			nodes[3],
+			&node_ser,
+			persister_2,
+			new_chain_mon_2,
+			nodes_3_reload_2
+		);
 		check_added_monitors(&nodes[3], 0);
 
 		nodes[1].node.peer_disconnected(node_d_id);
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 1d87eccfe66..10a708f2c99 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -19066,10 +19066,9 @@ mod tests {
 		*chanmgr_fwd_htlcs = forward_htlcs.clone();
 		core::mem::drop(chanmgr_fwd_htlcs);
 
-		reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
+		reload_node_and_monitors!(nodes[0], nodes[0].node.encode(), persister, chain_monitor, deserialized_chanmgr);
 
-		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
-		for scid in [scid_1, scid_2].iter() {
+		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); for scid in [scid_1, scid_2].iter() {
 			let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
 			assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
 		}
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 6bde99bb59b..767f155472f 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1382,6 +1382,38 @@ macro_rules! reload_node {
 	};
 }
 
+#[cfg(test)]
+macro_rules! reload_node_and_monitors {
+	($node: expr, $new_config: expr, $chanman_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
+		let monitors_serialized = {
+			let monitor_map = $node.chain_monitor.persisted_monitors.lock().unwrap();
+			monitor_map.values().cloned().collect::<Vec<_>>()
+		};
+		let monitors_serialized_ref: Vec<&[u8]> =
+			monitors_serialized.iter().map(|v| v.as_slice()).collect();
+
+		reload_node!(
+			$node,
+			$new_config,
+			$chanman_encoded,
+			&monitors_serialized_ref,
+			$persister,
+			$new_chain_monitor,
+			$new_channelmanager
+		);
+	};
+	($node: expr, $chanman_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => {
+		reload_node_and_monitors!(
+			$node,
+			$crate::util::config::UserConfig::default(),
+			$chanman_encoded,
+			$persister,
+			$new_chain_monitor,
+			$new_channelmanager
+		);
+	};
+}
+
 pub fn create_funding_transaction<'a, 'b, 'c>(
 	node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64,
 	expected_user_chan_id: u128,
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index b316381398e..eec4491709a 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -3244,7 +3244,7 @@ fn test_event_replay_causing_monitor_replay() {
 	let node_deserialized;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
 
 	let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
 
@@ -3262,9 +3262,7 @@ fn test_event_replay_causing_monitor_replay() {
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true /* expected post-event monitor update*/);
 	assert!(nodes[0].node.get_and_clear_needs_persistence());
 
-	let serialized_monitor = get_monitor!(nodes[0], chan.2).encode();
-	reload_node!(nodes[0], &serialized_channel_manager, &[&serialized_monitor], persister, new_chain_monitor, node_deserialized);
-
+	reload_node_and_monitors!(nodes[0], &serialized_channel_manager, persister, new_chain_monitor, node_deserialized);
 	// Expect the `PaymentSent` to get replayed, this time without the duplicate monitor update
 	expect_payment_sent(&nodes[0], payment_preimage, None, false, false /* expected post-event monitor update*/);
 }
@@ -3363,7 +3361,7 @@ fn test_claim_event_never_handled() {
 
 	let init_node_ser = nodes[1].node.encode();
 
-	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	// Send the payment we'll ultimately test the PaymentClaimed event for.
 	let (preimage_a, payment_hash_a, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
@@ -3392,10 +3390,7 @@ fn test_claim_event_never_handled() {
 
 	// Finally, reload node B with an empty `ChannelManager` and check that we get the
 	// `PaymentClaimed` event.
-	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan.2).encode();
-	let mons = &[&chan_0_monitor_serialized[..]];
-	reload_node!(nodes[1], &init_node_ser, mons, persister, new_chain_mon, nodes_1_reload);
-
+	reload_node_and_monitors!(nodes[1], &init_node_ser, persister, new_chain_mon, nodes_1_reload);
 	expect_payment_claimed!(nodes[1], payment_hash_a, 1_000_000);
 	// The reload logic spuriously generates a redundant payment preimage-containing
 	// `ChannelMonitorUpdate`.
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index d2479bbb0e5..25cac0aa789 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -2613,7 +2613,7 @@ fn do_automatic_retries(test: AutoRetry) {
 	let node_b_id = nodes[1].node.get_our_node_id();
 	let node_c_id = nodes[2].node.get_our_node_id();
 
-	let channel_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let channel_id_2 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
 
 	// Marshall data to send the payment
@@ -2801,8 +2801,7 @@ fn do_automatic_retries(test: AutoRetry) {
 
 		// Restart the node and ensure that ChannelManager does not use its remaining retry attempt
 		let node_encoded = nodes[0].node.encode();
-		let mon_ser = get_monitor!(nodes[0], channel_id_1).encode();
-		reload_node!(nodes[0], node_encoded, &[&mon_ser], persister, chain_monitor, node_a_reload);
+		reload_node_and_monitors!(nodes[0], node_encoded, persister, chain_monitor, node_a_reload);
 		nodes[0].node.process_pending_htlc_forwards();
 
 		// Make sure we don't retry again.
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 8c9e552fa04..12fc7b7d6b6 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -93,11 +93,9 @@ fn test_funding_peer_disconnect() {
 	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish);
 	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_4.len(), 3);
-	let chan_id;
 	let bs_channel_ready = match events_4[0] {
 		MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
 			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-			chan_id = msg.channel_id;
 			msg.clone()
 		},
 		_ => panic!("Unexpected event {:?}", events_4[0]),
@@ -183,9 +181,7 @@ fn test_funding_peer_disconnect() {
 	// channel_announcement from the cached signatures.
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
-	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
-
-	reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+	reload_node_and_monitors!(nodes[0], &nodes[0].node.encode(), persister, new_chain_monitor, nodes_0_deserialized);
 
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 }
@@ -205,10 +201,7 @@ fn test_no_txn_manager_serialize_deserialize() {
 
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
-	let chan_0_monitor_serialized =
-		get_monitor!(nodes[0], ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.compute_txid(), index: 0 })).encode();
-	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
-
+	reload_node_and_monitors!(nodes[0], nodes[0].node.encode(), persister, new_chain_monitor, nodes_0_deserialized);
 	nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
 		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
 	}, true).unwrap();
@@ -291,11 +284,9 @@ fn test_manager_serialize_deserialize_events() {
 	nodes.push(node_b);
 
 	// Start the de/seriailization process mid-channel creation to check that the channel manager will hold onto events that are serialized
-	let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
-	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+	reload_node_and_monitors!(nodes[0], nodes[0].node.encode(), persister, new_chain_monitor, nodes_0_deserialized);
 
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
-
 	// After deserializing, make sure the funding_transaction is still held by the channel manager
 	let events_4 = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events_4.len(), 0);
@@ -341,15 +332,14 @@ fn test_simple_manager_serialize_deserialize() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes_0_deserialized;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (our_payment_preimage, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 	let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
-	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
-	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+	reload_node_and_monitors!(nodes[0], nodes[0].node.encode(), persister, new_chain_monitor, nodes_0_deserialized);
 
 	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
@@ -1112,7 +1102,7 @@ fn removed_payment_no_manager_persistence() {
 
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
 	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
@@ -1135,9 +1125,7 @@ fn removed_payment_no_manager_persistence() {
 		_ => panic!("Unexpected event"),
 	}
 
-	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
-	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
-	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+	reload_node_and_monitors!(nodes[1], node_encoded, persister, new_chain_monitor, nodes_1_deserialized);
 
 	match nodes[1].node.pop_pending_event().unwrap() {
 		Event::ChannelClosed { ref reason, .. } => {
@@ -1206,8 +1194,7 @@ fn test_reload_partial_funding_batch() {
 	// Reload the node while a subset of the channels in the funding batch have persisted monitors.
 	let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.compute_txid(), index: 0 });
 	let node_encoded = nodes[0].node.encode();
-	let channel_monitor_1_serialized = get_monitor!(nodes[0], channel_id_1).encode();
-	reload_node!(nodes[0], node_encoded, &[&channel_monitor_1_serialized], new_persister, new_chain_monitor, new_channel_manager);
+	reload_node_and_monitors!(nodes[0], node_encoded, new_persister, new_chain_monitor, new_channel_manager);
 
 	// Process monitor events.
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
@@ -1283,8 +1270,7 @@ fn test_htlc_localremoved_persistence() {
 	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
-	let monitor_encoded = get_monitor!(nodes[1], _chan.3).encode();
-	reload_node!(nodes[1], nodes[1].node.encode(), &[&monitor_encoded], persister, chain_monitor, deserialized_chanmgr);
+	reload_node_and_monitors!(nodes[1], nodes[1].node.encode(), persister, chain_monitor, deserialized_chanmgr);
 
 	nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
 		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
@@ -1419,4 +1405,3 @@ fn test_peer_storage() {
 	let res = std::panic::catch_unwind(|| drop(nodes));
 	assert!(res.is_err());
 }
-
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index aacc38e366a..8e5d2766218 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -486,6 +486,8 @@ pub struct TestChainMonitor<'a> {
 	pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
 	#[cfg(feature = "std")]
 	pub write_blocker: Mutex<Option<std::sync::mpsc::Receiver<()>>>,
+	/// The latest persisted monitor for each channel.
+	pub persisted_monitors: Mutex<HashMap<ChannelId, Vec<u8>>>,
 }
 impl<'a> TestChainMonitor<'a> {
 	pub fn new(
@@ -511,6 +513,7 @@ impl<'a> TestChainMonitor<'a> {
 			expect_monitor_round_trip_fail: Mutex::new(None),
 			#[cfg(feature = "std")]
 			write_blocker: Mutex::new(None),
+			persisted_monitors: Mutex::new(new_hash_map()),
 		}
 	}
 
@@ -564,6 +567,9 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 		// monitor to a serialized copy and get he same one back.
 		let mut w = TestVecWriter(Vec::new());
 		monitor.write(&mut w).unwrap();
+
+		self.persisted_monitors.lock().unwrap().insert(channel_id, w.0.clone());
+
 		let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
 			&mut io::Cursor::new(&w.0),
 			(self.keys_manager, self.keys_manager),
@@ -620,6 +626,9 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 		let monitor = self.chain_monitor.get_monitor(channel_id).unwrap();
 		w.0.clear();
 		monitor.write(&mut w).unwrap();
+
+		self.persisted_monitors.lock().unwrap().insert(channel_id, w.0.clone());
+
 		let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
 			&mut io::Cursor::new(&w.0),
 			(self.keys_manager, self.keys_manager),