use lightning::ln::functional_test_utils::{
	connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block,
-	create_network, create_node_cfgs, create_node_chanmgrs, send_payment,
+	create_network, create_node_cfgs, create_node_chanmgrs, send_payment, check_closed_event,
};
-use lightning::util::persist::{read_channel_monitors, KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN};
+use lightning::util::persist::{MonitorUpdatingPersister, MonitorName, KVStore, CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, KVSTORE_NAMESPACE_KEY_MAX_LEN};

use lightning::events::ClosureReason;
use lightning::util::test_utils;
-use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
+use lightning::{check_added_monitors, check_closed_broadcast};

use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};

use std::panic::RefUnwindSafe;
use std::path::PathBuf;

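+// The number of `ChannelMonitor` updates we expect a single full payment (send plus
+// claim) to generate for each node; used below to compute expected update IDs.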
+const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
+
pub(crate) fn random_storage_path() -> PathBuf {
	let mut temp_path = std::env::temp_dir();
	let mut rng = thread_rng();
@@ -81,54 +83,104 @@ pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_s
// Integration-test the given KVStore implementation. Test relaying a few payments and check that
// the persisted data is updated the appropriate number of times.
pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
+	// This value is used later to limit how many iterations we perform.
+	let persister_0_max_pending_updates = 7;
+	// Intentionally set this to a smaller value to test a different alignment.
+	let persister_1_max_pending_updates = 3;
+
	let chanmon_cfgs = create_chanmon_cfgs(2);
+
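+	// `MonitorUpdatingPersister` persists each `ChannelMonitorUpdate` as its own entry
+	// and only writes out a full `ChannelMonitor` every `max_pending_updates` updates,
+	// which is the behavior this test exercises.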
+	let persister_0 = MonitorUpdatingPersister::new(
+		store_0,
+		&chanmon_cfgs[0].logger,
+		persister_0_max_pending_updates,
+		&chanmon_cfgs[0].keys_manager,
+		&chanmon_cfgs[0].keys_manager,
+		&chanmon_cfgs[0].tx_broadcaster,
+		&chanmon_cfgs[0].fee_estimator,
+	);
+
+	let persister_1 = MonitorUpdatingPersister::new(
+		store_1,
+		&chanmon_cfgs[1].logger,
+		persister_1_max_pending_updates,
+		&chanmon_cfgs[1].keys_manager,
+		&chanmon_cfgs[1].keys_manager,
+		&chanmon_cfgs[1].tx_broadcaster,
+		&chanmon_cfgs[1].fee_estimator,
+	);
+
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
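+	// Hand the chain monitors the updating persisters rather than the raw stores, so all
+	// monitor persistence in this test flows through `MonitorUpdatingPersister`.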
	let chain_mon_0 = test_utils::TestChainMonitor::new(
		Some(&chanmon_cfgs[0].chain_source),
		&chanmon_cfgs[0].tx_broadcaster,
		&chanmon_cfgs[0].logger,
		&chanmon_cfgs[0].fee_estimator,
-		store_0,
-		node_cfgs[0].keys_manager,
+		&persister_0,
+		&chanmon_cfgs[0].keys_manager,
	);
+
	let chain_mon_1 = test_utils::TestChainMonitor::new(
		Some(&chanmon_cfgs[1].chain_source),
		&chanmon_cfgs[1].tx_broadcaster,
		&chanmon_cfgs[1].logger,
		&chanmon_cfgs[1].fee_estimator,
-		store_1,
-		node_cfgs[1].keys_manager,
+		&persister_1,
+		&chanmon_cfgs[1].keys_manager,
	);
+
	node_cfgs[0].chain_monitor = chain_mon_0;
	node_cfgs[1].chain_monitor = chain_mon_1;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Check that the persisted channel data is empty before any channels are
	// open.
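+	// `read_all_channel_monitors_with_updates` reads each stored monitor and applies any
+	// stored updates on top of it before returning the result.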
-	let mut persisted_chan_data_0 =
-		read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+	let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap();
	assert_eq!(persisted_chan_data_0.len(), 0);
-	let mut persisted_chan_data_1 =
-		read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+	let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap();
	assert_eq!(persisted_chan_data_1.len(), 0);

	// Helper to make sure the channel is on the expected update ID.
	macro_rules! check_persisted_data {
		($expected_update_id: expr) => {
-			persisted_chan_data_0 =
-				read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager)
-					.unwrap();
+			persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap();
+			// Check that we stored only one monitor.
			assert_eq!(persisted_chan_data_0.len(), 1);
			for (_, mon) in persisted_chan_data_0.iter() {
				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+
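+				// The persister consolidates pending deltas into a fresh full-monitor write
+				// each time the update ID crosses a multiple of `max_pending_updates`, so the
+				// number of update entries left in the store is the update ID modulo that limit.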
+				let monitor_name = MonitorName::from(mon.get_funding_txo().0);
+				assert_eq!(
+					store_0
+						.list(
+							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+							monitor_name.as_str()
+						)
+						.unwrap()
+						.len() as u64,
+					mon.get_latest_update_id() % persister_0_max_pending_updates,
+					"Wrong number of updates stored in persister 0",
+				);
			}
-			persisted_chan_data_1 =
-				read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager)
-					.unwrap();
+			persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap();
			assert_eq!(persisted_chan_data_1.len(), 1);
			for (_, mon) in persisted_chan_data_1.iter() {
				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+
+				let monitor_name = MonitorName::from(mon.get_funding_txo().0);
+				assert_eq!(
+					store_1
+						.list(
+							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+							monitor_name.as_str()
+						)
+						.unwrap()
+						.len() as u64,
+					mon.get_latest_update_id() % persister_1_max_pending_updates,
+					"Wrong number of updates stored in persister 1",
+				);
			}
		};
	}
@@ -138,52 +190,52 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
	check_persisted_data!(0);

	// Send a few payments and make sure the monitors are updated to the latest.
-	send_payment(&nodes[0], &vec![&nodes[1]][..], 8000000);
-	check_persisted_data!(5);
-	send_payment(&nodes[1], &vec![&nodes[0]][..], 4000000);
-	check_persisted_data!(10);
+	send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
+	check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
+	send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
+	check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
+
+	// Send a few more payments to try all the alignments of max pending updates with
+	// updates for a payment sent and received.
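+	// Two payments were already sent above, so `i` starts at 3; running through
+	// `2 * persister_0_max_pending_updates` total payments walks the update ID through
+	// every alignment of `EXPECTED_UPDATES_PER_PAYMENT` against both persisters' limits.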
+	let mut sender = 0;
+	for i in 3..=persister_0_max_pending_updates * 2 {
+		let receiver;
+		if sender == 0 {
+			sender = 1;
+			receiver = 0;
+		} else {
+			sender = 0;
+			receiver = 1;
+		}
+		send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
+		check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
+	}

	// Force close because cooperative close doesn't result in any persisted
	// updates.
-	nodes[0]
-		.node
-		.force_close_broadcasting_latest_txn(
-			&nodes[0].node.list_channels()[0].channel_id,
-			&nodes[1].node.get_our_node_id(),
-			"whoops".to_string(),
-		)
-		.unwrap();
-	check_closed_event!(
-		nodes[0],
-		1,
-		ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
-		[nodes[1].node.get_our_node_id()],
-		100000
-	);
+
+	let node_id_1 = nodes[1].node.get_our_node_id();
+	let chan_id = nodes[0].node.list_channels()[0].channel_id;
+	let err_msg = "Channel force-closed".to_string();
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap();
+
+	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+	check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);

-	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+	let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
	assert_eq!(node_txn.len(), 1);
+	let txn = vec![node_txn[0].clone(), node_txn[0].clone()];
+	let dummy_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn);
+	connect_block(&nodes[1], &dummy_block);

-	connect_block(
-		&nodes[1],
-		&create_dummy_block(
-			nodes[0].best_block_hash(),
-			42,
-			vec![node_txn[0].clone(), node_txn[0].clone()],
-		),
-	);
	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(
-		nodes[1],
-		1,
-		ClosureReason::CommitmentTxConfirmed,
-		[nodes[0].node.get_our_node_id()],
-		100000
-	);
+	let reason = ClosureReason::CommitmentTxConfirmed;
+	let node_id_0 = nodes[0].node.get_our_node_id();
+	check_closed_event(&nodes[1], 1, reason, false, &[node_id_0], 100000);
	check_added_monitors!(nodes[1], 1);

	// Make sure everything is persisted as expected after close.
-	check_persisted_data!(11);
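+	// That is, `2 * persister_0_max_pending_updates` payments at
+	// `EXPECTED_UPDATES_PER_PAYMENT` monitor updates each, plus one more update
+	// generated by the force-close.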
+	check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1);
}
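
// A minimal sketch (not part of this commit) of how a `KVStore` backend test might drive
// this harness, assuming the `FilesystemStore` backend from `lightning_persister::fs_store`;
// the test name and directory suffixes are illustrative.
//
// use lightning_persister::fs_store::FilesystemStore;
//
// #[test]
// fn test_filesystem_store() {
// 	let mut path_0 = random_storage_path();
// 	path_0.push("fs_store_0");
// 	let mut path_1 = random_storage_path();
// 	path_1.push("fs_store_1");
// 	let store_0 = FilesystemStore::new(path_0);
// 	let store_1 = FilesystemStore::new(path_1);
// 	do_test_store(&store_0, &store_1);
// }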