 //! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of
 //! LDK.

+use lightning_0_1::events::ClosureReason as ClosureReason_0_1;
 use lightning_0_1::get_monitor as get_monitor_0_1;
 use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils;
 use lightning_0_1::util::ser::Writeable as _;
@@ -28,10 +29,19 @@ use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _;
 use lightning_0_0_125::routing::router as router_0_0_125;
 use lightning_0_0_125::util::ser::Writeable as _;

+use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
+use lightning::events::{ClosureReason, Event};
 use lightning::ln::functional_test_utils::*;
+use lightning::sign::OutputSpender;

 use lightning_types::payment::PaymentPreimage;

+use bitcoin::opcodes;
+use bitcoin::script::Builder;
+use bitcoin::secp256k1::Secp256k1;
+
+use std::sync::Arc;
+
 #[test]
 fn simple_upgrade() {
 	// Tests a simple case of upgrading from LDK 0.1 with a pending payment
@@ -213,3 +223,79 @@ fn test_125_dangling_post_update_actions() {
 	let config = test_default_channel_config();
 	reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node);
 }
+
+#[test]
+fn test_0_1_legacy_remote_key_derivation() {
+	// Test that a channel opened with a v1/legacy `remote_key` derivation will be properly spent
+	// even after upgrading and opting into the new v2 derivation for new channels.
+	let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, commitment_tx, channel_id);
+	let node_a_blocks;
+	{
+		let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2);
+		let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = lightning_0_1_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = lightning_0_1_utils::create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let node_a_id = nodes[0].node.get_our_node_id();
+
+		let chan_id = lightning_0_1_utils::create_announced_chan_between_nodes(&nodes, 0, 1).2;
+		channel_id = chan_id.0;
+
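+		// Have node B force-close and grab its commitment transaction: node A's balance on that
+		// transaction sits in a to_remote output built with the 0.1-era (legacy) remote_key
+		// derivation, which the upgraded node must still be able to spend.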
+		let err = "".to_owned();
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err).unwrap();
+		commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(commitment_tx.len(), 1);
+
+		lightning_0_1_utils::check_added_monitors(&nodes[1], 1);
+		let reason = ClosureReason_0_1::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+		lightning_0_1_utils::check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000);
+		lightning_0_1_utils::check_closed_broadcast(&nodes[1], 1, true);
+
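+		// Serialize the 0.1 ChannelManagers and ChannelMonitors so they can be reloaded on the
+		// current version below.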
+		node_a_ser = nodes[0].node.encode();
+		node_b_ser = nodes[1].node.encode();
+		mon_a_ser = get_monitor_0_1!(nodes[0], chan_id).encode();
+		mon_b_ser = get_monitor_0_1!(nodes[1], chan_id).encode();
+
+		node_a_blocks = Arc::clone(&nodes[0].blocks);
+	}
+
+	// Create a dummy node to reload over with the 0.1 state
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let (persister_a, persister_b, chain_mon_a, chain_mon_b);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let (node_a, node_b);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let config = test_default_channel_config();
+	let a_mons = &[&mon_a_ser[..]];
+	reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a);
+	reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b);
+
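+	// Carry over node A's block data from the 0.1 harness so the reloaded node continues from
+	// the chain it already knows about.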
+	nodes[0].blocks = node_a_blocks;
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+
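+	// Confirm the 0.1-era commitment transaction on the upgraded node A and process the
+	// resulting channel closure.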
+	mine_transaction(&nodes[0], &commitment_tx[0]);
+	let reason = ClosureReason::CommitmentTxConfirmed;
+	check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000);
+	check_added_monitors(&nodes[0], 1);
+	check_closed_broadcast(&nodes[0], 1, false);
+
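+	// Once the commitment transaction reaches ANTI_REORG_DELAY confirmations, the ChainMonitor
+	// should surface node A's to_remote output via a SpendableOutputs event.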
+	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+	let mut spendable_event = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
+	assert_eq!(spendable_event.len(), 1);
+	if let Event::SpendableOutputs { outputs, channel_id: ev_id } = spendable_event.pop().unwrap() {
+		assert_eq!(ev_id.unwrap().0, channel_id);
+		assert_eq!(outputs.len(), 1);
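+		// Spend the output via the node's keys manager to verify the legacy remote_key can still
+		// be derived and signed for; the OP_RETURN destination and low feerate are just
+		// placeholder values for the test.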
+		let spk = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script();
+		let spend_tx = nodes[0]
+			.keys_manager
+			.backing
+			.spend_spendable_outputs(&[&outputs[0]], Vec::new(), spk, 253, None, &Secp256k1::new())
+			.unwrap();
+		check_spends!(spend_tx, commitment_tx[0]);
+	} else {
+		panic!("Wrong event");
+	}
+}