@@ -15,9 +15,10 @@ use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, ChannelMonitor};
 use crate::sign::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
-use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, Retry, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
+use crate::routing::router::{RouteParameters, PaymentParameters};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
@@ -493,7 +494,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	assert!(found_err);
 }
 
-fn do_test_data_loss_protect(reconnect_panicing: bool) {
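+// Gated on std: the test body below relies on std::panic::catch_unwind (and std::mem::forget)
+// to observe the expected panic without tearing down the rest of the test harness.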
+#[cfg(feature = "std")]
+fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, not_stale: bool) {
 	// When we get a data_loss_protect proving we're behind, we immediately panic as the
 	// chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
 	// panic message informs the user they should force-close without broadcasting, which is tested
@@ -517,8 +519,38 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 	let previous_node_state = nodes[0].node.encode();
 	let previous_chain_monitor_state = get_monitor!(nodes[0], chan.2).encode();
 
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+	assert!(!substantially_old || !not_stale, "substantially_old and not_stale doesn't make sense");
+	if not_stale || !substantially_old {
+		// Previously, we'd only hit the data_loss_protect assertion if we had a state which
+		// revoked at least two revocations ago, not the latest revocation. Here, we use
+		// `not_stale` to test the boundary condition.
+		let pay_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), 100, false);
+		let route_params = RouteParameters::from_payment_params_and_value(pay_params, 40000);
+		nodes[0].node.send_spontaneous_payment_with_retry(None, RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), route_params, Retry::Attempts(0));
+		check_added_monitors(&nodes[0], 1);
+		let update_add_commit = SendEvent::from_node(&nodes[0]);
+
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_commit.msgs[0]);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_add_commit.commitment_msg);
+		check_added_monitors(&nodes[1], 1);
+		let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+		check_added_monitors(&nodes[0], 1);
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
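+		// A has nothing further to send at this point: its responding revoke_and_ack is only
+		// generated once it processes B's commitment_signed below.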
+		if !not_stale {
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+			check_added_monitors(&nodes[0], 1);
+			// A now revokes their original state, at which point reconnect should panic
+			let raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+			check_added_monitors(&nodes[1], 1);
+			expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+		}
+	} else {
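+		// Two full payments move A substantially past the snapshotted state: each payment
+		// should revoke twice (once for the HTLC add, once for the claim), matching the
+		// "0 (received) vs 4 (expected)" warning checked below.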
+		send_payment(&nodes[0], &[&nodes[1]], 8000000);
+		send_payment(&nodes[0], &[&nodes[1]], 8000000);
+	}
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -535,89 +567,131 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) {
 
 		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-		// Check we close channel detecting A is fallen-behind
-		// Check that we sent the warning message when we detected that A has fallen behind,
-		// and give the possibility for A to recover from the warning.
+		// If A has fallen behind substantially, B should send it a message letting it know
+		// that.
 		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-		let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
-		assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+		let reestablish_msg;
+		if substantially_old {
+			let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction: 0 (received) vs 4 (expected)".to_owned();
+
+			let warn_reestablish = nodes[1].node.get_and_clear_pending_msg_events();
+			assert_eq!(warn_reestablish.len(), 2);
+			match warn_reestablish[1] {
+				MessageSendEvent::HandleError { action: ErrorAction::SendWarningMessage { ref msg, .. }, .. } => {
+					assert_eq!(msg.data, warn_msg);
+				},
+				_ => panic!("Unexpected events: {:?}", warn_reestablish),
+			}
+			reestablish_msg = match &warn_reestablish[0] {
+				MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+				_ => panic!("Unexpected events: {:?}", warn_reestablish),
+			};
+		} else {
+			let msgs = nodes[1].node.get_and_clear_pending_msg_events();
+			assert!(msgs.len() >= 4);
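+			// B resends its channel_reestablish first, then the revoke_and_ack and commitment
+			// update A is missing, and ends with a channel_update, as the asserts below check.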
+			match msgs.last() {
+				Some(MessageSendEvent::SendChannelUpdate { .. }) => {},
+				_ => panic!("Unexpected events: {:?}", msgs),
+			}
+			assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::SendRevokeAndACK { .. })));
+			assert!(msgs.iter().any(|msg| matches!(msg, MessageSendEvent::UpdateHTLCs { .. })));
+			reestablish_msg = match &msgs[0] {
+				MessageSendEvent::SendChannelReestablish { msg, .. } => msg.clone(),
+				_ => panic!("Unexpected events: {:?}", msgs),
+			};
+		}
 
 		{
-			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-			// The node B should not broadcast the transaction to force close the channel!
+			let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			// The node B should never force-close the channel.
 			assert!(node_txn.is_empty());
 		}
 
-		let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 		// Check A panics upon seeing proof it has fallen behind.
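+		// Wrapping the call in catch_unwind lets us assert on the panic here (and on its
+		// absence in the not_stale case) without aborting the test binary.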
-		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-		return; // By this point we should have panic'ed!
-	}
+		let reconnect_res = std::panic::catch_unwind(|| {
+			nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_msg);
+		});
+		if not_stale {
+			assert!(reconnect_res.is_ok());
+			// At this point A gets confused because B expects a commitment state newer than A
+			// has sent, but not a newer revocation secret, so A just (correctly) closes.
+			check_closed_broadcast(&nodes[0], 1, true);
+			check_added_monitors(&nodes[0], 1);
+			check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError {
+				err: "Peer attempted to reestablish channel with a future remote commitment transaction: 2 (received) vs 1 (expected)".to_owned()
+			}, [nodes[1].node.get_our_node_id()], 1000000);
+		} else {
+			assert!(reconnect_res.is_err());
+			// Skip the `Drop` handler for `Node`s as some may be in an invalid (panicked) state.
+			std::mem::forget(nodes);
+		}
+	} else {
+		assert!(!not_stale, "We only care about the stale case when not testing panicking");
 
-	nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
-	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
-	{
-		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 0);
-	}
+		nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1000000);
+		{
+			let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			assert_eq!(node_txn.len(), 0);
+		}
+
+		for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+			if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+			} else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+				match action {
+					&ErrorAction::DisconnectPeer { ref msg } => {
+						assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+					},
+					_ => panic!("Unexpected event!"),
+				}
+			} else {
+				panic!("Unexpected event {:?}", msg)
+			}
+		}
 
-	for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-		if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
-		} else if let MessageSendEvent::HandleError { ref action, .. } = msg {
+		// After the warning message sent by B, we should not be able to use the channel,
+		// or reconnect to it successfully.
+		assert!(nodes[0].node.list_usable_channels().is_empty());
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
+		let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+		let mut err_msgs_0 = Vec::with_capacity(1);
+		if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
 			match action {
-				&ErrorAction::DisconnectPeer { ref msg } => {
-					assert_eq!(msg.as_ref().unwrap().data, "Channel force-closed");
+				&ErrorAction::SendErrorMessage { ref msg } => {
+					assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
+					err_msgs_0.push(msg.clone());
 				},
 				_ => panic!("Unexpected event!"),
 			}
 		} else {
-			panic!("Unexpected event {:?}", msg)
+			panic!("Unexpected event!");
 		}
+		assert_eq!(err_msgs_0.len(), 1);
+		nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+		assert!(nodes[1].node.list_usable_channels().is_empty());
+		check_added_monitors!(nodes[1], 1);
+		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) },
+			[nodes[0].node.get_our_node_id()], 1000000);
+		check_closed_broadcast!(nodes[1], false);
 	}
-
-	// after the warning message sent by B, we should not able to
-	// use the channel, or reconnect with success to the channel.
-	assert!(nodes[0].node.list_usable_channels().is_empty());
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
-		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
-	}, true).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
-	}, false).unwrap();
-	let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
-
-	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
-	let mut err_msgs_0 = Vec::with_capacity(1);
-	if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[1] {
-		match action {
-			&ErrorAction::SendErrorMessage { ref msg } => {
-				assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()));
-				err_msgs_0.push(msg.clone());
-			},
-			_ => panic!("Unexpected event!"),
-		}
-	} else {
-		panic!("Unexpected event!");
-	}
-	assert_eq!(err_msgs_0.len(), 1);
-	nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
-	assert!(nodes[1].node.list_usable_channels().is_empty());
-	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) },
-		[nodes[0].node.get_our_node_id()], 1000000);
-	check_closed_broadcast!(nodes[1], false);
-}
-
-#[test]
-#[should_panic]
-fn test_data_loss_protect_showing_stale_state_panics() {
-	do_test_data_loss_protect(true);
 }
 
 #[test]
-fn test_force_close_without_broadcast() {
-	do_test_data_loss_protect(false);
+#[cfg(feature = "std")]
+fn test_data_loss_protect() {
+	do_test_data_loss_protect(true, false, true);
+	do_test_data_loss_protect(true, true, false);
+	do_test_data_loss_protect(true, false, false);
+	do_test_data_loss_protect(false, true, false);
+	do_test_data_loss_protect(false, false, false);
 }
 
 #[test]