@@ -1417,7 +1417,7 @@ macro_rules! emit_channel_ready_event {
 }
 
 macro_rules! handle_monitor_update_completion {
-	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let mut updates = $chan.monitor_updating_restored(&$self.logger,
 			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
 			$self.best_block.read().unwrap().height());
@@ -1450,6 +1450,7 @@ macro_rules! handle_monitor_update_completion {
 
 		let channel_id = $chan.channel_id();
 		core::mem::drop($peer_state_lock);
+		core::mem::drop($per_peer_state_lock);
 
 		$self.handle_monitor_update_completion_actions(update_actions);
 
@@ -1465,7 +1466,7 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert!($self.id_to_peer.try_lock().is_ok());
@@ -1492,14 +1493,14 @@ macro_rules! handle_new_monitor_update {
 					.update_id == $update_id) &&
 					$chan.get_latest_monitor_update_id() == $update_id
 				{
-					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
+					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
 				Ok(())
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
 	}
 }
 
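The `debug_assert!($self.id_to_peer.try_lock().is_ok())` at the top of `handle_new_monitor_update!` is a debug-only deadlock guard: as the comment notes, the channel-removal path needs to be able to take `id_to_peer`, so the macro asserts up front that the lock is still takeable on this call path. A minimal standalone sketch of that pattern (the `Node` and `remove_channel` names are illustrative only, not LDK's):

```rust
use std::sync::Mutex;

struct Node {
	// Maps channel ids to peer ids; pruned when a channel is removed.
	id_to_peer: Mutex<Vec<(u64, String)>>,
}

impl Node {
	fn remove_channel(&self, channel_id: u64) {
		// Debug-only check: if some caller up the stack already held `id_to_peer`,
		// this fires in tests instead of the `lock()` below deadlocking at runtime.
		debug_assert!(self.id_to_peer.try_lock().is_ok());

		let mut map = self.id_to_peer.lock().unwrap();
		map.retain(|(id, _)| *id != channel_id);
	}
}

fn main() {
	let node = Node { id_to_peer: Mutex::new(vec![(1, "peer_a".to_string())]) };
	node.remove_channel(1);
	assert!(node.id_to_peer.lock().unwrap().is_empty());
}
```

Because `debug_assert!` compiles out in release builds, the check costs nothing in production while still flagging lock-order violations in tests.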
@@ -1835,7 +1836,7 @@ where
 					if let Some(monitor_update) = monitor_update_opt.take() {
 						let update_id = monitor_update.update_id;
 						let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-						break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+						break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
 					}
 
 					if chan_entry.get().is_shutdown() {
@@ -2464,7 +2465,7 @@ where
 					Some(monitor_update) => {
 						let update_id = monitor_update.update_id;
 						let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
-						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
 							break Err(e);
 						}
 						if update_res == ChannelMonitorUpdateStatus::InProgress {
@@ -3991,7 +3992,8 @@ where
 			)
 		).unwrap_or(None);
 
-		if let Some(mut peer_state_lock) = peer_state_opt.take() {
+		if peer_state_opt.is_some() {
+			let mut peer_state_lock = peer_state_opt.unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
 				let counterparty_node_id = chan.get().get_counterparty_node_id();
@@ -4006,7 +4008,7 @@ where
 					let update_id = monitor_update.update_id;
 					let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
 					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan);
+						peer_state, per_peer_state, chan);
 					if let Err(e) = res {
 						// TODO: This is a *critical* error - we probably updated the outbound edge
 						// of the HTLC's monitor with a preimage. We should retry this monitor
@@ -4207,7 +4209,7 @@ where
 		if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
 			return;
 		}
-		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
+		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
 	}
 
 	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -4513,7 +4515,8 @@ where
 				let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 
 				let chan = e.insert(chan);
-				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+					per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
 
 				// Note that we reply with the new channel_id in error messages if we gave up on the
 				// channel, not the temporary_channel_id. This is compatible with ourselves, but the
@@ -4546,7 +4549,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4642,7 +4645,7 @@ where
 					if let Some(monitor_update) = monitor_update_opt {
 						let update_id = monitor_update.update_id;
 						let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-						break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+						break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
 					}
 					break Ok(());
 				},
@@ -4834,7 +4837,7 @@ where
 				let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
 				let update_id = monitor_update.update_id;
 				handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-					peer_state, chan)
+					peer_state, per_peer_state, chan)
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 		}
@@ -4940,21 +4943,20 @@ where
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
 				.ok_or_else(|| {
 					debug_assert!(false);
 					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-				})?;
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				}).map(|mtx| mtx.lock().unwrap())?;
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					let funding_txo = chan.get().get_funding_txo();
 					let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 					let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
 					let update_id = monitor_update.update_id;
-					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan);
+					let res = handle_new_monitor_update!(self, update_res, update_id,
+						peer_state_lock, peer_state, per_peer_state, chan);
 					(htlcs_to_fail, res)
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5211,38 +5213,45 @@ where
 		let mut has_monitor_update = false;
 		let mut failed_htlcs = Vec::new();
 		let mut handle_errors = Vec::new();
-		let per_peer_state = self.per_peer_state.read().unwrap();
 
-		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
-			'chan_loop: loop {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-				for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-					let counterparty_node_id = chan.get_counterparty_node_id();
-					let funding_txo = chan.get_funding_txo();
-					let (monitor_opt, holding_cell_failed_htlcs) =
-						chan.maybe_free_holding_cell_htlcs(&self.logger);
-					if !holding_cell_failed_htlcs.is_empty() {
-						failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
-					}
-					if let Some(monitor_update) = monitor_opt {
-						has_monitor_update = true;
-
-						let update_res = self.chain_monitor.update_channel(
-							funding_txo.expect("channel is live"), monitor_update);
-						let update_id = monitor_update.update_id;
-						let channel_id: [u8; 32] = *channel_id;
-						let res = handle_new_monitor_update!(self, update_res, update_id,
-							peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
-							peer_state.channel_by_id.remove(&channel_id));
-						if res.is_err() {
-							handle_errors.push((counterparty_node_id, res));
+		// Walk our list of channels and find any that need to update. Note that when we do find an
+		// update, if it includes actions that must be taken afterwards, we have to drop the
+		// per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
+		// manage to go through all our peers without finding a single channel to update.
+		'peer_loop: loop {
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				'chan_loop: loop {
+					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+					for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+						let counterparty_node_id = chan.get_counterparty_node_id();
+						let funding_txo = chan.get_funding_txo();
+						let (monitor_opt, holding_cell_failed_htlcs) =
+							chan.maybe_free_holding_cell_htlcs(&self.logger);
+						if !holding_cell_failed_htlcs.is_empty() {
+							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+						}
+						if let Some(monitor_update) = monitor_opt {
+							has_monitor_update = true;
+
+							let update_res = self.chain_monitor.update_channel(
+								funding_txo.expect("channel is live"), monitor_update);
+							let update_id = monitor_update.update_id;
+							let channel_id: [u8; 32] = *channel_id;
+							let res = handle_new_monitor_update!(self, update_res, update_id,
+								peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+								peer_state.channel_by_id.remove(&channel_id));
+							if res.is_err() {
+								handle_errors.push((counterparty_node_id, res));
+							}
+							continue 'peer_loop;
 						}
-						continue 'chan_loop;
 					}
+					break 'chan_loop;
 				}
-				break 'chan_loop;
 			}
+			break 'peer_loop;
 		}
 
 		let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
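The rewritten loop above follows from the first hunk: `handle_monitor_update_completion!` now drops both the per-peer `Mutex` guard and the top-level `per_peer_state` read guard before running the completion actions, so the iteration has to be restartable from the top once a channel has been handled. A minimal sketch of that lock-ordering rule, using simplified hypothetical types (`Manager`, `PeerState`, and `complete_update` are not LDK's):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState { pending_actions: Vec<String> }

struct Manager { per_peer_state: RwLock<HashMap<u8, Mutex<PeerState>>> }

impl Manager {
	fn complete_update(&self, peer: u8) {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let mut peer_state_lock = per_peer_state.get(&peer).unwrap().lock().unwrap();
		let update_actions = std::mem::take(&mut peer_state_lock.pending_actions);

		// Both guards are dropped before the actions run: handling an action may
		// need to re-take these locks (possibly per_peer_state in write mode),
		// which would deadlock if we still held them here.
		core::mem::drop(peer_state_lock);
		core::mem::drop(per_peer_state);

		for action in update_actions {
			// e.g. this could call back into a method that takes
			// self.per_peer_state.write().unwrap()
			println!("handling action: {action}");
		}
	}
}

fn main() {
	let mgr = Manager { per_peer_state: RwLock::new(HashMap::new()) };
	mgr.per_peer_state.write().unwrap().insert(
		0, Mutex::new(PeerState { pending_actions: vec!["emit channel_ready event".into()] }));
	mgr.complete_update(0);
}
```

Re-taking the `per_peer_state` read lock on each `'peer_loop` iteration is what lets the completion actions acquire the same locks again without deadlocking.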