@@ -1417,7 +1417,7 @@ macro_rules! emit_channel_ready_event {
 }
 
 macro_rules! handle_monitor_update_completion {
-	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let mut updates = $chan.monitor_updating_restored(&$self.logger,
 			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
 			$self.best_block.read().unwrap().height());
@@ -1450,6 +1450,7 @@ macro_rules! handle_monitor_update_completion {
 
 		let channel_id = $chan.channel_id();
 		core::mem::drop($peer_state_lock);
+		core::mem::drop($per_peer_state_lock);
 
 		$self.handle_monitor_update_completion_actions(update_actions);
 
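
Why the added drop matters: the macro releases the per-peer `MutexGuard` first and the top-level `per_peer_state` read guard second, so `handle_monitor_update_completion_actions` runs with no channel-state locks held and may freely re-acquire them. A minimal sketch of the innermost-first unlock pattern, using hypothetical stand-in types rather than LDK's actual `PeerState` map:

    use std::sync::{Mutex, RwLock};

    // Hypothetical nested locking: a top-level RwLock over per-peer Mutexes.
    fn run_after_unlocking(outer: &RwLock<Vec<Mutex<u32>>>) {
        let outer_guard = outer.read().unwrap();
        let inner_guard = outer_guard[0].lock().unwrap();
        let snapshot = *inner_guard;
        // Release innermost-first, mirroring drop($peer_state_lock) then
        // drop($per_peer_state_lock) in the macro above.
        core::mem::drop(inner_guard);
        core::mem::drop(outer_guard);
        // Now safe to run callbacks that may re-take either lock.
        println!("acting on {snapshot} with no locks held");
    }
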
@@ -1465,7 +1466,7 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert!($self.id_to_peer.try_lock().is_ok());
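
The `debug_assert!` works because `std::sync::Mutex::try_lock` fails if the lock is already held, including by the current thread, so debug builds catch a would-be deadlock before `update_maps_on_chan_removal` blocks on `id_to_peer`. A standalone sketch of the same assert-then-lock style (the struct and method here are hypothetical):

    use std::sync::Mutex;

    struct Manager {
        // Hypothetical stand-in for the id_to_peer map guarded in the macro.
        id_to_peer: Mutex<Vec<[u8; 32]>>,
    }

    impl Manager {
        fn remove_channel(&self, channel_id: [u8; 32]) {
            // Assert the lock is free so the take below cannot deadlock; the
            // temporary guard from a successful try_lock is dropped immediately.
            debug_assert!(self.id_to_peer.try_lock().is_ok());
            let mut map = self.id_to_peer.lock().unwrap();
            map.retain(|id| *id != channel_id);
        }
    }
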
@@ -1492,14 +1493,14 @@ macro_rules! handle_new_monitor_update {
 					.update_id == $update_id) &&
 					$chan.get_latest_monitor_update_id() == $update_id
 				{
-					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
+					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
 				Ok(())
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
 	}
 }
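
The second arm above is pure delegation: it re-invokes the macro's first arm, appending the `MANUALLY_REMOVING` marker and the default removal expression. A toy reduction of that two-arm pattern (the macro below is invented for illustration):

    // Toy version of the delegation used by handle_new_monitor_update!:
    // the short arm forwards to the long arm with extra tokens appended.
    macro_rules! log_result {
        ($val: expr, WITH_PREFIX, $prefix: expr) => {
            println!("{}: {}", $prefix, $val)
        };
        ($val: expr) => {
            log_result!($val, WITH_PREFIX, "result")
        };
    }

    fn main() {
        log_result!(42);                        // expands via the short arm
        log_result!(42, WITH_PREFIX, "custom"); // uses the long arm directly
    }
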
@@ -1835,7 +1836,7 @@ where
 				if let Some(monitor_update) = monitor_update_opt.take() {
 					let update_id = monitor_update.update_id;
 					let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-					break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+					break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
 				}
 
 				if chan_entry.get().is_shutdown() {
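
A note on `break handle_new_monitor_update!(...)`: the enclosing `loop` is used as an expression, so `break` hands the macro's `Result` out as the loop's value. A minimal sketch of the idiom (hypothetical function):

    // `loop` is an expression in Rust; `break v` makes `v` its value. This lets
    // retry-style blocks yield a Result the way the shutdown path above does.
    fn first_even(candidates: &[u32]) -> Result<u32, ()> {
        let mut idx = 0;
        loop {
            if idx >= candidates.len() {
                break Err(());
            }
            if candidates[idx] % 2 == 0 {
                break Ok(candidates[idx]);
            }
            idx += 1;
        }
    }
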
@@ -2464,7 +2465,7 @@ where
 					Some(monitor_update) => {
 						let update_id = monitor_update.update_id;
 						let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
-						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
							break Err(e);
						}
						if update_res == ChannelMonitorUpdateStatus::InProgress {
@@ -3990,7 +3991,8 @@ where
 			)
 		).unwrap_or(None);
 
-		if let Some(mut peer_state_lock) = peer_state_opt.take() {
+		if peer_state_opt.is_some() {
+			let mut peer_state_lock = peer_state_opt.unwrap();
 			let peer_state = &mut *peer_state_lock;
 			if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
 				let counterparty_node_id = chan.get().get_counterparty_node_id();
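
The `take()`-to-`unwrap()` rewrite here is plausibly a borrow-checker accommodation: `take()` only mutably borrows `peer_state_opt`, leaving the `Option<MutexGuard<_>>` variable (which has drop glue) alive to the end of its scope and thus keeping the top-level read guard borrowed, while `unwrap()` consumes the `Option` by value, freeing the macro to drop `per_peer_state` inside the block. A reduced sketch of the distinction, with hypothetical lock nesting:

    use std::sync::{Mutex, MutexGuard, RwLock};

    fn demo(outer: &RwLock<Mutex<u32>>) {
        let read_guard = outer.read().unwrap();
        let guard_opt: Option<MutexGuard<'_, u32>> = Some(read_guard.lock().unwrap());
        if guard_opt.is_some() {
            // `unwrap()` moves the Option, so `guard_opt` is consumed here and
            // its borrow of `read_guard` ends once `inner` is dropped.
            let inner = guard_opt.unwrap();
            core::mem::drop(inner);
            core::mem::drop(read_guard); // compiles: nothing still borrows it
        }
        // With `if let Some(inner) = guard_opt.take()`, `guard_opt` (which has
        // drop glue) would live to the end of the function, keeping `read_guard`
        // borrowed and making the drop above a borrow-check error.
    }
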
@@ -4005,7 +4007,7 @@ where
 					let update_id = monitor_update.update_id;
 					let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
 					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan);
+						peer_state, per_peer_state, chan);
 					if let Err(e) = res {
 						// TODO: This is a *critical* error - we probably updated the outbound edge
 						// of the HTLC's monitor with a preimage. We should retry this monitor
@@ -4206,7 +4208,7 @@ where
 		if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
 			return;
 		}
-		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
+		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
 	}
 
 	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -4512,7 +4514,8 @@ where
 				let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 
 				let chan = e.insert(chan);
-				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+					per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
 
 				// Note that we reply with the new channel_id in error messages if we gave up on the
 				// channel, not the temporary_channel_id. This is compatible with ourselves, but the
@@ -4545,7 +4548,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4641,7 +4644,7 @@ where
 				if let Some(monitor_update) = monitor_update_opt {
 					let update_id = monitor_update.update_id;
 					let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-					break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+					break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
 				}
 				break Ok(());
 			},
@@ -4833,7 +4836,7 @@ where
 					let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
 					let update_id = monitor_update.update_id;
 					handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan)
+						peer_state, per_peer_state, chan)
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
@@ -4939,21 +4942,20 @@ where
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
 				.ok_or_else(|| {
 					debug_assert!(false);
 					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-				})?;
-			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+				}).map(|mtx| mtx.lock().unwrap())?;
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					let funding_txo = chan.get().get_funding_txo();
 					let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 					let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
 					let update_id = monitor_update.update_id;
-					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, chan);
+					let res = handle_new_monitor_update!(self, update_res, update_id,
+						peer_state_lock, peer_state, per_peer_state, chan);
 					(htlcs_to_fail, res)
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
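
Folding the lock acquisition into `.map(|mtx| mtx.lock().unwrap())?` avoids a separate `peer_state_mutex` binding; only the guard itself (and `per_peer_state`, which the macro now receives) remains live afterwards. A reduced sketch of the lookup-then-lock chain, with a hypothetical peer map and error type:

    use std::collections::HashMap;
    use std::sync::{Mutex, MutexGuard};

    #[derive(Debug)]
    struct NoSuchPeer;

    // Look a peer up and lock its state in one expression, so no `&Mutex`
    // binding outlives the statement, mirroring the change above.
    fn lock_peer<'a>(
        peers: &'a HashMap<u64, Mutex<Vec<u8>>>, peer_id: u64,
    ) -> Result<MutexGuard<'a, Vec<u8>>, NoSuchPeer> {
        peers.get(&peer_id)
            .ok_or(NoSuchPeer)
            .map(|mtx| mtx.lock().unwrap())
    }
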
@@ -5210,38 +5212,45 @@ where
 		let mut has_monitor_update = false;
 		let mut failed_htlcs = Vec::new();
 		let mut handle_errors = Vec::new();
-		let per_peer_state = self.per_peer_state.read().unwrap();
 
-		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
-			'chan_loop: loop {
-				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-				let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-				for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-					let counterparty_node_id = chan.get_counterparty_node_id();
-					let funding_txo = chan.get_funding_txo();
-					let (monitor_opt, holding_cell_failed_htlcs) =
-						chan.maybe_free_holding_cell_htlcs(&self.logger);
-					if !holding_cell_failed_htlcs.is_empty() {
-						failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
-					}
-					if let Some(monitor_update) = monitor_opt {
-						has_monitor_update = true;
-
-						let update_res = self.chain_monitor.update_channel(
-							funding_txo.expect("channel is live"), monitor_update);
-						let update_id = monitor_update.update_id;
-						let channel_id: [u8; 32] = *channel_id;
-						let res = handle_new_monitor_update!(self, update_res, update_id,
-							peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
-							peer_state.channel_by_id.remove(&channel_id));
-						if res.is_err() {
-							handle_errors.push((counterparty_node_id, res));
+		// Walk our list of channels and find any that need to update. Note that when we do find an
+		// update, if it includes actions that must be taken afterwards, we have to drop the
+		// per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
+		// manage to go through all our peers without finding a single channel to update.
+		'peer_loop: loop {
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+				'chan_loop: loop {
+					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+					for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+						let counterparty_node_id = chan.get_counterparty_node_id();
+						let funding_txo = chan.get_funding_txo();
+						let (monitor_opt, holding_cell_failed_htlcs) =
+							chan.maybe_free_holding_cell_htlcs(&self.logger);
+						if !holding_cell_failed_htlcs.is_empty() {
+							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+						}
+						if let Some(monitor_update) = monitor_opt {
+							has_monitor_update = true;
+
+							let update_res = self.chain_monitor.update_channel(
+								funding_txo.expect("channel is live"), monitor_update);
+							let update_id = monitor_update.update_id;
+							let channel_id: [u8; 32] = *channel_id;
+							let res = handle_new_monitor_update!(self, update_res, update_id,
+								peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+								peer_state.channel_by_id.remove(&channel_id));
+							if res.is_err() {
+								handle_errors.push((counterparty_node_id, res));
+							}
+							continue 'peer_loop;
 						}
-						continue 'chan_loop;
 					}
+					break 'chan_loop;
 				}
-				break 'chan_loop;
 			}
+			break 'peer_loop;
 		}
 
 		let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
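
The restructured loop generalizes to any scan-under-lock pattern where acting on a hit requires releasing the locks: every hit drops all guards and restarts the scan from the top, and the loops only exit after one clean pass that finds nothing to do. A condensed sketch using a hypothetical work queue in place of the channel map:

    use std::sync::Mutex;

    // Repeatedly scan a shared queue; to act on an item we must not hold the
    // lock, so each hit drops the guard (by leaving the block) and rescans.
    fn drain_ready(queue: &Mutex<Vec<u32>>, mut act: impl FnMut(u32)) {
        'scan: loop {
            let item = {
                let mut guard = queue.lock().unwrap();
                match guard.iter().position(|v| v % 2 == 0) {
                    Some(idx) => guard.swap_remove(idx),
                    None => break 'scan, // clean pass: nothing left to do
                }
            }; // guard dropped here, before acting
            act(item);
            continue 'scan;
        }
    }
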