@@ -1819,7 +1819,7 @@ macro_rules! emit_channel_ready_event {
 }
 
 macro_rules! handle_monitor_update_completion {
-	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let mut updates = $chan.monitor_updating_restored(&$self.logger,
 			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
 			$self.best_block.read().unwrap().height());
@@ -1898,16 +1898,15 @@ macro_rules! handle_new_monitor_update {
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
+	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
 		handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
 			$per_peer_state_lock, $chan, _internal, $remove,
-			handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
+			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
 	};
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+		handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
-		let update_id = $update.update_id;
 		let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
			.or_insert_with(Vec::new);
		// During startup, we push monitor updates as background events through to here in
@@ -1924,7 +1923,7 @@ macro_rules! handle_new_monitor_update {
 			{
 				in_flight_updates.pop();
 				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
-					handle_monitor_update_completion!($self, update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
 			})
 	} };
@@ -4934,7 +4933,7 @@ where
 		if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
 			return;
 		}
-		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
+		handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
 	}
 
 	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
@@ -5257,7 +5256,7 @@ where
 				let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 
 				let chan = e.insert(chan);
-				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+				let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
 					per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 					{ peer_state.channel_by_id.remove(&new_channel_id) });
 
@@ -5292,7 +5291,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+				let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
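
The diff above gates monitor-update completion on the per-channel in-flight update list draining rather than on an update id threaded through the macros. Below is a minimal, self-contained sketch of that gating pattern; the names (InFlightTracker, MonitorUpdate, a plain u64 channel key) are hypothetical stand-ins, not LDK's actual types.

use std::collections::HashMap;

// Hypothetical stand-in for a channel monitor update; only the id matters here.
#[derive(Clone, Debug, PartialEq, Eq)]
struct MonitorUpdate {
    update_id: u64,
}

// Hypothetical tracker: completion is keyed off the per-channel in-flight list
// becoming empty, not off comparing a caller-supplied update id.
struct InFlightTracker {
    in_flight: HashMap<u64, Vec<MonitorUpdate>>,
}

impl InFlightTracker {
    fn new() -> Self {
        Self { in_flight: HashMap::new() }
    }

    // Record a newly issued update as in-flight for the given channel key.
    fn push(&mut self, channel_key: u64, update: MonitorUpdate) {
        self.in_flight.entry(channel_key).or_insert_with(Vec::new).push(update);
    }

    // Mark one update as persisted; returns true once nothing remains in-flight,
    // i.e. the point at which completion actions may run.
    fn complete(&mut self, channel_key: u64, update_id: u64) -> bool {
        if let Some(updates) = self.in_flight.get_mut(&channel_key) {
            updates.retain(|u| u.update_id != update_id);
            if updates.is_empty() {
                self.in_flight.remove(&channel_key);
                return true;
            }
        }
        false
    }
}

fn main() {
    let mut tracker = InFlightTracker::new();
    tracker.push(1, MonitorUpdate { update_id: 7 });
    tracker.push(1, MonitorUpdate { update_id: 8 });

    assert!(!tracker.complete(1, 7));
    // Only when the last in-flight update completes do completion actions fire.
    assert!(tracker.complete(1, 8));
    println!("channel 1: all in-flight monitor updates complete");
}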