@@ -1855,7 +1855,7 @@ macro_rules! handle_monitor_update_completion {
 }

 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_ALREADY_APPLIED, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
@@ -1866,13 +1866,13 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					log_bytes!($chan.context.channel_id()[..]));
-				Ok(())
+				Ok(false)
 			},
 			ChannelMonitorUpdateStatus::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 					log_bytes!($chan.context.channel_id()[..]));
 				update_maps_on_chan_removal!($self, &$chan.context);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+				let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 					"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 					$chan.context.get_user_id(), $chan.context.force_shutdown(false),
 					$self.get_channel_update_for_broadcast(&$chan).ok()));
@@ -1884,16 +1884,16 @@ macro_rules! handle_new_monitor_update {
 				if $chan.no_monitor_updates_pending() {
 					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
-				Ok(())
+				Ok(true)
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, ALREADY_APPLIED) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_ALREADY_APPLIED, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		let update_res = $self.chain_monitor.update_channel($funding_txo, &$update);
-		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_ALREADY_APPLIED, $remove)
+		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_INITIAL_MONITOR, $remove)
 	} };
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
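
The reworked macro now yields `Ok(bool)` rather than `Ok(())`: `true` when the monitor update was applied synchronously, `false` when it is still in flight, and `Err(..)` on `PermanentFailure`. A minimal sketch of that contract (not LDK code) using simplified stand-in types, with `StorageError` standing in for `MsgHandleErrInternal`:

```rust
#[derive(Debug)]
struct StorageError(&'static str);

enum UpdateStatus { Completed, InProgress, PermanentFailure }

// Stand-in for the macro's three-way outcome.
fn handle_update(status: UpdateStatus) -> Result<bool, StorageError> {
	match status {
		// Persisted durably; callers may act on the update immediately.
		UpdateStatus::Completed => Ok(true),
		// Persistence pending; outbound messages are held until completion.
		UpdateStatus::InProgress => Ok(false),
		// Unrecoverable; the channel must be force-closed.
		UpdateStatus::PermanentFailure => Err(StorageError("ChannelMonitor storage failure")),
	}
}

fn main() {
	// Call sites that only care about success or failure discard the flag
	// with `.map(|_| ())`, the adapter used throughout the hunks below.
	let res: Result<(), StorageError> = handle_update(UpdateStatus::Completed).map(|_| ());
	assert!(res.is_ok());
}
```
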
@@ -2311,7 +2311,8 @@ where

 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt.take() {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}

 				if chan_entry.get().is_shutdown() {
@@ -2992,19 +2993,18 @@ where
 					}, onion_packet, &self.logger);
 				match break_chan_entry!(self, send_res, chan) {
 					Some(monitor_update) => {
-						let update_id = monitor_update.update_id;
-						let update_res = self.chain_monitor.update_channel(funding_txo, &monitor_update);
-						if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED) {
-							break Err(e);
-						}
-						if update_res == ChannelMonitorUpdateStatus::InProgress {
-							// Note that MonitorUpdateInProgress here indicates (per function
-							// docs) that we will resend the commitment update once monitor
-							// updating completes. Therefore, we must return an error
-							// indicating that it is unsafe to retry the payment wholesale,
-							// which we do in the send_payment check for
-							// MonitorUpdateInProgress, below.
-							return Err(APIError::MonitorUpdateInProgress);
+						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+							Err(e) => break Err(e),
+							Ok(false) => {
+								// Note that MonitorUpdateInProgress here indicates (per function
+								// docs) that we will resend the commitment update once monitor
+								// updating completes. Therefore, we must return an error
+								// indicating that it is unsafe to retry the payment wholesale,
+								// which we do in the send_payment check for
+								// MonitorUpdateInProgress, below.
+								return Err(APIError::MonitorUpdateInProgress);
+							},
+							Ok(true) => {},
 						}
 					},
 					None => { },
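
On the caller side, the send path can now distinguish the in-flight case directly from the macro's return value instead of re-checking the raw `ChannelMonitorUpdateStatus`. A hedged consumer-side sketch, with `SendError` as a hypothetical stand-in for the real error types:

```rust
#[derive(Debug)]
enum SendError { ChannelClosed, MonitorUpdateInProgress }

// `update_result` stands in for the macro's Result<bool, _> return value.
fn finish_send(update_result: Result<bool, SendError>) -> Result<(), SendError> {
	match update_result {
		// The monitor update failed permanently; surface the channel-close error.
		Err(e) => Err(e),
		// Update still in flight: the commitment update will be resent once it
		// completes, so tell the caller a wholesale retry is unsafe.
		Ok(false) => Err(SendError::MonitorUpdateInProgress),
		// Update completed synchronously; the HTLC send is fully committed.
		Ok(true) => Ok(()),
	}
}

fn main() {
	assert!(matches!(finish_send(Ok(false)), Err(SendError::MonitorUpdateInProgress)));
	assert!(finish_send(Ok(true)).is_ok());
}
```
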
@@ -4023,21 +4023,26 @@ where
 						let _ = self.chain_monitor.update_channel(funding_txo, &update);
 					},
 					BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
-						let update_res = self.chain_monitor.update_channel(funding_txo, &update);
-
+						let mut updated_chan = false;
 						let res = {
 							let per_peer_state = self.per_peer_state.read().unwrap();
 							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 								let peer_state = &mut *peer_state_lock;
 								match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 									hash_map::Entry::Occupied(mut chan) => {
-										handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED)
+										updated_chan = true;
+										handle_new_monitor_update!(self, funding_txo, update,
+											peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 									},
 									hash_map::Entry::Vacant(_) => Ok(()),
 								}
 							} else { Ok(()) }
 						};
+						if !updated_chan {
+							// TODO: Track this as in-flight even though the channel is closed.
+							let _ = self.chain_monitor.update_channel(funding_txo, &update);
+						}
 						// TODO: If this channel has since closed, we're likely providing a payment
 						// preimage update, which we must ensure is durable! We currently don't,
 						// however, ensure that.
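
The startup path above tries the live channel first and only falls back to a direct chain-monitor update once no channel entry has consumed the update. A simplified sketch of that control flow, with all types as stand-ins rather than real LDK APIs:

```rust
struct ChainMonitor;

impl ChainMonitor {
	// Stand-in for applying a regenerated update directly to the monitor.
	fn update_channel(&self, _channel_id: u64, _update: &[u8]) { /* persist the update */ }
}

fn replay_startup_update(channel_is_open: bool, chain_monitor: &ChainMonitor,
	channel_id: u64, update: &[u8],
) {
	let mut updated_chan = false;
	if channel_is_open {
		// Normal path: the channel tracks the update as in-flight and holds
		// outbound messages until persistence completes.
		updated_chan = true;
	}
	if !updated_chan {
		// The channel has since closed, so no entry consumed the update;
		// push it straight to the monitor so it is not silently dropped.
		chain_monitor.update_channel(channel_id, update);
	}
}

fn main() {
	replay_startup_update(false, &ChainMonitor, 7, &[0u8]);
}
```
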
@@ -5155,7 +5160,7 @@ where

				let chan = e.insert(chan);
				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
-					per_peer_state, chan, MANUALLY_REMOVING_ALREADY_APPLIED,
+					per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
					{ peer_state.channel_by_id.remove(&new_channel_id) });

				// Note that we reply with the new channel_id in error messages if we gave up on the
@@ -5168,7 +5173,7 @@ where
 				if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
 					res.0 = None;
 				}
-				res
+				res.map(|_| ())
 			}
 		}
 	}
@@ -5189,7 +5194,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -5198,7 +5203,7 @@ where
 					shutdown_finish.0.take();
 				}
 			}
-			res
+			res.map(|_| ())
 		},
 		hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 	}
@@ -5286,7 +5291,8 @@ where

 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}
 				break Ok(());
 			},
@@ -5477,7 +5483,7 @@ where
 				let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
 				if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan)
+						peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) }
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5614,7 +5620,7 @@ where
 				let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 				let res = if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan)
+						peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) };
 				(htlcs_to_fail, res)
 			},