@@ -1860,7 +1860,7 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_ALREADY_APPLIED, $remove: expr) => { {
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
@@ -1871,13 +1871,13 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					log_bytes!($chan.context.channel_id()[..]));
-				Ok(())
+				Ok(false)
 			},
 			ChannelMonitorUpdateStatus::PermanentFailure => {
 				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 					log_bytes!($chan.context.channel_id()[..]));
 				update_maps_on_chan_removal!($self, &$chan.context);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+				let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 					"ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 					$chan.context.get_user_id(), $chan.context.force_shutdown(false),
 					$self.get_channel_update_for_broadcast(&$chan).ok()));
@@ -1889,16 +1889,16 @@ macro_rules! handle_new_monitor_update {
 				if $chan.no_monitor_updates_pending() {
 					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
 				}
-				Ok(())
+				Ok(true)
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, ALREADY_APPLIED) => {
-		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_ALREADY_APPLIED, $chan_entry.remove_entry())
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		let update_res = $self.chain_monitor.update_channel($funding_txo, &$update);
-		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_ALREADY_APPLIED, $remove)
+		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_INITIAL_MONITOR, $remove)
 	} };
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
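With this change the macro's success type is `bool` rather than `()`: `Ok(true)` means the monitor update completed (so `handle_monitor_update_completion!` has already run), while `Ok(false)` means the update is still in flight and messages are being held. A minimal sketch of the status-to-result mapping, with a stand-in `MonitorError` type in place of the real `MsgHandleErrInternal`:

    // Sketch only: models the mapping the macro body above implements.
    enum ChannelMonitorUpdateStatus { Completed, InProgress, PermanentFailure }
    struct MonitorError(String); // stand-in for MsgHandleErrInternal

    fn map_update_status(status: ChannelMonitorUpdateStatus) -> Result<bool, MonitorError> {
        match status {
            // Persisted and applied: callers may act on the channel immediately.
            ChannelMonitorUpdateStatus::Completed => Ok(true),
            // Still in flight: messages are held until the update completes.
            ChannelMonitorUpdateStatus::InProgress => Ok(false),
            // Storage failed: the channel must be force-closed.
            ChannelMonitorUpdateStatus::PermanentFailure =>
                Err(MonitorError("ChannelMonitor storage failure".to_owned())),
        }
    }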
@@ -2316,7 +2316,8 @@ where
 
 			// Update the monitor with the shutdown script if necessary.
 			if let Some(monitor_update) = monitor_update_opt.take() {
-				break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+				break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+					peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 			}
 
 			if chan_entry.get().is_shutdown() {
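Call sites that only care about failure adapt the new `Result<bool, _>` back to their `Result<(), _>` signature with `.map(|_| ())`, as in the `break` above. Reusing the hypothetical `map_update_status` from the earlier sketch:

    // Discard the completed/in-flight flag where only the error matters.
    fn ignore_completion(status: ChannelMonitorUpdateStatus) -> Result<(), MonitorError> {
        map_update_status(status).map(|_| ())
    }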
@@ -3040,19 +3041,18 @@ where
 						}, onion_packet, None, &self.logger);
 					match break_chan_entry!(self, send_res, chan) {
 						Some(monitor_update) => {
-							let update_id = monitor_update.update_id;
-							let update_res = self.chain_monitor.update_channel(funding_txo, &monitor_update);
-							if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED) {
-								break Err(e);
-							}
-							if update_res == ChannelMonitorUpdateStatus::InProgress {
-								// Note that MonitorUpdateInProgress here indicates (per function
-								// docs) that we will resend the commitment update once monitor
-								// updating completes. Therefore, we must return an error
-								// indicating that it is unsafe to retry the payment wholesale,
-								// which we do in the send_payment check for
-								// MonitorUpdateInProgress, below.
-								return Err(APIError::MonitorUpdateInProgress);
+							match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+								Err(e) => break Err(e),
+								Ok(false) => {
+									// Note that MonitorUpdateInProgress here indicates (per function
+									// docs) that we will resend the commitment update once monitor
+									// updating completes. Therefore, we must return an error
+									// indicating that it is unsafe to retry the payment wholesale,
+									// which we do in the send_payment check for
+									// MonitorUpdateInProgress, below.
+									return Err(APIError::MonitorUpdateInProgress);
+								},
+								Ok(true) => {},
 							}
 						},
 						None => {},
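The send path previously had to compare `update_res` against `ChannelMonitorUpdateStatus::InProgress` after the macro had already consumed it; matching on the returned `bool` expresses the same three outcomes directly. A sketch of that control flow under the earlier stand-in types, with `String` in place of the real `APIError`:

    fn handle_send_status(status: ChannelMonitorUpdateStatus) -> Result<(), String> {
        match map_update_status(status) {
            Err(e) => Err(e.0),
            // In flight: the commitment update is resent once the monitor update
            // completes, so retrying the payment wholesale would be unsafe.
            Ok(false) => Err("MonitorUpdateInProgress".to_owned()),
            Ok(true) => Ok(()),
        }
    }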
@@ -4087,21 +4087,26 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
-					let update_res = self.chain_monitor.update_channel(funding_txo, &update);
-
+					let mut updated_chan = false;
 					let res = {
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 							let peer_state = &mut *peer_state_lock;
 							match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 								hash_map::Entry::Occupied(mut chan) => {
-									handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED)
+									updated_chan = true;
+									handle_new_monitor_update!(self, funding_txo, update,
+										peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 								},
 								hash_map::Entry::Vacant(_) => Ok(()),
 							}
 						} else { Ok(()) }
 					};
+					if !updated_chan {
+						// TODO: Track this as in-flight even though the channel is closed.
+						let _ = self.chain_monitor.update_channel(funding_txo, &update);
+					}
 					// TODO: If this channel has since closed, we're likely providing a payment
 					// preimage update, which we must ensure is durable! We currently don't,
 					// however, ensure that.
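Previously this event handler applied the update via `update_channel` before looking the channel up; now the update goes through `handle_new_monitor_update!` when the channel is still live, and only falls back to a direct `chain_monitor` call when it is not. A self-contained sketch of that shape, where the map and the closure are stand-ins for the real peer/channel state and chain monitor:

    use std::collections::HashMap;

    fn replay_startup_update(
        live_channels: &HashMap<[u8; 32], ()>, channel_id: [u8; 32],
        apply_directly: impl FnOnce(),
    ) {
        let mut updated_chan = false;
        if live_channels.contains_key(&channel_id) {
            updated_chan = true;
            // Real code: handle_new_monitor_update!(..), keeping in-flight
            // tracking and completion actions consistent for the live channel.
        }
        if !updated_chan {
            // Channel already closed: hand the update straight to the monitor.
            apply_directly();
        }
    }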
@@ -5219,7 +5224,7 @@ where
 
 				let chan = e.insert(chan);
 				let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
-					per_peer_state, chan, MANUALLY_REMOVING_ALREADY_APPLIED,
+					per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 					{ peer_state.channel_by_id.remove(&new_channel_id) });
 
 				// Note that we reply with the new channel_id in error messages if we gave up on the
@@ -5232,7 +5237,7 @@ where
 				if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
 					res.0 = None;
 				}
-				res
+				res.map(|_| ())
 			}
 		}
 	}
@@ -5253,7 +5258,7 @@ where
 				let monitor = try_chan_entry!(self,
 					chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
 				let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, ALREADY_APPLIED);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 					// We weren't able to watch the channel to begin with, so no updates should be made on
 					// it. Previously, full_stack_target found an (unreachable) panic when the
@@ -5262,7 +5267,7 @@ where
 						shutdown_finish.0.take();
 					}
 				}
-				res
+				res.map(|_| ())
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 		}
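Both call sites above that pass `INITIAL_MONITOR`/`MANUALLY_REMOVING_INITIAL_MONITOR` handle a channel's very first monitor: the rename from `ALREADY_APPLIED` makes it explicit that these arms are now reserved for that case, where the persistence result comes from `watch_channel` (as in the `funding_signed` hunk) rather than `update_channel`, and the `update_id` is the literal `0`. In terms of the earlier sketch:

    // The initial monitor feeds the same status mapping; its success flag is
    // likewise discarded since these handlers return Result<(), _>.
    fn handle_initial_monitor(watch_res: ChannelMonitorUpdateStatus) -> Result<(), MonitorError> {
        map_update_status(watch_res).map(|_| ())
    }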
@@ -5350,7 +5355,8 @@ where
 
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt {
-					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan_entry);
+					break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+						peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
 				}
 				break Ok(());
 			},
@@ -5547,7 +5553,7 @@ where
 				let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
 				if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan)
+						peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) }
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5684,7 +5690,7 @@ where
 				let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
 				let res = if let Some(monitor_update) = monitor_update_opt {
 					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan)
+						peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) };
 				(htlcs_to_fail, res)
 			},