@@ -628,6 +628,13 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
 	pub(super) pending_msg_events: Vec<MessageSendEvent>,
+	/// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the
+	/// user but which have not yet completed.
+	///
+	/// Note that the channel may no longer exist. For example if the channel was closed but we
+	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
+	/// for a missing channel.
+	in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
 	/// Map from a specific channel to some action(s) that should be taken when all pending
 	/// [`ChannelMonitorUpdate`]s for the channel complete updating.
 	///
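To make the new field's role concrete, here is a minimal, self-contained sketch (illustrative types only, not LDK's actual `OutPoint`, `ChannelMonitorUpdate`, or `PeerState` definitions) of the bookkeeping it provides: each funding outpoint maps to the ordered list of updates handed to the persister but not yet reported complete, and peer state must not be discarded while any remain, mirroring the emptiness check added in the next hunk.

```rust
use std::collections::BTreeMap;

// Simplified stand-ins for the real LDK types; only what is needed to
// illustrate the bookkeeping is modeled here.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 }

#[derive(Clone, Debug)]
struct ChannelMonitorUpdate { update_id: u64 }

#[derive(Default)]
struct PeerState {
    // Ordered list of updates passed to the user's persister but not yet
    // reported complete, keyed by the channel's funding outpoint. The channel
    // itself may already be gone (e.g. a post-close HTLC claim).
    in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
}

impl PeerState {
    // Record an update as in-flight before handing it to the persister.
    fn track_update(&mut self, funding_txo: OutPoint, update: ChannelMonitorUpdate) {
        self.in_flight_monitor_updates
            .entry(funding_txo)
            .or_insert_with(Vec::new)
            .push(update);
    }

    // Mirrors the extra check added below: peer state must not be dropped
    // while any monitor update is still in flight.
    fn ok_to_remove(&self) -> bool {
        self.in_flight_monitor_updates.is_empty()
    }
}

fn main() {
    let funding_txo = OutPoint { txid: [0; 32], index: 0 };
    let mut peer = PeerState::default();
    assert!(peer.ok_to_remove());
    peer.track_update(funding_txo, ChannelMonitorUpdate { update_id: 1 });
    assert!(!peer.ok_to_remove());
}
```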
@@ -663,6 +670,7 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
 			return false
 		}
 		self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+			&& self.in_flight_monitor_updates.is_empty()
 	}
 
 	// Returns a count of all channels we have with this peer, including pending channels.
@@ -1855,7 +1863,7 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => { {
+	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 		// any case so that it won't deadlock.
 		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
@@ -1880,20 +1888,33 @@ macro_rules! handle_new_monitor_update {
 				res
 			},
 			ChannelMonitorUpdateStatus::Completed => {
-				$chan.complete_one_mon_update($update_id);
-				if $chan.no_monitor_updates_pending() {
-					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
-				}
+				$completed;
 				Ok(true)
 			},
 		}
 	} };
+	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
+		handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
+			$per_peer_state_lock, $chan, _internal, $remove,
+			handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
+	};
 	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
 		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
-		let update_res = $self.chain_monitor.update_channel($funding_txo, &$update);
-		handle_new_monitor_update!($self, update_res, $update.update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan, MANUALLY_REMOVING_INITIAL_MONITOR, $remove)
+		let update_id = $update.update_id;
+		let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
+			.or_insert_with(Vec::new);
+		in_flight_updates.push($update);
+		let update_res = $self.chain_monitor.update_channel($funding_txo, in_flight_updates.last().unwrap());
+		handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
+			$per_peer_state_lock, $chan, _internal, $remove,
+			{
+				in_flight_updates.pop();
+				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
+					handle_monitor_update_completion!($self, update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+				}
+			})
 	} };
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
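The macro now routes everything through an `_internal` arm that takes a `$completed` expression, so each public arm only decides what "completion" means. Below is a rough sketch of that control flow using plain functions and closures; the names, the simplified status enum, and the signatures are illustrative assumptions rather than LDK's API (the real code must stay a macro so it can borrow several locks at once and early-return from the caller).

```rust
// A plain-function sketch of the control flow the reworked macro encodes.
#[allow(dead_code)]
enum ChannelMonitorUpdateStatus { InProgress, Completed }

struct Update { update_id: u64 }

// The `_internal` arm: inspect the persister result and, if it completed
// synchronously, run the caller-supplied `$completed` hook. (The real macro
// may also fail/close the channel on permanent errors; elided here.)
fn handle_update_res<F: FnOnce()>(res: ChannelMonitorUpdateStatus, completed: F) -> Result<bool, ()> {
    match res {
        // Update still pending with the persister; the channel stays paused.
        ChannelMonitorUpdateStatus::InProgress => Ok(false),
        // Persisted synchronously: run the completion hook now.
        ChannelMonitorUpdateStatus::Completed => { completed(); Ok(true) },
    }
}

// The arm used for post-funding updates: track the update as in-flight
// *before* handing it to the chain monitor, then pop it again if the
// persister completed synchronously.
fn handle_new_update(
    in_flight: &mut Vec<Update>,
    update: Update,
    update_channel: impl FnOnce(&Update) -> ChannelMonitorUpdateStatus,
) -> Result<bool, ()> {
    in_flight.push(update);
    let res = update_channel(in_flight.last().unwrap());
    let mut completed_sync = false;
    let result = handle_update_res(res, || completed_sync = true);
    if completed_sync {
        // The real macro does this inside `$completed`, while the same
        // peer-state lock is still held.
        in_flight.pop();
        if in_flight.is_empty() {
            // Here the real code also checks blocked_monitor_updates_pending()
            // before running handle_monitor_update_completion!.
        }
    }
    result
}

fn main() {
    let mut in_flight = Vec::new();
    let ok = handle_new_update(&mut in_flight, Update { update_id: 1 },
        |upd| { assert_eq!(upd.update_id, 1); ChannelMonitorUpdateStatus::Completed });
    assert_eq!(ok, Ok(true));
    assert!(in_flight.is_empty());
}
```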
@@ -4032,15 +4053,14 @@ where
 				match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 					hash_map::Entry::Occupied(mut chan) => {
 						updated_chan = true;
-						handle_new_monitor_update!(self, funding_txo, update,
+						handle_new_monitor_update!(self, funding_txo, update.clone(),
 							peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 					},
 					hash_map::Entry::Vacant(_) => Ok(()),
 				}
 			} else { Ok(()) }
 		};
 		if !updated_chan {
-			// TODO: Track this as in-flight even though the channel is closed.
 			let _ = self.chain_monitor.update_channel(funding_txo, &update);
 		}
 		// TODO: If this channel has since closed, we're likely providing a payment
@@ -4831,8 +4851,14 @@ where
 				hash_map::Entry::Vacant(_) => return,
 			}
 		};
-		log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
-			highest_applied_update_id, channel.get().context.get_latest_monitor_update_id());
+		let remaining_in_flight =
+			if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
+				pending.retain(|upd| upd.update_id > highest_applied_update_id);
+				pending.len()
+			} else { 0 };
+		log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+			highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(),
+			remaining_in_flight);
 		if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
 			return;
 		}
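Completion notifications can report a `highest_applied_update_id` while newer updates are still in flight; because only updates with a strictly greater id survive the `retain`, partially applied batches are handled naturally and the channel stays paused until the list drains. A small illustrative example (plain `u64` ids and a hypothetical helper name, not the real code path):

```rust
// Drop every in-flight update id at or below the one the persister reported
// as durably applied; return how many remain in flight.
fn prune_completed(in_flight: &mut Vec<u64>, highest_applied_update_id: u64) -> usize {
    in_flight.retain(|&id| id > highest_applied_update_id);
    in_flight.len()
}

fn main() {
    // Updates 5, 6 and 7 were handed to the persister.
    let mut in_flight = vec![5u64, 6, 7];
    // Completion through update 6: only 7 remains, so the channel cannot
    // resume normal operation yet.
    assert_eq!(prune_completed(&mut in_flight, 6), 1);
    // Once 7 completes the list drains; the real code then also checks that
    // no blocked updates are pending before unblocking the channel.
    assert_eq!(prune_completed(&mut in_flight, 7), 0);
}
```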
@@ -6959,6 +6985,7 @@ where
 					inbound_v1_channel_by_id: HashMap::new(),
 					latest_features: init_msg.features.clone(),
 					pending_msg_events: Vec::new(),
+					in_flight_monitor_updates: BTreeMap::new(),
 					monitor_update_blocked_actions: BTreeMap::new(),
 					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					is_connected: true,
@@ -7792,6 +7819,16 @@ where
 			pending_claiming_payments = None;
 		}
 
+		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
+		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
+			for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
+				if !updates.is_empty() {
+					if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+					in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
+				}
+			}
+		}
+
 		write_tlv_fields!(writer, {
 			(1, pending_outbound_payments_no_retry, required),
 			(2, pending_intercepted_htlcs, option),
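For serialization, the per-peer maps are flattened into a single map keyed by `(counterparty node id, funding outpoint)` and written with the `option` TLV semantics (type 10 below), so nothing is emitted when no updates are in flight; on read the flat map is regrouped per peer, as in the later hunks. The sketch below uses toy key types (not the real `PublicKey`/`OutPoint`) to show that flatten-and-regroup round trip:

```rust
use std::collections::{BTreeMap, HashMap};

// Illustrative stand-ins; the real code keys by (PublicKey, OutPoint) and
// stores Vec<ChannelMonitorUpdate>.
type NodeId = u8;
type FundingOutpoint = u32;
type UpdateId = u64;

// Flatten the per-peer maps into one map keyed by (peer, funding outpoint),
// returning None when nothing is in flight so the TLV can be skipped entirely.
fn flatten(
    per_peer: &BTreeMap<NodeId, BTreeMap<FundingOutpoint, Vec<UpdateId>>>,
) -> Option<HashMap<(NodeId, FundingOutpoint), Vec<UpdateId>>> {
    let mut flat: Option<HashMap<_, _>> = None;
    for (node_id, peer_map) in per_peer {
        for (outpoint, updates) in peer_map {
            if !updates.is_empty() {
                flat.get_or_insert_with(HashMap::new)
                    .insert((*node_id, *outpoint), updates.clone());
            }
        }
    }
    flat
}

// On read, regroup the flat map back into per-peer state.
fn regroup(
    flat: HashMap<(NodeId, FundingOutpoint), Vec<UpdateId>>,
) -> BTreeMap<NodeId, BTreeMap<FundingOutpoint, Vec<UpdateId>>> {
    let mut per_peer: BTreeMap<NodeId, BTreeMap<FundingOutpoint, Vec<UpdateId>>> = BTreeMap::new();
    for ((node_id, outpoint), updates) in flat {
        per_peer.entry(node_id).or_default().insert(outpoint, updates);
    }
    per_peer
}

fn main() {
    let mut per_peer = BTreeMap::new();
    per_peer.insert(7, BTreeMap::from([(42, vec![3u64, 4])]));
    let flat = flatten(&per_peer).expect("one update list is non-empty");
    assert_eq!(regroup(flat), per_peer);
}
```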
@@ -7802,6 +7839,7 @@ where
 			(7, self.fake_scid_rand_bytes, required),
 			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
 			(9, htlc_purposes, vec_type),
+			(10, in_flight_monitor_updates, option),
 			(11, self.probing_cookie_secret, required),
 			(13, htlc_onion_fields, optional_vec),
 		});
@@ -8080,7 +8118,6 @@ where
 				log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
 					log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
 					monitor.get_latest_update_id());
-				channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
 				if let Some(short_channel_id) = channel.context.get_short_channel_id() {
 					short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
 				}
@@ -8166,6 +8203,7 @@ where
 			inbound_v1_channel_by_id: HashMap::new(),
 			latest_features: Readable::read(reader)?,
 			pending_msg_events: Vec::new(),
+			in_flight_monitor_updates: BTreeMap::new(),
 			monitor_update_blocked_actions: BTreeMap::new(),
 			actions_blocking_raa_monitor_updates: BTreeMap::new(),
 			is_connected: false,
@@ -8197,24 +8235,6 @@ where
 			}
 		}
 
-		for (node_id, peer_mtx) in per_peer_state.iter() {
-			let peer_state = peer_mtx.lock().unwrap();
-			for (_, chan) in peer_state.channel_by_id.iter() {
-				for update in chan.uncompleted_unblocked_mon_updates() {
-					if let Some(funding_txo) = chan.context.get_funding_txo() {
-						log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
-							update.update_id, log_bytes!(funding_txo.to_channel_id()));
-						pending_background_events.push(
-							BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-								counterparty_node_id: *node_id, funding_txo, update: update.clone(),
-							});
-					} else {
-						return Err(DecodeError::InvalidValue);
-					}
-				}
-			}
-		}
-
 		let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
 		let highest_seen_timestamp: u32 = Readable::read(reader)?;
@@ -8251,6 +8271,7 @@ where
 		let mut pending_claiming_payments = Some(HashMap::new());
 		let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
 		let mut events_override = None;
+		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
 			(2, pending_intercepted_htlcs, option),
@@ -8261,6 +8282,7 @@ where
 			(7, fake_scid_rand_bytes, option),
 			(8, events_override, option),
 			(9, claimable_htlc_purposes, vec_type),
+			(10, in_flight_monitor_updates, option),
 			(11, probing_cookie_secret, option),
 			(13, claimable_htlc_onion_fields, optional_vec),
 		});
@@ -8294,6 +8316,31 @@ where
 			retry_lock: Mutex::new(())
 		};
 
+		if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
+			for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
+				let mut peer_state_lock = peer_state_mtx.lock().unwrap();
+				let peer_state = &mut *peer_state_lock;
+				for (_, chan) in peer_state.channel_by_id.iter() {
+					if let Some(funding_txo) = chan.context.get_funding_txo() {
+						if let Some(mut peer_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+							let monitor = args.channel_monitors.get(&funding_txo).unwrap();
+							peer_in_flight_upds.retain(|upd| upd.update_id > monitor.get_latest_update_id());
+							for update in peer_in_flight_upds.iter() {
+								log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
+									update.update_id, log_bytes!(funding_txo.to_channel_id()));
+								pending_background_events.push(
+									BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+										counterparty_node_id: *counterparty_id,
+										funding_txo, update: update.clone(),
+									});
+							}
+							peer_state.in_flight_monitor_updates.insert(funding_txo, peer_in_flight_upds);
+						}
+					} else { debug_assert!(false, "We already loaded a channel, it has to have been funded"); }
+				}
+			}
+		}
+
 		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authorative state
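On startup, each deserialized in-flight list is pruned against the corresponding `ChannelMonitor`'s latest update id (anything at or below it is already durable), the survivors are queued for replay as background events, and the same survivors are re-inserted into the per-peer in-flight map. A simplified sketch of that invariant, with hypothetical names and trivial stand-in types:

```rust
use std::collections::BTreeMap;

// Simplified stand-ins; the real code uses PublicKey, OutPoint and
// BackgroundEvent::MonitorUpdateRegeneratedOnStartup.
type NodeId = u8;
type FundingOutpoint = u32;

#[derive(Clone, Debug, PartialEq)]
struct MonitorUpdate { update_id: u64 }

#[derive(Debug, PartialEq)]
struct ReplayEvent { node_id: NodeId, funding_outpoint: FundingOutpoint, update: MonitorUpdate }

// Prune a deserialized in-flight list against what the ChannelMonitor already
// persisted, queue the survivors for replay, and keep them tracked as in-flight.
fn restore_in_flight(
    node_id: NodeId,
    funding_outpoint: FundingOutpoint,
    mut pending: Vec<MonitorUpdate>,
    monitor_latest_update_id: u64,
    in_flight: &mut BTreeMap<FundingOutpoint, Vec<MonitorUpdate>>,
    background_events: &mut Vec<ReplayEvent>,
) {
    // Anything the monitor already contains is complete and can be dropped.
    pending.retain(|upd| upd.update_id > monitor_latest_update_id);
    for update in pending.iter() {
        background_events.push(ReplayEvent { node_id, funding_outpoint, update: update.clone() });
    }
    in_flight.insert(funding_outpoint, pending);
}

fn main() {
    let mut in_flight = BTreeMap::new();
    let mut background_events = Vec::new();
    // Updates 9 and 10 were in flight at shutdown; the monitor persisted through 9.
    restore_in_flight(7, 42,
        vec![MonitorUpdate { update_id: 9 }, MonitorUpdate { update_id: 10 }],
        9, &mut in_flight, &mut background_events);
    assert_eq!(in_flight[&42], vec![MonitorUpdate { update_id: 10 }]);
    assert_eq!(background_events.len(), 1);
}
```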