@@ -3923,11 +3923,10 @@ where
 	}
 
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3938,16 +3937,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let completed = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress } ;
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	/// When a channel is removed, two things need to happen:
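Note: the hunk above replaces the direct `self.chain_monitor.update_channel(...)` call, whose return status callers were discarding, with the same `handle_new_monitor_update!` path used for open channels, so post-close updates are tracked as in-flight until persistence completes. Below is a rough sketch of that tracking pattern; the types and the `track_in_flight_update` helper are simplified stand-ins for illustration, not LDK's actual `PeerState` fields or the macro expansion.

```rust
// Illustrative only: approximates what routing a post-close update through the
// shared in-flight bookkeeping buys us, under simplified assumed types.
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress }

#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

// Hypothetical per-peer bookkeeping keyed by a funding outpoint identifier.
struct PeerState { in_flight: HashMap<u64, Vec<MonitorUpdate>> }

fn track_in_flight_update(
	peer: &mut PeerState, funding_txo: u64, update: MonitorUpdate, status: UpdateStatus,
) {
	// Record the update as pending first so a later "update completed" notification
	// can find it and release any blocked completion actions in order.
	peer.in_flight.entry(funding_txo).or_insert_with(Vec::new).push(update.clone());
	if status == UpdateStatus::Completed {
		// Persisted synchronously: drop it from the pending set right away.
		let pending = peer.in_flight.get_mut(&funding_txo).unwrap();
		pending.retain(|u| u.update_id != update.update_id);
	}
	// If still InProgress, it stays tracked until the persistence layer reports
	// completion, exactly like updates for channels that are still open.
}
```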
@@ -3976,7 +3980,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6293,9 +6297,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7226,32 +7228,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			let payment_hash = payment_preimage.into();
 			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-			if !during_init {
-				if let Some(action) = action_opt {
-					log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-						chan_id, action);
-					peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-				}
+			if let Some(action) = action_opt {
+				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+					chan_id, action);
+				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+			}
 
+			if !during_init {
 				handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 			} else {
 				// If we're running during init we cannot update a monitor directly - they probably
 				// haven't actually been loaded yet. Instead, push the monitor update as a background
 				// event.
-				// TODO: Track this update as pending and only complete the completion action when it
-				// finishes.
+
+				let in_flight_updates = peer_state.in_flight_monitor_updates
+					.entry(prev_hop.funding_txo)
+					.or_insert_with(Vec::new);
+				in_flight_updates.push(preimage_update.clone());
+
 				let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 					counterparty_node_id,
 					funding_txo: prev_hop.funding_txo,
 					channel_id: prev_hop.channel_id,
 					update: preimage_update,
 				};
 				self.pending_background_events.lock().unwrap().push(event);
-
-				mem::drop(peer_state);
-				mem::drop(per_peer_state);
-
-				self.handle_monitor_update_completion_actions(action_opt);
 			}
 		}
 
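Note: in the during-init branch above, the preimage update is now recorded in `in_flight_monitor_updates` before the `MonitorUpdateRegeneratedOnStartup` background event is queued, and the immediate `handle_monitor_update_completion_actions(action_opt)` call is removed, so the tracked completion action only runs once the replayed update actually completes. A minimal sketch of that defer-until-complete shape follows; the names (`PendingClaims`, `queue_update`, `on_update_completed`) are invented stand-ins for the real `ChannelManager` machinery.

```rust
// Illustrative only: deferring an action until a queued monitor update completes.
use std::collections::HashMap;

type ChannelId = [u8; 32];

#[derive(Default)]
struct PendingClaims {
	// Updates queued (e.g. as background events during init) but not yet persisted.
	in_flight: HashMap<ChannelId, Vec<u64>>,
	// Actions to run once all in-flight updates for the channel have completed.
	blocked_actions: HashMap<ChannelId, Vec<String>>,
}

impl PendingClaims {
	fn queue_update(&mut self, chan: ChannelId, update_id: u64, action: String) {
		// Track the update and block its completion action instead of running it now.
		self.in_flight.entry(chan).or_default().push(update_id);
		self.blocked_actions.entry(chan).or_default().push(action);
	}

	// Called when the persistence layer reports that `update_id` is durable.
	fn on_update_completed(&mut self, chan: ChannelId, update_id: u64) -> Vec<String> {
		if let Some(ids) = self.in_flight.get_mut(&chan) {
			ids.retain(|id| *id != update_id);
			if ids.is_empty() {
				// Only now is it safe to run the blocked completion actions.
				return self.blocked_actions.remove(&chan).unwrap_or_default();
			}
		}
		Vec::new()
	}
}
```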