@@ -3939,11 +3939,10 @@ where
	}

	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
	fn apply_post_close_monitor_update(
		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
		// Note that there may be some post-close updates which need to be well-ordered with
		// respect to the `update_id`, so we hold the `peer_state` lock here.
		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3954,16 +3953,21 @@ where
		match peer_state.channel_by_id.entry(channel_id) {
			hash_map::Entry::Occupied(mut chan_phase) => {
				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let completed = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress } ;
+					return;
				} else {
					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
				}
			},
			hash_map::Entry::Vacant(_) => {},
		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
	}

	/// When a channel is removed, two things need to happen:
@@ -3992,7 +3996,7 @@ where
		}
		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
		}
		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6309,9 +6313,7 @@ where
					let _ = self.chain_monitor.update_channel(funding_txo, &update);
				},
				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
				},
				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7242,32 +7244,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
			let payment_hash = payment_preimage.into();
			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

-			if !during_init {
-				if let Some(action) = action_opt {
-					log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-						chan_id, action);
-					peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-				}
+			if let Some(action) = action_opt {
+				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+					chan_id, action);
+				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+			}

+			if !during_init {
				handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
			} else {
				// If we're running during init we cannot update a monitor directly - they probably
				// haven't actually been loaded yet. Instead, push the monitor update as a background
				// event.
-				// TODO: Track this update as pending and only complete the completion action when it
-				// finishes.
+
+				let in_flight_updates = peer_state.in_flight_monitor_updates
+					.entry(prev_hop.funding_txo)
+					.or_insert_with(Vec::new);
+				in_flight_updates.push(preimage_update.clone());
+
				let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
					counterparty_node_id,
					funding_txo: prev_hop.funding_txo,
					channel_id: prev_hop.channel_id,
					update: preimage_update,
				};
				self.pending_background_events.lock().unwrap().push(event);
-
-				mem::drop(peer_state);
-				mem::drop(per_peer_state);
-
-				self.handle_monitor_update_completion_actions(action_opt);
			}
		}

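A note on the init-time path in the last hunk: rather than immediately running the completion action (the removed `mem::drop` and `handle_monitor_update_completion_actions` calls), the new code records the preimage update in `in_flight_monitor_updates` and leaves the action in `monitor_update_blocked_actions`, so it should only run once the background-replayed update completes. The sketch below is a minimal, self-contained model of that bookkeeping under simplified, hypothetical stand-in types (`FundingTxo`, `MonitorUpdate`, `CompletionAction`, `PeerState`); it is not LDK's actual API.

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the real types in the hunk above; not LDK's API.
type FundingTxo = [u8; 32];

#[derive(Clone, Debug, PartialEq)]
struct MonitorUpdate {
	update_id: u64,
}

#[derive(Debug)]
struct CompletionAction {
	note: &'static str,
}

#[derive(Default)]
struct PeerState {
	// Mirrors `in_flight_monitor_updates`: updates handed off but not yet persisted.
	in_flight_monitor_updates: HashMap<FundingTxo, Vec<MonitorUpdate>>,
	// Mirrors `monitor_update_blocked_actions`: actions gated on those updates.
	blocked_actions: Vec<CompletionAction>,
}

impl PeerState {
	// During init the monitor cannot be updated directly, so the update is
	// recorded as in-flight and the action stays blocked rather than running now.
	fn track_during_init(
		&mut self, txo: FundingTxo, update: MonitorUpdate, action: CompletionAction,
	) {
		self.blocked_actions.push(action);
		self.in_flight_monitor_updates.entry(txo).or_insert_with(Vec::new).push(update);
		// The real code then queues a background event to replay `update` later.
	}

	// Once the replayed update completes, its in-flight entry is cleared; when no
	// updates remain in flight, the blocked actions become runnable.
	fn on_update_completed(
		&mut self, txo: FundingTxo, update: &MonitorUpdate,
	) -> Vec<CompletionAction> {
		if let Some(updates) = self.in_flight_monitor_updates.get_mut(&txo) {
			updates.retain(|u| u != update);
			if updates.is_empty() {
				return std::mem::take(&mut self.blocked_actions);
			}
		}
		Vec::new()
	}
}

fn main() {
	let mut peer = PeerState::default();
	let txo = [0u8; 32];
	let update = MonitorUpdate { update_id: 1 };
	peer.track_during_init(txo, update.clone(), CompletionAction { note: "claim bookkeeping" });
	// Nothing runs until the replayed update completes.
	assert_eq!(peer.on_update_completed(txo, &update).len(), 1);
}
```

Read this way, the init path ends up matching the non-init path: the action is released only after the update has actually been applied, which is presumably why the `#[must_use]` status return could be dropped from `apply_post_close_monitor_update` in the first hunk.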