@@ -3938,39 +3938,6 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn set_closed_chan_next_monitor_update_id(
-		peer_state: &mut PeerState<SP>, channel_id: ChannelId, monitor_update: &mut ChannelMonitorUpdate,
-	) {
-		match peer_state.closed_channel_monitor_update_ids.entry(channel_id) {
-			btree_map::Entry::Vacant(entry) => {
-				let is_closing_unupdated_monitor = monitor_update.update_id == 1
-					&& monitor_update.updates.len() == 1
-					&& matches!(&monitor_update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. });
-				// If the ChannelMonitorUpdate is closing a channel that never got past initial
-				// funding (to have any commitment updates), we'll skip inserting in
-				// `locked_close_channel`, allowing us to avoid keeping around the PeerState for
-				// that peer. In that specific case we expect no entry in the map here. In any
-				// other cases, this is a bug, but in production we go ahead and recover by
-				// inserting the update_id and hoping its right.
-				debug_assert!(is_closing_unupdated_monitor, "Expected closing monitor against an unused channel, got {:?}", monitor_update);
-				if !is_closing_unupdated_monitor {
-					entry.insert(monitor_update.update_id);
-				}
-			},
-			btree_map::Entry::Occupied(entry) => {
-				// If we're running in a threaded environment its possible we generate updates for
-				// a channel that is closing, then apply some preimage update, then go back and
-				// apply the close monitor update here. In order to ensure the updates are still
-				// well-ordered, we have to use the `closed_channel_monitor_update_ids` map to
-				// override the `update_id`, taking care to handle old monitors where the
-				// `latest_update_id` is already `u64::MAX`.
-				let latest_update_id = entry.into_mut();
-				*latest_update_id = latest_update_id.saturating_add(1);
-				monitor_update.update_id = *latest_update_id;
-			}
-		}
-	}
-
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
 	#[must_use]
 	fn apply_post_close_monitor_update(
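
The helper removed above tracked, per closed channel, the latest ChannelMonitorUpdate ID and bumped it for each further update so that updates applied after closure stay well-ordered. The following is a minimal, self-contained sketch of that bookkeeping pattern; the types and the function name are simplified placeholders for illustration, not LDK's actual API.

	// Simplified stand-ins for LDK's ChannelId and ChannelMonitorUpdate.
	use std::collections::{btree_map, BTreeMap};

	type ChannelId = [u8; 32];

	struct MonitorUpdate {
		update_id: u64,
	}

	fn assign_next_update_id(
		closed_channel_monitor_update_ids: &mut BTreeMap<ChannelId, u64>,
		channel_id: ChannelId, update: &mut MonitorUpdate,
	) {
		match closed_channel_monitor_update_ids.entry(channel_id) {
			btree_map::Entry::Vacant(entry) => {
				// First update seen for this closed channel: record its ID so later
				// updates can be ordered after it.
				entry.insert(update.update_id);
			},
			btree_map::Entry::Occupied(entry) => {
				// Later update: bump the stored ID and stamp the update with it,
				// saturating so an ID already at u64::MAX does not wrap.
				let latest_update_id = entry.into_mut();
				*latest_update_id = latest_update_id.saturating_add(1);
				update.update_id = *latest_update_id;
			},
		}
	}

	fn main() {
		let mut ids = BTreeMap::new();
		let chan = [0u8; 32];
		let mut first = MonitorUpdate { update_id: 7 };
		assign_next_update_id(&mut ids, chan, &mut first);
		let mut second = MonitorUpdate { update_id: 0 };
		assign_next_update_id(&mut ids, chan, &mut second);
		assert_eq!(second.update_id, 8);
	}
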
@@ -7236,15 +7203,25 @@ where
 		let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
 
 		let mut preimage_update = ChannelMonitorUpdate {
-			update_id: 0, // set in set_closed_chan_next_monitor_update_id
+			update_id: 0, // set below
 			counterparty_node_id: None,
 			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 				payment_preimage,
 				payment_info,
 			}],
 			channel_id: Some(prev_hop.channel_id),
 		};
-		Self::set_closed_chan_next_monitor_update_id(&mut *peer_state, prev_hop.channel_id, &mut preimage_update);
+
+		if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
+			*latest_update_id = latest_update_id.saturating_add(1);
+			preimage_update.update_id = *latest_update_id;
+		} else {
+			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
+This should have been checked for availability on startup but somehow it is no longer available.
+This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
+			log_error!(self.logger, "{}", err);
+			panic!("{}", err);
+		}
 
 		// Note that we do process the completion action here. This totally could be a
 		// duplicate claim, but we have no way of knowing without interrogating the
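
The inlined logic in the second hunk requires the latest update ID for the closed channel to already be present in closed_channel_monitor_update_ids, and treats a missing entry as an internal bug rather than recovering with a guessed ID. A rough standalone sketch of that flow follows, using placeholder types and a plain panic in place of LDK's log_error-then-panic sequence.

	use std::collections::BTreeMap;

	// Placeholder stand-ins for LDK's ChannelId and ChannelMonitorUpdate.
	type ChannelId = [u8; 32];

	struct MonitorUpdate {
		update_id: u64,
	}

	fn stamp_closed_channel_update(
		closed_channel_monitor_update_ids: &mut BTreeMap<ChannelId, u64>,
		chan_id: ChannelId, preimage_update: &mut MonitorUpdate,
	) {
		if let Some(latest_update_id) = closed_channel_monitor_update_ids.get_mut(&chan_id) {
			// Bump the stored ID (saturating at u64::MAX) and stamp it on this update so it
			// orders after everything previously applied for the closed channel.
			*latest_update_id = latest_update_id.saturating_add(1);
			preimage_update.update_id = *latest_update_id;
		} else {
			// The ID should have been recorded when the channel closed and re-checked on
			// startup; refusing to continue is safer than inventing an ID here.
			panic!("missing latest ChannelMonitorUpdate ID for closed channel");
		}
	}

	fn main() {
		let mut ids = BTreeMap::from([([0u8; 32], 5u64)]);
		let mut update = MonitorUpdate { update_id: 0 };
		stamp_closed_channel_update(&mut ids, [0u8; 32], &mut update);
		assert_eq!(update.update_id, 6);
	}
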