@@ -3922,39 +3922,6 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn set_closed_chan_next_monitor_update_id(
-		peer_state: &mut PeerState<SP>, channel_id: ChannelId, monitor_update: &mut ChannelMonitorUpdate,
-	) {
-		match peer_state.closed_channel_monitor_update_ids.entry(channel_id) {
-			btree_map::Entry::Vacant(entry) => {
-				let is_closing_unupdated_monitor = monitor_update.update_id == 1
-					&& monitor_update.updates.len() == 1
-					&& matches!(&monitor_update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. });
-				// If the ChannelMonitorUpdate is closing a channel that never got past initial
-				// funding (to have any commitment updates), we'll skip inserting in
-				// `locked_close_channel`, allowing us to avoid keeping around the PeerState for
-				// that peer. In that specific case we expect no entry in the map here. In any
-				// other case, this is a bug, but in production we go ahead and recover by
-				// inserting the update_id and hoping it's right.
-				debug_assert!(is_closing_unupdated_monitor, "Expected closing monitor against an unused channel, got {:?}", monitor_update);
-				if !is_closing_unupdated_monitor {
-					entry.insert(monitor_update.update_id);
-				}
-			},
-			btree_map::Entry::Occupied(entry) => {
-				// If we're running in a threaded environment it's possible we generate updates for
-				// a channel that is closing, then apply some preimage update, then go back and
-				// apply the close monitor update here. In order to ensure the updates are still
-				// well-ordered, we have to use the `closed_channel_monitor_update_ids` map to
-				// override the `update_id`, taking care to handle old monitors where the
-				// `latest_update_id` is already `u64::MAX`.
-				let latest_update_id = entry.into_mut();
-				*latest_update_id = latest_update_id.saturating_add(1);
-				monitor_update.update_id = *latest_update_id;
-			}
-		}
-	}
-
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
 	#[must_use]
 	fn apply_post_close_monitor_update(
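
The helper removed above kept post-close monitor updates well-ordered by tracking the last `update_id` per closed channel. Below is a minimal standalone sketch of that entry-based logic; `MonitorUpdate`, `set_next_update_id`, and the integer channel ids are simplified stand-ins for illustration, not LDK's real API:

```rust
use std::collections::{btree_map, BTreeMap};

// Simplified stand-in for LDK's ChannelMonitorUpdate; illustrative only.
struct MonitorUpdate {
    update_id: u64,
    is_force_close: bool,
}

// Mirrors the removed helper: on the first update for a closed channel,
// record its update_id (except when closing a never-updated channel); on
// later updates, bump the stored id and stamp it onto the update so updates
// stay well-ordered even when threads race on the same closing channel.
fn set_next_update_id(ids: &mut BTreeMap<u64, u64>, channel_id: u64, update: &mut MonitorUpdate) {
    match ids.entry(channel_id) {
        btree_map::Entry::Vacant(entry) => {
            // Closing a channel that never got past initial funding is the
            // only case where no entry is expected here; skip tracking it.
            let is_closing_unupdated = update.update_id == 1 && update.is_force_close;
            if !is_closing_unupdated {
                entry.insert(update.update_id);
            }
        },
        btree_map::Entry::Occupied(entry) => {
            let latest = entry.into_mut();
            // saturating_add keeps legacy monitors already at u64::MAX well-defined.
            *latest = latest.saturating_add(1);
            update.update_id = *latest;
        },
    }
}

fn main() {
    let mut ids = BTreeMap::new();
    let mut close = MonitorUpdate { update_id: 5, is_force_close: true };
    set_next_update_id(&mut ids, 42, &mut close); // records 5 for channel 42
    let mut preimage = MonitorUpdate { update_id: 0, is_force_close: false };
    set_next_update_id(&mut ids, 42, &mut preimage); // stamps 6
    assert_eq!(preimage.update_id, 6);
}
```
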
@@ -7220,15 +7187,25 @@ where
 		let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
 
 		let mut preimage_update = ChannelMonitorUpdate {
-			update_id: 0, // set in set_closed_chan_next_monitor_update_id
+			update_id: 0, // set below
 			counterparty_node_id: None,
 			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 				payment_preimage,
 				payment_info,
 			}],
 			channel_id: Some(prev_hop.channel_id),
 		};
-		Self::set_closed_chan_next_monitor_update_id(&mut *peer_state, prev_hop.channel_id, &mut preimage_update);
+
+		if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
+			*latest_update_id = latest_update_id.saturating_add(1);
+			preimage_update.update_id = *latest_update_id;
+		} else {
+			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
+This should have been checked for availability on startup but somehow it is no longer available.
+This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
+			log_error!(self.logger, "{}", err);
+			panic!("{}", err);
+		}
 
 		// Note that we do process the completion action here. This totally could be a
 		// duplicate claim, but we have no way of knowing without interrogating the
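
The replacement drops the vacant-entry recovery path: if `closed_channel_monitor_update_ids` has no entry for the channel (it should have been populated on startup), the claim path now fails loudly rather than guessing an `update_id`. A hedged sketch of the new flow, reusing the same simplified types as the sketch above (`next_update_id` is a hypothetical name):

```rust
use std::collections::BTreeMap;

// Sketch of the replacement logic: the per-channel entry must already exist
// (populated when the node starts up); if it is missing, continuing could
// mis-order monitor updates, so fail loudly instead of trying to recover.
fn next_update_id(ids: &mut BTreeMap<u64, u64>, channel_id: u64) -> u64 {
    if let Some(latest) = ids.get_mut(&channel_id) {
        // As before, saturating_add tolerates legacy monitors pinned at u64::MAX.
        *latest = latest.saturating_add(1);
        *latest
    } else {
        panic!("missing latest ChannelMonitorUpdate ID for channel {channel_id}; expected it to be populated at startup");
    }
}

fn main() {
    let mut ids = BTreeMap::from([(42u64, 5u64)]);
    assert_eq!(next_update_id(&mut ids, 42), 6);
}
```
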