Commit 1dc0663
Support async ChannelMonitorUpdates to closed chans at startup
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this (for updates which are generated during startup).

Thanks to all the work we've built up over the past many commits, this is a fairly straightforward patch: we remove the immediate-completion logic from `claim_mpp_part` and add the required in-flight tracking logic to `apply_post_close_monitor_update`.

Like in the during-runtime case in the previous commit, we sadly can't use the `handle_new_monitor_update` macro wholesale, as it also handles the `Channel` resumption, which we don't do here.
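For illustration only, here is a minimal, standalone Rust sketch of the pattern this commit moves to, using hypothetical names (ClosedChannelUpdateTracker, apply_post_close_update, update_completed) rather than LDK's actual types: a claim update against a closed channel is recorded as in-flight instead of being assumed complete, and its completion action (e.g. surfacing a PaymentForwarded-style event) is only released once the asynchronous persister reports the update as durably written.

// Conceptual sketch only -- not LDK code; all names are hypothetical.
use std::collections::HashMap;

type ChannelId = [u8; 32];
type UpdateId = u64;

#[derive(Debug)]
enum CompletionAction {
    // e.g. surface a PaymentForwarded-style event once the preimage is durably persisted
    EmitPaymentForwarded { fee_msat: u64 },
}

#[derive(Default)]
struct ClosedChannelUpdateTracker {
    // Monitor updates handed to the (async) persister but not yet confirmed complete.
    in_flight_updates: HashMap<ChannelId, Vec<UpdateId>>,
    // Actions that must only run once all in-flight updates for the channel complete.
    blocked_actions: HashMap<ChannelId, Vec<CompletionAction>>,
}

impl ClosedChannelUpdateTracker {
    /// Called when a preimage (claim) update is generated for an already-closed channel.
    fn apply_post_close_update(&mut self, chan: ChannelId, update_id: UpdateId, action: CompletionAction) {
        // The old behaviour assumed the update completed immediately; instead we
        // record it as in-flight and park the completion action.
        self.in_flight_updates.entry(chan).or_default().push(update_id);
        self.blocked_actions.entry(chan).or_default().push(action);
    }

    /// Called when the async persister reports the update as durably written.
    /// Returns the completion actions that are now safe to run.
    fn update_completed(&mut self, chan: ChannelId, update_id: UpdateId) -> Vec<CompletionAction> {
        if let Some(updates) = self.in_flight_updates.get_mut(&chan) {
            updates.retain(|id| *id != update_id);
            if updates.is_empty() {
                self.in_flight_updates.remove(&chan);
                // Everything for this channel is persisted; release the parked actions.
                return self.blocked_actions.remove(&chan).unwrap_or_default();
            }
        }
        Vec::new()
    }
}

fn main() {
    let mut tracker = ClosedChannelUpdateTracker::default();
    let chan = [0u8; 32];
    tracker.apply_post_close_update(chan, 1, CompletionAction::EmitPaymentForwarded { fee_msat: 1000 });
    // Nothing is released until the persister confirms the write...
    assert!(tracker.update_completed(chan, 2).is_empty());
    // ...and only then does the parked action fire.
    let released = tracker.update_completed(chan, 1);
    println!("released: {:?}", released);
}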
1 parent 059e32a · commit 1dc0663

2 files changed (+44, -38 lines)

lightning/src/ln/chanmon_update_fail_tests.rs (+21, -16)

@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
     if !close_chans_before_reload {
         check_closed_broadcast(&nodes[1], 1, true);
         check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
-    } else {
-        // While we forwarded the payment a while ago, we don't want to process events too early or
-        // we'll run background tasks we wanted to test individually.
-        expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
     }
 
     mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
     // Make sure the B<->C channel is still alive and well by sending a payment over it.
     let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
     reconnect_args.pending_responding_commitment_signed.1 = true;
-    if !close_chans_before_reload {
-        // TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
-        // will consider the forwarded payment complete and allow the B<->C
-        // `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
-        // be allowed, and needs fixing.
-        reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-    }
+    // The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is the
+    // equivalent to the responding `commitment_signed` being a duplicate for node B, thus we
+    // need to set the `pending_responding_commitment_signed_dup` flag.
+    reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
     reconnect_args.pending_raa.1 = true;
 
     reconnect_nodes(reconnect_args);
+
+    // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+    // `PaymentForwarded` event will finally be released.
     let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
     nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-    expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
-    if !close_chans_before_reload {
-        // Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
-        // channel will fly, removing the payment preimage from it.
-        check_added_monitors(&nodes[1], 1);
+
+    // If the A<->B channel was closed before we reload, we'll replay the claim against it on
+    // reload, causing the `PaymentForwarded` event to get replayed.
+    let evs = nodes[1].node.get_and_clear_pending_events();
+    assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+    for ev in evs {
+        if let Event::PaymentForwarded { .. } = ev { }
+        else {
+            panic!();
+        }
     }
+
+    // Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
+    // will fly, removing the payment preimage from it.
+    check_added_monitors(&nodes[1], 1);
     assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
     send_payment(&nodes[1], &[&nodes[2]], 100_000);
 }

lightning/src/ln/channelmanager.rs (+23, -22)

@@ -3939,11 +3939,10 @@ where
     }
 
     /// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-    #[must_use]
     fn apply_post_close_monitor_update(
         &self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
         monitor_update: ChannelMonitorUpdate,
-    ) -> ChannelMonitorUpdateStatus {
+    ) {
         // Note that there may be some post-close updates which need to be well-ordered with
         // respect to the `update_id`, so we hold the `peer_state` lock here.
         let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3954,16 +3953,21 @@ where
         match peer_state.channel_by_id.entry(channel_id) {
             hash_map::Entry::Occupied(mut chan_phase) => {
                 if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-                    let completed = handle_new_monitor_update!(self, funding_txo,
+                    handle_new_monitor_update!(self, funding_txo,
                         monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-                    return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress };
+                    return;
                 } else {
                     debug_assert!(false, "We shouldn't have an update for a non-funded channel");
                 }
             },
             hash_map::Entry::Vacant(_) => {},
         }
-        self.chain_monitor.update_channel(funding_txo, &monitor_update)
+        let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+        handle_new_monitor_update!(
+            self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+            logger, channel_id, POST_CHANNEL_CLOSE
+        );
     }
 
     /// When a channel is removed, two things need to happen:
@@ -3992,7 +3996,7 @@ where
         }
         if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
             debug_assert!(false, "This should have been handled in `locked_close_channel`");
-            let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+            self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
         }
         if self.background_events_processed_since_startup.load(Ordering::Acquire) {
             // If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6309,9 +6313,7 @@ where
                     let _ = self.chain_monitor.update_channel(funding_txo, &update);
                 },
                 BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-                    // The monitor update will be replayed on startup if it doesnt complete, so no
-                    // use bothering to care about the monitor update completing.
-                    let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+                    self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
                 },
                 BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
                     let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7242,32 +7244,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
             let payment_hash = payment_preimage.into();
             let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-            if !during_init {
-                if let Some(action) = action_opt {
-                    log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-                        chan_id, action);
-                    peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-                }
+            if let Some(action) = action_opt {
+                log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+                    chan_id, action);
+                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+            }
 
+            if !during_init {
                 handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
             } else {
                 // If we're running during init we cannot update a monitor directly - they probably
                 // haven't actually been loaded yet. Instead, push the monitor update as a background
                 // event.
-                // TODO: Track this update as pending and only complete the completion action when it
-                // finishes.
+
+                let in_flight_updates = peer_state.in_flight_monitor_updates
+                    .entry(prev_hop.funding_txo)
+                    .or_insert_with(Vec::new);
+                in_flight_updates.push(preimage_update.clone());
+
                 let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                     counterparty_node_id,
                     funding_txo: prev_hop.funding_txo,
                     channel_id: prev_hop.channel_id,
                     update: preimage_update,
                 };
                 self.pending_background_events.lock().unwrap().push(event);
-
-                mem::drop(peer_state);
-                mem::drop(per_peer_state);
-
-                self.handle_monitor_update_completion_actions(action_opt);
             }
         }
 

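The channelmanager.rs half of the change is easiest to see as two moving parts: the during-init branch queues the claim update as a background event while also recording it in `in_flight_monitor_updates`, and the background-event processor later replays it via `apply_post_close_monitor_update`. Below is a minimal, standalone Rust sketch of that flow under hypothetical names (StartupState, queue_claim_during_init, process_background_events); it illustrates the pattern under these assumptions and is not LDK's actual implementation.

// Conceptual sketch only -- hypothetical names, not LDK's API.
use std::collections::HashMap;

// Stand-in for a funding outpoint (txid handle, output index).
type FundingOutpoint = (u64, u16);

#[derive(Clone, Debug)]
struct MonitorUpdate {
    update_id: u64,
}

enum BackgroundEvent {
    // Mirrors the idea of replaying a regenerated monitor update once startup finishes.
    MonitorUpdateRegeneratedOnStartup { funding_txo: FundingOutpoint, update: MonitorUpdate },
}

#[derive(Default)]
struct StartupState {
    // Updates handed out but not yet confirmed persisted.
    in_flight_monitor_updates: HashMap<FundingOutpoint, Vec<MonitorUpdate>>,
    pending_background_events: Vec<BackgroundEvent>,
}

impl StartupState {
    /// During init the monitors may not be loaded yet, so the claim update is queued
    /// as a background event *and* tracked as in-flight, rather than having its
    /// completion action run immediately.
    fn queue_claim_during_init(&mut self, funding_txo: FundingOutpoint, update: MonitorUpdate) {
        self.in_flight_monitor_updates.entry(funding_txo).or_default().push(update.clone());
        self.pending_background_events
            .push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update });
    }

    /// After init, replay the queued updates against the now-loaded monitors. The update
    /// stays in `in_flight_monitor_updates` until the persister signals completion, which
    /// is what ultimately releases blocked actions such as a PaymentForwarded-style event.
    fn process_background_events(&mut self, mut apply: impl FnMut(FundingOutpoint, &MonitorUpdate)) {
        for ev in self.pending_background_events.drain(..) {
            match ev {
                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update } => {
                    apply(funding_txo, &update);
                },
            }
        }
    }
}

fn main() {
    let mut state = StartupState::default();
    let funding_txo: FundingOutpoint = (1, 0);
    state.queue_claim_during_init(funding_txo, MonitorUpdate { update_id: 42 });
    state.process_background_events(|txo, upd| {
        println!("replaying update {} for funding outpoint {:?}", upd.update_id, txo);
    });
    // The update is still tracked as in-flight until the persister completes it.
    assert_eq!(state.in_flight_monitor_updates[&funding_txo].len(), 1);
}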