Commit 59d72e3

Support async ChannelMonitorUpdates to closed chans at startup
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this (for updates which are generated during startup).

Thanks to all the work we've built up over the past many commits, this is a fairly straightforward patch: we remove the immediate-completion logic from `claim_mpp_part` and add the required in-flight tracking logic to `apply_post_close_monitor_update`.

Like in the during-runtime case in the previous commit, we sadly can't use the `handle_new_monitor_update` macro wholesale, as it also handles the `Channel` resumption, which we don't do here.
1 parent c497740 commit 59d72e3
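Before diving into the hunks below, here is a minimal, self-contained sketch of the bookkeeping pattern this commit adds. The types (`PeerState`, `MonitorUpdate`, `CompletionAction`) are hypothetical stand-ins rather than LDK's real structs; the point is only that a claim update generated during startup is recorded as in-flight before being queued for replay, and its completion action (e.g. surfacing `PaymentForwarded`) is held back until that update actually completes.

use std::collections::HashMap;
use std::mem;

// Simplified, hypothetical stand-ins for illustration only.
#[derive(Clone, Debug, PartialEq)]
struct MonitorUpdate { update_id: u64 }

#[derive(Debug)]
enum CompletionAction { EmitPaymentForwarded { fee_msat: u64 } }

#[derive(Default)]
struct PeerState {
	// Keyed by a stand-in for the channel's funding outpoint.
	in_flight_monitor_updates: HashMap<u64, Vec<MonitorUpdate>>,
	blocked_actions: Vec<CompletionAction>,
}

impl PeerState {
	// During startup the monitor can't be updated directly, so the update is recorded as
	// in-flight (and queued for replay elsewhere) and its completion action is held back
	// rather than being run immediately.
	fn track_startup_update(&mut self, funding: u64, update: MonitorUpdate, action: CompletionAction) {
		self.in_flight_monitor_updates.entry(funding).or_default().push(update);
		self.blocked_actions.push(action);
	}

	// Called when the replayed update completes (possibly much later, if persistence is
	// async). Blocked actions are only released once nothing remains in flight.
	fn monitor_update_completed(&mut self, funding: u64, update_id: u64) -> Vec<CompletionAction> {
		if let Some(updates) = self.in_flight_monitor_updates.get_mut(&funding) {
			updates.retain(|u| u.update_id != update_id);
			if updates.is_empty() {
				return mem::take(&mut self.blocked_actions);
			}
		}
		Vec::new()
	}
}

fn main() {
	let mut peer = PeerState::default();
	peer.track_startup_update(1, MonitorUpdate { update_id: 42 },
		CompletionAction::EmitPaymentForwarded { fee_msat: 1000 });
	// Nothing is released while the update is still in flight.
	assert!(peer.monitor_update_completed(1, 7).is_empty());
	// Once the tracked update completes, the blocked action finally fires.
	let released = peer.monitor_update_completed(1, 42);
	assert_eq!(released.len(), 1);
	println!("released: {:?}", released);
}

In the patch itself this role is played by `peer_state.in_flight_monitor_updates` and `monitor_update_blocked_actions` together with the `BackgroundEvent::MonitorUpdateRegeneratedOnStartup` replay path; see the `channelmanager.rs` hunks below.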

2 files changed (+44, -38 lines)


lightning/src/ln/chanmon_update_fail_tests.rs (+21, -16)
@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	if !close_chans_before_reload {
 		check_closed_broadcast(&nodes[1], 1, true);
 		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
-	} else {
-		// While we forwarded the payment a while ago, we don't want to process events too early or
-		// we'll run background tasks we wanted to test individually.
-		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
 	}

 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Make sure the B<->C channel is still alive and well by sending a payment over it.
 	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 	reconnect_args.pending_responding_commitment_signed.1 = true;
-	if !close_chans_before_reload {
-		// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
-		// will consider the forwarded payment complete and allow the B<->C
-		// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
-		// be allowed, and needs fixing.
-		reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-	}
+	// The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is the
+	// equivalent to the responding `commitment_signed` being a duplicate for node B, thus we
+	// need to set the `pending_responding_commitment_signed_dup` flag.
+	reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
 	reconnect_args.pending_raa.1 = true;

 	reconnect_nodes(reconnect_args);
+
+	// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+	// `PaymentForwarded` event will finally be released.
 	let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
-	if !close_chans_before_reload {
-		// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
-		// channel will fly, removing the payment preimage from it.
-		check_added_monitors(&nodes[1], 1);
+
+	// If the A<->B channel was closed before we reload, we'll replay the claim against it on
+	// reload, causing the `PaymentForwarded` event to get replayed.
+	let evs = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+	for ev in evs {
+		if let Event::PaymentForwarded { .. } = ev { }
+		else {
+			panic!();
+		}
 	}
+
+	// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
+	// will fly, removing the payment preimage from it.
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	send_payment(&nodes[1], &[&nodes[2]], 100_000);
 }

lightning/src/ln/channelmanager.rs (+23, -22)
@@ -3923,11 +3923,10 @@ where
 	}

 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3938,16 +3937,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let completed = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if completed { ChannelMonitorUpdateStatus::Completed } else { ChannelMonitorUpdateStatus::InProgress };
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}

 	/// When a channel is removed, two things need to happen:
@@ -3976,7 +3980,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6293,9 +6297,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7226,32 +7228,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let payment_hash = payment_preimage.into();
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

-		if !during_init {
-			if let Some(action) = action_opt {
-				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-					chan_id, action);
-				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-			}
+		if let Some(action) = action_opt {
+			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+				chan_id, action);
+			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+		}

+		if !during_init {
 			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 		} else {
 			// If we're running during init we cannot update a monitor directly - they probably
 			// haven't actually been loaded yet. Instead, push the monitor update as a background
 			// event.
-			// TODO: Track this update as pending and only complete the completion action when it
-			// finishes.
+
+			let in_flight_updates = peer_state.in_flight_monitor_updates
+				.entry(prev_hop.funding_txo)
+				.or_insert_with(Vec::new);
+			in_flight_updates.push(preimage_update.clone());
+
 			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 				counterparty_node_id,
 				funding_txo: prev_hop.funding_txo,
 				channel_id: prev_hop.channel_id,
 				update: preimage_update,
 			};
 			self.pending_background_events.lock().unwrap().push(event);
-
-			mem::drop(peer_state);
-			mem::drop(per_peer_state);
-
-			self.handle_monitor_update_completion_actions(action_opt);
 		}
 	}

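Taken together, the `channelmanager.rs` changes mean `apply_post_close_monitor_update` no longer returns a `ChannelMonitorUpdateStatus` for callers to ignore: whether the update targets a still-live channel or one that has already been closed, it is tracked as in-flight and completion is driven by the usual async-persistence machinery (which is why the two call sites in the diff drop their `let _ =` bindings). The following sketch, using hypothetical simplified types (`Manager`, `PersistStatus`, the `persist` closure) rather than LDK's API or macros, models that control flow.

use std::collections::HashMap;

// Hypothetical, simplified model; not LDK's real types.
#[derive(Clone, Debug)]
struct MonitorUpdate { update_id: u64 }

#[derive(Debug, Clone, Copy, PartialEq)]
enum PersistStatus { Completed, InProgress }

#[derive(Default)]
struct Manager {
	live_channels: HashMap<u64, ()>,                              // channel_id -> channel (elided)
	in_flight_monitor_updates: HashMap<u64, Vec<MonitorUpdate>>,  // funding outpoint -> updates
}

impl Manager {
	fn apply_post_close_monitor_update(
		&mut self, channel_id: u64, funding_txo: u64, update: MonitorUpdate,
		persist: impl Fn(&MonitorUpdate) -> PersistStatus,
	) {
		// In both branches the update is tracked as in-flight before being handed to the
		// persister, mirroring what `handle_new_monitor_update!` does for open channels.
		self.in_flight_monitor_updates.entry(funding_txo).or_default().push(update.clone());
		if self.live_channels.contains_key(&channel_id) {
			// Live channel: the real code also resumes the `Channel` on completion (elided here).
		}
		if persist(&update) == PersistStatus::Completed {
			// Synchronous persistence: drop the update from the in-flight set immediately.
			if let Some(v) = self.in_flight_monitor_updates.get_mut(&funding_txo) {
				v.retain(|u| u.update_id != update.update_id);
			}
		}
		// With an async persister (`InProgress`), the update stays in-flight until completion
		// is signalled, which is what eventually unblocks any queued completion actions.
	}
}

fn main() {
	let mut mgr = Manager::default();
	// Async persister: the update remains in flight after the call returns.
	mgr.apply_post_close_monitor_update(1, 100, MonitorUpdate { update_id: 1 }, |_| PersistStatus::InProgress);
	assert_eq!(mgr.in_flight_monitor_updates[&100].len(), 1);
	// Sync persister: the update is removed from the in-flight set right away.
	mgr.apply_post_close_monitor_update(2, 200, MonitorUpdate { update_id: 1 }, |_| PersistStatus::Completed);
	assert!(mgr.in_flight_monitor_updates[&200].is_empty());
}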