
Commit ce9e5a7

Support async ChannelMonitorUpdates to closed chans at startup
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this (for updates which are generated during startup).

Thanks to all the work we've built up over the past many commits, this is a fairly straightforward patch: we remove the immediate-completion logic from `claim_mpp_part` and add the required in-flight tracking logic to `apply_post_close_monitor_update`.

Like in the during-runtime case in the previous commit, we sadly can't use the `handle_new_monitor_update` macro wholesale, as it also handles the `Channel` resumption, which we don't do here.
1 parent: 9e590cf · commit: ce9e5a7
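
As a rough, self-contained sketch of the pattern this commit introduces (the types below — `PeerState`, `MonitorUpdate`, `BackgroundEvent`, and the string-valued "action" — are hypothetical simplifications, not LDK's real structures): a claim generated during startup is recorded as an in-flight monitor update and queued as a background event, and the blocked completion action is only released once that update actually completes, rather than being completed immediately as before.

use std::collections::HashMap;

type ChannelId = u64;
type UpdateId = u64;

// Hypothetical, simplified stand-ins for LDK's real structures.
#[derive(Debug)]
struct MonitorUpdate {
	channel_id: ChannelId,
	update_id: UpdateId,
}

#[derive(Debug)]
enum BackgroundEvent {
	MonitorUpdateRegeneratedOnStartup(MonitorUpdate),
}

#[derive(Default)]
struct PeerState {
	// Updates handed off for persistence but not yet confirmed durable.
	in_flight_monitor_updates: HashMap<ChannelId, Vec<UpdateId>>,
	// Completion actions (e.g. emitting `PaymentForwarded`) blocked on those updates.
	blocked_actions: HashMap<ChannelId, Vec<String>>,
}

impl PeerState {
	// During init we cannot touch a monitor directly, so track the update as
	// in-flight *and* queue it as a background event to be replayed after startup.
	fn queue_startup_claim(
		&mut self, update: MonitorUpdate, action: String, background: &mut Vec<BackgroundEvent>,
	) {
		self.in_flight_monitor_updates.entry(update.channel_id).or_default().push(update.update_id);
		self.blocked_actions.entry(update.channel_id).or_default().push(action);
		background.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(update));
	}

	// Called when a replayed update finishes persisting. Blocked actions are only
	// released once no update for the channel remains in flight.
	fn update_completed(&mut self, channel_id: ChannelId, update_id: UpdateId) -> Vec<String> {
		if let Some(ids) = self.in_flight_monitor_updates.get_mut(&channel_id) {
			ids.retain(|id| *id != update_id);
			if ids.is_empty() {
				return self.blocked_actions.remove(&channel_id).unwrap_or_default();
			}
		}
		Vec::new()
	}
}

fn main() {
	let mut peer = PeerState::default();
	let mut background = Vec::new();
	peer.queue_startup_claim(
		MonitorUpdate { channel_id: 1, update_id: 42 },
		"PaymentForwarded".to_string(),
		&mut background,
	);
	// The action stays blocked until the replayed update actually completes.
	assert!(peer.update_completed(1, 7).is_empty());
	assert_eq!(peer.update_completed(1, 42), vec!["PaymentForwarded".to_string()]);
	println!("queued background events: {}", background.len());
}

In the actual patch the same role is played by `peer_state.in_flight_monitor_updates` and `BackgroundEvent::MonitorUpdateRegeneratedOnStartup`, as seen in the `channelmanager.rs` hunks below.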

File tree: 2 files changed (+44, -38 lines)

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 21 additions & 16 deletions
@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	if !close_chans_before_reload {
 		check_closed_broadcast(&nodes[1], 1, true);
 		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
-	} else {
-		// While we forwarded the payment a while ago, we don't want to process events too early or
-		// we'll run background tasks we wanted to test individually.
-		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
 	}
 
 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Make sure the B<->C channel is still alive and well by sending a payment over it.
 	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 	reconnect_args.pending_responding_commitment_signed.1 = true;
-	if !close_chans_before_reload {
-		// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
-		// will consider the forwarded payment complete and allow the B<->C
-		// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
-		// be allowed, and needs fixing.
-		reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-	}
+	// The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is the
+	// equivalent to the responding `commitment_signed` being a duplicate for node B, thus we
+	// need to set the `pending_responding_commitment_signed_dup` flag.
+	reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
 	reconnect_args.pending_raa.1 = true;
 
 	reconnect_nodes(reconnect_args);
+
+	// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+	// `PaymentForwarded` event will finally be released.
 	let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
-	if !close_chans_before_reload {
-		// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
-		// channel will fly, removing the payment preimage from it.
-		check_added_monitors(&nodes[1], 1);
+
+	// If the A<->B channel was closed before we reload, we'll replay the claim against it on
+	// reload, causing the `PaymentForwarded` event to get replayed.
+	let evs = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+	for ev in evs {
+		if let Event::PaymentForwarded { .. } = ev { }
+		else {
+			panic!();
+		}
 	}
+
+	// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
+	// will fly, removing the payment preimage from it.
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	send_payment(&nodes[1], &[&nodes[2]], 100_000);
 }

lightning/src/ln/channelmanager.rs

Lines changed: 23 additions & 22 deletions
@@ -3941,11 +3941,10 @@ where
 	}
 
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3956,16 +3955,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let in_flight = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if in_flight { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed };
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	/// When a channel is removed, two things need to happen:
@@ -3994,7 +3998,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6348,9 +6352,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7296,32 +7298,31 @@ where
 			let payment_hash = payment_preimage.into();
 			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-			if !during_init {
-				if let Some(action) = action_opt {
-					log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-						chan_id, action);
-					peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-				}
+			if let Some(action) = action_opt {
+				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+					chan_id, action);
+				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+			}
 
+			if !during_init {
 				handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 			} else {
 				// If we're running during init we cannot update a monitor directly - they probably
 				// haven't actually been loaded yet. Instead, push the monitor update as a background
 				// event.
-				// TODO: Track this update as pending and only complete the completion action when it
-				// finishes.
+
+				let in_flight_updates = peer_state.in_flight_monitor_updates
+					.entry(prev_hop.funding_txo)
+					.or_insert_with(Vec::new);
+				in_flight_updates.push(preimage_update.clone());
+
 				let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
					counterparty_node_id,
 					funding_txo: prev_hop.funding_txo,
 					channel_id: prev_hop.channel_id,
 					update: preimage_update,
 				};
 				self.pending_background_events.lock().unwrap().push(event);
-
-				mem::drop(peer_state);
-				mem::drop(per_peer_state);
-
-				self.handle_monitor_update_completion_actions(action_opt);
 			}
 		}
 