Skip to content

Commit 385799f

Browse files
committed
Handle events immediately if we are running during block connection
During block connection, we cannot apply `ChannelMonitorUpdate`s if we're running during the startup sequence (i.e. before the user has called any methods outside of block connection). We previously handled this by simply always pushing any `ChannelMonitorUpdate`s generated during block connection into the `pending_background_events` queue. However, this results in `ChannelMonitorUpdate`s going through the queue when we could just push them immediately. Here we explicitly check `background_events_processed_since_startup` and use that to decide whether to apply updates immediately or to push them through the background events queue.
1 parent d33687d commit 385799f

File tree

2 files changed

+35
-14
lines changed

2 files changed

+35
-14
lines changed

lightning/src/ln/channelmanager.rs

+12-6
Original file line numberDiff line numberDiff line change
@@ -9624,8 +9624,8 @@ where
96249624
}
96259625

96269626
/// Handle a list of channel failures during a block_connected or block_disconnected call,
9627-
/// pushing the channel monitor update (if any) to the background events queue and removing the
9628-
/// Channel object.
9627+
/// pushing the channel monitor update (if any) to the background events queue (if we're
9628+
/// currently in the startup phase) and calling [`Self::finish_close_channel`].
96299629
fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
96309630
for mut failure in failed_channels.drain(..) {
96319631
// Either a commitment transactions has been confirmed on-chain or
@@ -9640,10 +9640,16 @@ where
96409640
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
96419641
assert!(should_broadcast);
96429642
} else { unreachable!(); }
9643-
self.pending_background_events.lock().unwrap().push(
9644-
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
9645-
counterparty_node_id, funding_txo, update, channel_id,
9646-
});
9643+
if self.background_events_processed_since_startup.load(Ordering::Acquire) {
9644+
let res = self.chain_monitor.update_channel(funding_txo, &update);
9645+
debug_assert_eq!(res, ChannelMonitorUpdateStatus::Completed,
9646+
"TODO: We don't currently handle failures here, this logic is removed in the next commit");
9647+
} else {
9648+
self.pending_background_events.lock().unwrap().push(
9649+
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
9650+
counterparty_node_id, funding_txo, update, channel_id,
9651+
});
9652+
}
96479653
}
96489654
self.finish_close_channel(failure);
96499655
}

lightning/src/ln/reorg_tests.rs

+23-8
Original file line numberDiff line numberDiff line change
@@ -268,6 +268,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
268268
assert_eq!(nodes[1].node.list_channels()[0].confirmations, Some(10));
269269

270270
if !reorg_after_reload {
271+
// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
272+
// is a ChannelForceClosed on the right channel with should_broadcast set.
273+
*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
271274
if use_funding_unconfirmed {
272275
let relevant_txids = nodes[0].node.get_relevant_txids();
273276
assert_eq!(relevant_txids.len(), 1);
@@ -293,12 +296,17 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
293296
let relevant_txids = nodes[0].node.get_relevant_txids();
294297
assert_eq!(relevant_txids.len(), 0);
295298

299+
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
300+
assert_eq!(txn.len(), 1);
301+
296302
{
297303
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
298304
let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
299305
assert_eq!(peer_state.channel_by_id.len(), 0);
300306
assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
301307
}
308+
309+
check_added_monitors!(nodes[0], 1);
302310
}
303311

304312
if reload_node {
@@ -310,10 +318,13 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
310318
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
311319

312320
reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
313-
assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
314321
}
315322

316323
if reorg_after_reload {
324+
// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
325+
// is a ChannelForceClosed on the right channel with should_broadcast set.
326+
*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
327+
317328
if use_funding_unconfirmed {
318329
let relevant_txids = nodes[0].node.get_relevant_txids();
319330
assert_eq!(relevant_txids.len(), 1);
@@ -345,12 +356,18 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
345356
assert_eq!(peer_state.channel_by_id.len(), 0);
346357
assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
347358
}
359+
360+
if reload_node {
361+
// The update may come when we free background events if we just restarted, or in-line if
362+
// we were already running.
363+
nodes[0].node.test_process_background_events();
364+
}
365+
check_added_monitors!(nodes[0], 1);
366+
367+
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
368+
assert_eq!(txn.len(), 1);
348369
}
349-
// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
350-
// is a ChannelForceClosed on the right channel with should_broadcast set.
351-
*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
352-
nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
353-
check_added_monitors!(nodes[0], 1);
370+
354371
let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
355372
if reorg_after_reload || !reload_node {
356373
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
@@ -361,8 +378,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
361378

362379
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() },
363380
[nodes[1].node.get_our_node_id()], 100000);
364-
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
365-
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
366381

367382
// Now check that we can create a new channel
368383
if reload_node && nodes[0].node.per_peer_state.read().unwrap().len() == 0 {

0 commit comments

Comments (0)