
Commit bd4eb0d

Queue BackgroundEvent to force close channels upon ChannelManager::read
This results in a new, potentially redundant, `ChannelMonitorUpdate` that must be applied to `ChannelMonitor`s to broadcast the holder's latest commitment transaction.

This is a behavior change for anchor channels since their commitments may require additional fees to be attached through a child anchor transaction. Recall that anchor transactions are only generated by the event consumer after processing a `BumpTransactionEvent::ChannelClose` event, which is yielded after applying a `ChannelMonitorUpdateStep::ChannelForceClosed` monitor update.

Assuming the node operator is not watching the mempool to generate these anchor transactions without LDK, an anchor channel which we had to fail when deserializing our `ChannelManager` would have its commitment transaction broadcast by itself, potentially exposing the node operator to loss of funds if the commitment transaction's fee is not enough to be accepted into the network's mempools.
1 parent 5a90f01 commit bd4eb0d
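To make the concern above concrete, here is a minimal sketch of the event-consumer side the commit message refers to. It assumes the `Event::BumpTransaction` and `BumpTransactionEvent::ChannelClose` variants found in recent LDK releases (module paths may differ in the tree this commit targets), and `build_and_broadcast_anchor_child_tx` is a hypothetical stand-in for the operator's own CPFP logic, not an LDK API.

```rust
use lightning::events::bump_transaction::BumpTransactionEvent;
use lightning::events::Event;

fn handle_event(event: Event) {
    if let Event::BumpTransaction(bump_event) = event {
        if let BumpTransactionEvent::ChannelClose { .. } = bump_event {
            // Without a fee-bearing child transaction spending the anchor output,
            // a low-fee anchor commitment may never enter mempools. This event is
            // only yielded after a `ChannelForceClosed` monitor update is applied,
            // which is why the commit queues such an update instead of having the
            // monitor broadcast the commitment transaction directly during
            // `ChannelManager::read`.
            build_and_broadcast_anchor_child_tx(&bump_event);
        }
    }
}

// Hypothetical helper: construct, sign, and broadcast the CPFP child transaction
// using the anchor descriptor and pending HTLC data carried by the event.
fn build_and_broadcast_anchor_child_tx(_event: &BumpTransactionEvent) {
    // Operator-specific fee-bumping logic goes here.
}
```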

6 files changed, +81 -59 lines changed


lightning/src/chain/channelmonitor.rs

Lines changed: 10 additions & 8 deletions
@@ -69,8 +69,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
     pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
@@ -491,8 +490,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
 
 );
 
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
     LatestHolderCommitmentTXInfo {
         commitment_tx: HolderCommitmentTransaction,
@@ -2268,10 +2266,14 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
     {
         log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
             log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
-        // ChannelMonitor updates may be applied after force close if we receive a
-        // preimage for a broadcasted commitment transaction HTLC output that we'd
-        // like to claim on-chain. If this is the case, we no longer have guaranteed
-        // access to the monitor's update ID, so we use a sentinel value instead.
+        // ChannelMonitor updates may be applied after force close if we receive a preimage for a
+        // broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+        // is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+        // sentinel value instead.
+        //
+        // The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+        // thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+        // them as well.
         if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
             assert_eq!(updates.updates.len(), 1);
             match updates.updates[0] {

lightning/src/ln/channelmanager.rs

Lines changed: 21 additions & 8 deletions
@@ -7310,6 +7310,7 @@ where
         let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
         let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
         let mut channel_closures = Vec::new();
+        let mut pending_background_events = Vec::new();
         for _ in 0..channel_count {
             let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
                 &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -7339,9 +7340,11 @@ where
                 log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                 log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                     log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-                let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+                let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+                if let Some(monitor_update) = monitor_update {
+                    pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+                }
                 failed_htlcs.append(&mut new_failed_htlcs);
-                monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
                 channel_closures.push(events::Event::ChannelClosed {
                     channel_id: channel.channel_id(),
                     user_channel_id: channel.get_user_id(),
@@ -7406,10 +7409,13 @@ where
             }
         }
 
-        for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+        for (funding_txo, _) in args.channel_monitors.iter() {
             if !funding_txo_set.contains(funding_txo) {
-                log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
-                monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+                let monitor_update = ChannelMonitorUpdate {
+                    update_id: CLOSED_CHANNEL_UPDATE_ID,
+                    updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+                };
+                pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
             }
         }
 
@@ -7462,10 +7468,17 @@ where
         }
 
         let background_event_count: u64 = Readable::read(reader)?;
-        let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
         for _ in 0..background_event_count {
             match <u8 as Readable>::read(reader)? {
-                0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+                0 => {
+                    let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+                    if pending_background_events.iter().find(|e| {
+                        let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+                        *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+                    }).is_none() {
+                        pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+                    }
+                }
                 _ => return Err(DecodeError::InvalidValue),
             }
         }
@@ -7840,7 +7853,7 @@ where
             per_peer_state: FairRwLock::new(per_peer_state),
 
             pending_events: Mutex::new(pending_events_read),
-            pending_background_events: Mutex::new(pending_background_events_read),
+            pending_background_events: Mutex::new(pending_background_events),
             total_consistency_lock: RwLock::new(()),
             persistence_notifier: Notifier::new(),
 
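The unconditional `derive(Clone, PartialEq, Eq)` added in channelmonitor.rs above is what makes the deduplication in the last hunk possible: background events read back from disk are compared against the `ChannelForceClosed` updates just queued for stale channels, so the same monitor update is not applied twice. A minimal, self-contained sketch of that idea, using simplified stand-in types rather than LDK's actual definitions:

```rust
// Simplified stand-ins for LDK's `OutPoint`, `ChannelMonitorUpdate`, and
// `BackgroundEvent`; the real types carry far more data.
#[derive(Clone, PartialEq, Eq)]
struct OutPoint {
    txid: [u8; 32],
    index: u16,
}

#[derive(Clone, PartialEq, Eq)]
struct MonitorUpdate {
    update_id: u64,
}

#[derive(Clone, PartialEq, Eq)]
enum BackgroundEvent {
    ClosingMonitorUpdate((OutPoint, MonitorUpdate)),
}

/// Push `event` only if an identical one is not already queued, mirroring the
/// `iter().find(..).is_none()` check in the deserialization hunk above.
fn push_if_new(queue: &mut Vec<BackgroundEvent>, event: BackgroundEvent) {
    // `Vec::contains` needs `PartialEq` on the element type, which is why the
    // derive can no longer be gated behind test/fuzzing cfgs.
    if !queue.contains(&event) {
        queue.push(event);
    }
}

fn main() {
    let funding_txo = OutPoint { txid: [0u8; 32], index: 0 };
    // `u64::MAX` plays the role of the `CLOSED_CHANNEL_UPDATE_ID` sentinel here.
    let update = MonitorUpdate { update_id: u64::MAX };
    let mut queue = Vec::new();
    // Queued once while force-closing a stale channel during deserialization...
    push_if_new(&mut queue, BackgroundEvent::ClosingMonitorUpdate((funding_txo.clone(), update.clone())));
    // ...and skipped when the same event is read back from the serialized manager.
    push_if_new(&mut queue, BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
    assert_eq!(queue.len(), 1);
}
```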
lightning/src/ln/monitor_tests.rs

Lines changed: 26 additions & 27 deletions
@@ -1859,15 +1859,18 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
     let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
     let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
 
+    // Serialize Bob with the initial state of both channels, which we'll use later.
+    let bob_serialized = nodes[1].node.encode();
+
     // Route two payments for each channel from Alice to Bob to lock in the HTLCs.
     let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
     let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
     let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
     let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 
-    // Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
-    // point such that he broadcasts a revoked commitment transaction.
-    let bob_serialized = nodes[1].node.encode();
+    // Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+    // at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+    // present.
     let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
     let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
 
@@ -1897,30 +1900,26 @@
         }
     }
 
-    // Bob force closes by broadcasting his revoked state for each channel.
-    nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
-    check_added_monitors(&nodes[1], 1);
-    check_closed_broadcast(&nodes[1], 1, true);
-    check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-    let revoked_commitment_a = {
-        let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-        assert_eq!(txn.len(), 1);
-        let revoked_commitment = txn.pop().unwrap();
-        assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-        check_spends!(revoked_commitment, chan_a.3);
-        revoked_commitment
-    };
-    nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
-    check_added_monitors(&nodes[1], 1);
-    check_closed_broadcast(&nodes[1], 1, true);
-    check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-    let revoked_commitment_b = {
-        let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-        assert_eq!(txn.len(), 1);
-        let revoked_commitment = txn.pop().unwrap();
-        assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-        check_spends!(revoked_commitment, chan_b.3);
-        revoked_commitment
+    // Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+    // broadcast the latest commitment transaction known to them, which in our case is the one with
+    // the HTLCs still pending.
+    nodes[1].node.timer_tick_occurred();
+    check_added_monitors(&nodes[1], 2);
+    check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+    let (revoked_commitment_a, revoked_commitment_b) = {
+        let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+        assert_eq!(txn.len(), 2);
+        assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+        assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+        if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+            check_spends!(&txn[0], &chan_a.3);
+            check_spends!(&txn[1], &chan_b.3);
+            (txn[0].clone(), txn[1].clone())
+        } else {
+            check_spends!(&txn[1], &chan_a.3);
+            check_spends!(&txn[0], &chan_b.3);
+            (txn[1].clone(), txn[0].clone())
+        }
     };
 
     // Bob should now receive two events to bump his revoked commitment transaction fees.

lightning/src/ln/payment_tests.rs

Lines changed: 12 additions & 3 deletions
@@ -334,9 +334,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
     check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
     assert!(nodes[0].node.list_channels().is_empty());
     assert!(nodes[0].node.has_pending_payments());
-    let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-    assert_eq!(as_broadcasted_txn.len(), 1);
-    assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+    nodes[0].node.timer_tick_occurred();
+    if !confirm_before_reload {
+        let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+        assert_eq!(as_broadcasted_txn.len(), 1);
+        assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+    } else {
+        assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+    }
+    check_added_monitors!(nodes[0], 1);
 
     nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
     nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
@@ -499,9 +505,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
     // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
     // force-close the channel.
     check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+    nodes[0].node.timer_tick_occurred();
     assert!(nodes[0].node.list_channels().is_empty());
     assert!(nodes[0].node.has_pending_payments());
     assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+    check_added_monitors!(nodes[0], 1);
 
     nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
     assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -2794,6 +2802,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
     if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
     // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
     // the double-claim that would otherwise appear at the end of this test.
+    nodes[0].node.timer_tick_occurred();
     let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
     assert_eq!(as_broadcasted_txn.len(), 1);
 
lightning/src/ln/reload_tests.rs

Lines changed: 11 additions & 7 deletions
@@ -422,20 +422,22 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
     nodes_0_deserialized = nodes_0_deserialized_tmp;
     assert!(nodes_0_read.is_empty());
 
-    { // Channel close should result in a commitment tx
-        let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-        assert_eq!(txn.len(), 1);
-        check_spends!(txn[0], funding_tx);
-        assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
-    }
-
     for monitor in node_0_monitors.drain(..) {
         assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
             ChannelMonitorUpdateStatus::Completed);
         check_added_monitors!(nodes[0], 1);
     }
     nodes[0].node = &nodes_0_deserialized;
+
     check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+    { // Channel close should result in a commitment tx
+        nodes[0].node.timer_tick_occurred();
+        let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+        assert_eq!(txn.len(), 1);
+        check_spends!(txn[0], funding_tx);
+        assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+    }
+    check_added_monitors!(nodes[0], 1);
 
     // nodes[1] and nodes[2] have no lost state with nodes[0]...
     reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -920,8 +922,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
         });
     }
 
+    nodes[1].node.timer_tick_occurred();
     let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
     assert_eq!(bs_commitment_tx.len(), 1);
+    check_added_monitors!(nodes[1], 1);
 
     nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
     reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

lightning/src/ln/reorg_tests.rs

Lines changed: 1 addition & 6 deletions
@@ -320,12 +320,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
 
         reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
-        if !reorg_after_reload {
-            // If the channel is already closed when we reload the node, we'll broadcast a closing
-            // transaction via the ChannelMonitor which is missing a corresponding channel.
-            assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
-            nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-        }
+        assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
     }
 
     if reorg_after_reload {