Skip to content

Commit 32eb894

Browse files
authored
Merge pull request #2167 from TheBlueMatt/2023-04-monitor-e-monitor-prep
Add infra to block ChannelMonitorUpdates on forwarded claims
2 parents 91536ed + 394f54d commit 32eb894

File tree

7 files changed

+387
-138
lines changed

7 files changed

+387
-138
lines changed

lightning/src/ln/channel.rs

+23-2
Original file line number · Diff line number · Diff line change
@@ -432,6 +432,12 @@ pub(super) struct ReestablishResponses {
432432
pub shutdown_msg: Option<msgs::Shutdown>,
433433
}
434434

435+
/// The return type of `force_shutdown`
436+
pub(crate) type ShutdownResult = (
437+
Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
438+
Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
439+
);
440+
435441
/// If the majority of the channels funds are to the fundee and the initiator holds only just
436442
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
437443
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
@@ -5097,10 +5103,25 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
50975103
self.pending_monitor_updates.is_empty()
50985104
}
50995105

5106+
pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
5107+
self.pending_monitor_updates.retain(|upd| {
5108+
if upd.update.update_id <= update_id {
5109+
assert!(!upd.blocked, "Completed update must have flown");
5110+
false
5111+
} else { true }
5112+
});
5113+
}
5114+
51005115
pub fn complete_one_mon_update(&mut self, update_id: u64) {
51015116
self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
51025117
}
51035118

5119+
/// Returns an iterator over all unblocked monitor updates which have not yet completed.
5120+
pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
5121+
self.pending_monitor_updates.iter()
5122+
.filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
5123+
}
5124+
51045125
/// Returns true if funding_created was sent/received.
51055126
pub fn is_funding_initiated(&self) -> bool {
51065127
self.channel_state >= ChannelState::FundingSent as u32
@@ -6282,7 +6303,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
62826303
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
62836304
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
62846305
/// immediately (others we will have to allow to time out).
6285-
pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
6306+
pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
62866307
// Note that we MUST only generate a monitor update that indicates force-closure - we're
62876308
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
62886309
// being fully configured in some cases. Thus, its likely any monitor events we generate will
@@ -6311,7 +6332,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
63116332
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
63126333
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
63136334
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
6314-
Some((funding_txo, ChannelMonitorUpdate {
6335+
Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
63156336
update_id: self.latest_monitor_update_id,
63166337
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
63176338
}))

lightning/src/ln/channelmanager.rs

+340-129
Large diffs are not rendered by default.

lightning/src/ln/payment_tests.rs

+8
Original file line number · Diff line number · Diff line change
@@ -607,6 +607,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
607607
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
608608
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
609609

610+
nodes[0].node.test_process_background_events();
611+
check_added_monitors(&nodes[0], 1);
612+
610613
reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
611614

612615
// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
@@ -632,6 +635,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
632635
reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
633636
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
634637

638+
nodes[0].node.test_process_background_events();
639+
check_added_monitors(&nodes[0], 1);
640+
635641
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
636642

637643
match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
@@ -777,6 +783,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
777783
let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
778784
nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
779785
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
786+
check_added_monitors(&nodes[0], 1);
780787
}
781788

782789
#[test]
@@ -2861,6 +2868,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
28612868
reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
28622869
let events = nodes[0].node.get_and_clear_pending_events();
28632870
assert!(events.is_empty());
2871+
check_added_monitors(&nodes[0], 1);
28642872
}
28652873

28662874
#[test]

lightning/src/ln/priv_short_conf_tests.rs

+2
Original file line number · Diff line number · Diff line change
@@ -853,10 +853,12 @@ fn test_0conf_channel_reorg() {
853853
err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
854854
});
855855
check_closed_broadcast!(nodes[0], true);
856+
check_added_monitors(&nodes[0], 1);
856857
check_closed_event!(&nodes[1], 1, ClosureReason::ProcessingError {
857858
err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs.".to_owned()
858859
});
859860
check_closed_broadcast!(nodes[1], true);
861+
check_added_monitors(&nodes[1], 1);
860862
}
861863

862864
#[test]

lightning/src/ln/reload_tests.rs

+6
Original file line number · Diff line number · Diff line change
@@ -774,6 +774,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
774774
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
775775
if persist_both_monitors {
776776
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
777+
check_added_monitors(&nodes[3], 2);
778+
} else {
779+
check_added_monitors(&nodes[3], 1);
777780
}
778781

779782
// On restart, we should also get a duplicate PaymentClaimed event as we persisted the
@@ -1047,6 +1050,9 @@ fn removed_payment_no_manager_persistence() {
10471050
_ => panic!("Unexpected event"),
10481051
}
10491052

1053+
nodes[1].node.test_process_background_events();
1054+
check_added_monitors(&nodes[1], 1);
1055+
10501056
// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
10511057
// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
10521058
// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.

lightning/src/ln/reorg_tests.rs

+6-5
Original file line number · Diff line number · Diff line change
@@ -289,8 +289,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
289289
let relevant_txids = nodes[0].node.get_relevant_txids();
290290
assert_eq!(relevant_txids.len(), 0);
291291

292-
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
293-
check_added_monitors!(nodes[1], 1);
294292
{
295293
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
296294
let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
@@ -336,8 +334,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
336334
let relevant_txids = nodes[0].node.get_relevant_txids();
337335
assert_eq!(relevant_txids.len(), 0);
338336

339-
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
340-
check_added_monitors!(nodes[1], 1);
341337
{
342338
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
343339
let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
@@ -351,7 +347,12 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
351347
nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
352348
check_added_monitors!(nodes[0], 1);
353349
let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
354-
check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
350+
if reorg_after_reload || !reload_node {
351+
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
352+
check_added_monitors!(nodes[1], 1);
353+
check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Channel closed because of an exception: {}", expected_err)) });
354+
}
355+
355356
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
356357
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
357358
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();

lightning/src/sync/nostd_sync.rs

+2-2
Original file line number · Diff line number · Diff line change
@@ -49,7 +49,7 @@ impl<T> Mutex<T> {
4949
impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
5050
#[inline]
5151
fn held_by_thread(&self) -> LockHeldState {
52-
if self.lock().is_err() { return LockHeldState::HeldByThread; }
52+
if self.inner.try_borrow_mut().is_err() { return LockHeldState::HeldByThread; }
5353
else { return LockHeldState::NotHeldByThread; }
5454
}
5555
type ExclLock = MutexGuard<'a, T>;
@@ -115,7 +115,7 @@ impl<T> RwLock<T> {
115115
impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
116116
#[inline]
117117
fn held_by_thread(&self) -> LockHeldState {
118-
if self.write().is_err() { return LockHeldState::HeldByThread; }
118+
if self.inner.try_borrow_mut().is_err() { return LockHeldState::HeldByThread; }
119119
else { return LockHeldState::NotHeldByThread; }
120120
}
121121
type ExclLock = RwLockWriteGuard<'a, T>;

0 commit comments

Comments (0)