Commit 0bb87dd
Avoid attempting to forward to a closed chan on stale-data reload
If, after forwarding a payment to our counterparty, we restart with a ChannelMonitor update having been persisted but the corresponding ChannelManager update not persisted, we'll still have the forwarded HTLC in the `forward_htlcs` map on startup. This causes us to generate a spurious `PendingHTLCsForwardable` event. However, when we go to forward said HTLC, we'll notice the channel has been closed and leave it up to the `ChannelMonitor` to finalize the HTLC.

This is all fine today - we won't lose any funds, we'll just generate an excess forwardable event and then fail to forward. However, in the future, when we allow for forward-time channel changes, this could break. Thus, it's worth adding tests for this behavior today and, while we're at it, removing the spurious forwardable-HTLCs event.
1 parent e142f67 commit 0bb87dd
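
To make the fix concrete, here is a minimal, standalone sketch of the pruning pattern the change applies on deserialization: any queued forward whose previous hop matches an HTLC the ChannelMonitor already tracks is dropped, and any bucket left empty is removed. The types and the `prune_forwards` helper below are hypothetical stand-ins for illustration, not LDK's actual API; the real logic is in the channelmanager.rs diff below.

// Simplified model of the stale-forward pruning done on reload.
// `PendingForward` and `prune_forwards` are hypothetical, not LDK types.
use std::collections::HashMap;

struct PendingForward {
    prev_short_channel_id: u64,
    prev_htlc_id: u64,
}

// Drop every queued forward whose previous hop matches an HTLC the monitor
// already owns, then drop any bucket that ends up empty.
fn prune_forwards(
    forward_htlcs: &mut HashMap<u64, Vec<PendingForward>>,
    monitor_owned: &[(u64, u64)], // (prev short_channel_id, prev htlc_id) pairs
) {
    forward_htlcs.retain(|_, forwards| {
        forwards.retain(|fwd| {
            !monitor_owned.iter().any(|&(scid, htlc_id)| {
                fwd.prev_short_channel_id == scid && fwd.prev_htlc_id == htlc_id
            })
        });
        !forwards.is_empty()
    });
}

fn main() {
    let mut forward_htlcs = HashMap::new();
    forward_htlcs.insert(42u64, vec![PendingForward { prev_short_channel_id: 7, prev_htlc_id: 0 }]);
    // The monitor already tracks the HTLC received on (scid 7, htlc 0), so the
    // spurious forward (and its now-empty bucket) is pruned away.
    prune_forwards(&mut forward_htlcs, &[(7, 0)]);
    assert!(forward_htlcs.is_empty());
}

The actual change achieves the same effect with nested `retain` calls keyed off `HTLCSource::PreviousHopData`, and only afterwards decides whether a `PendingHTLCsForwardable` event is still warranted.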

File tree: 3 files changed (+160, -14 lines)

lightning/src/ln/channelmanager.rs

Lines changed: 34 additions & 10 deletions
@@ -7286,16 +7286,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				None => continue,
 			}
 		}
-		if forward_htlcs_count > 0 {
-			// If we have pending HTLCs to forward, assume we either dropped a
-			// `PendingHTLCsForwardable` or the user received it but never processed it as they
-			// shut down before the timer hit. Either way, set the time_forwardable to a small
-			// constant as enough time has likely passed that we should simply handle the forwards
-			// now, or at least after the user gets a chance to reconnect to our peers.
-			pending_events_read.push(events::Event::PendingHTLCsForwardable {
-				time_forwardable: Duration::from_secs(2),
-			});
-		}
 
 		let background_event_count: u64 = Readable::read(reader)?;
 		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
@@ -7404,10 +7394,44 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 							}
 						}
 					}
+					for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
+						if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
+							// The ChannelMonitor is now responsible for this HTLC's
+							// failure/success and will let us know what its outcome is. If we
+							// still have an entry for this HTLC in `forward_htlcs`, we were
+							// apparently not persisted after the monitor was when forwarding
+							// the payment.
+							forward_htlcs.retain(|_, forwards| {
+								forwards.retain(|forward| {
+									if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+										if htlc_info.prev_short_channel_id == prev_hop_data.short_channel_id &&
+											htlc_info.prev_htlc_id == prev_hop_data.htlc_id
+										{
+											log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+												log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+											false
+										} else { true }
+									} else { true }
+								});
+								!forwards.is_empty()
+							})
+						}
+					}
 				}
 			}
 		}
 
+		if !forward_htlcs.is_empty() {
+			// If we have pending HTLCs to forward, assume we either dropped a
+			// `PendingHTLCsForwardable` or the user received it but never processed it as they
+			// shut down before the timer hit. Either way, set the time_forwardable to a small
+			// constant as enough time has likely passed that we should simply handle the forwards
+			// now, or at least after the user gets a chance to reconnect to our peers.
+			pending_events_read.push(events::Event::PendingHTLCsForwardable {
+				time_forwardable: Duration::from_secs(2),
+			});
+		}
+
 		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
 		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 
lightning/src/ln/functional_test_utils.rs

Lines changed: 3 additions & 3 deletions
@@ -1091,7 +1091,7 @@ macro_rules! check_closed_event {
 		use $crate::util::events::Event;
 
 		let events = $node.node.get_and_clear_pending_events();
-		assert_eq!(events.len(), $events);
+		assert_eq!(events.len(), $events, "{:?}", events);
 		let expected_reason = $reason;
 		let mut issues_discard_funding = false;
 		for event in events {
@@ -1350,7 +1350,7 @@ macro_rules! expect_pending_htlcs_forwardable_conditions {
 		let events = $node.node.get_and_clear_pending_events();
 		match events[0] {
 			$crate::util::events::Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
+			_ => panic!("Unexpected event {:?}", events),
 		};
 
 		let count = expected_failures.len() + 1;
@@ -1560,7 +1560,7 @@ macro_rules! expect_payment_forwarded {
 				if !$downstream_force_closed {
 					assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
 				}
-				assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+				assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
 			},
 			_ => panic!("Unexpected event"),
 		}

lightning/src/ln/reload_tests.rs

Lines changed: 123 additions & 1 deletion
@@ -10,14 +10,16 @@
 //! Functional tests which test for correct behavior across node restarts.
 
 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::keysinterface::KeysInterface;
 use crate::chain::transaction::OutPoint;
 use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
 use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::config::UserConfig;
 
@@ -811,3 +813,123 @@ fn test_partial_claim_before_restart() {
 	do_test_partial_claim_before_restart(false);
 	do_test_partial_claim_before_restart(true);
 }
+
+fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool) {
+	if !use_cs_commitment { assert!(!claim_htlc); }
+	// If we go to forward a payment, and the ChannelMonitor persistence completes, but the
+	// ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
+	// it back until the ChannelMonitor decides the fate of the HTLC.
+	// This was never an issue, but it may be easy to regress here going forward.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+	let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let payment_event = SendEvent::from_node(&nodes[0]);
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+	let node_encoded = nodes[1].node.encode();
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	let payment_event = SendEvent::from_node(&nodes[1]);
+	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+	check_added_monitors!(nodes[2], 1);
+
+	if claim_htlc {
+		get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+			&nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+	}
+	assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+	let _ = nodes[2].node.get_and_clear_pending_msg_events();
+
+	nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+	let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+
+	check_added_monitors!(nodes[2], 1);
+	check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+	check_closed_broadcast!(nodes[2], true);
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+
+	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(bs_commitment_tx.len(), 1);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	if use_cs_commitment {
+		// If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+		// for an HTLC-spending transaction before it does anything with the HTLC upstream.
+		confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		if claim_htlc {
+			confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+		} else {
+			connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+			let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+			assert_eq!(bs_htlc_timeout_tx.len(), 1);
+			confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+		}
+	} else {
+		confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+	}
+
+	if !claim_htlc {
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+	} else {
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+	}
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+			if claim_htlc {
+				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+			} else {
+				nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+			}
+			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	if claim_htlc {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	} else {
+		expect_payment_failed!(nodes[0], payment_hash, false);
+	}
+}
+
+#[test]
+fn forwarded_payment_no_manager_persistence() {
+	do_forwarded_payment_no_manager_persistence(true, true);
+	do_forwarded_payment_no_manager_persistence(true, false);
+	do_forwarded_payment_no_manager_persistence(false, false);
+}
