Add MPP receive timeout handling #1353

Merged
33 changes: 33 additions & 0 deletions lightning/src/ln/channelmanager.rs
@@ -441,6 +441,7 @@ struct ClaimableHTLC {
 	cltv_expiry: u32,
 	value: u64,
 	onion_payload: OnionPayload,
+	timer_ticks: u8,
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -1148,6 +1149,9 @@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS;
 /// pending HTLCs in flight.
 pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
 
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
+pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+
 /// Information needed for constructing an invoice route hint for this channel.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
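Reviewer note: a "tick" is one call to ChannelManager::timer_tick_occurred, which deployments are expected to drive periodically (roughly once per minute when using LDK's background processor), so MPP_TIMEOUT_TICKS = 3 gives a partial payment on the order of three minutes to complete before its parts are failed back. A minimal sketch of such a driver, assuming that one-minute cadence; the TimerTicker trait is a hypothetical stand-in for anything exposing timer_tick_occurred:

use std::sync::Arc;
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for a ChannelManager-like object.
trait TimerTicker: Send + Sync {
	fn timer_tick_occurred(&self);
}

// Fires one tick per minute forever; the cadence is an assumption about
// the deployment, not something this PR enforces.
fn spawn_ticker<T: TimerTicker + 'static>(target: Arc<T>) -> thread::JoinHandle<()> {
	thread::spawn(move || loop {
		thread::sleep(Duration::from_secs(60));
		target.timer_tick_occurred();
	})
}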
@@ -3326,6 +3330,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 							phantom_shared_secret,
 						},
 						value: amt_to_forward,
+						timer_ticks: 0,
 						cltv_expiry,
 						onion_payload,
 					};
@@ -3620,6 +3625,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 		let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 
 		let mut handle_errors = Vec::new();
+		let mut timed_out_mpp_htlcs = Vec::new();
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
@@ -3668,6 +3674,32 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 
 				true
 			});
+
+			channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+				if htlcs.is_empty() {
+					// This should be unreachable
+					debug_assert!(false);
+					return false;
+				}
+				if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+					// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+					// In this case we're not going to handle any timeouts of the parts here.
+					if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+						return true;
+					} else if htlcs.into_iter().any(|htlc| {
+						htlc.timer_ticks += 1;
+						return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+					}) {
+						timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+						return false;
+					}
+				}
+				true
+			});
 		}
 
+		for htlc_source in timed_out_mpp_htlcs.drain(..) {
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+		}
+
 		for (err, counterparty_node_id) in handle_errors.drain(..) {
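To see the sweep's shape without the surrounding ChannelManager plumbing, here is a self-contained model of the logic above; PartialHtlc and sweep_incomplete_mpps are simplified, hypothetical stand-ins for ClaimableHTLC and the retain pass, not LDK API:

use std::collections::HashMap;

const MPP_TIMEOUT_TICKS: u8 = 3;

struct PartialHtlc {
	value: u64,      // msat carried by this part
	total_msat: u64, // the sender's committed total from the onion payload
	timer_ticks: u8, // timer ticks this part has sat unclaimed
}

// One timer tick over the pending-parts map. Returns the payment hashes
// whose incomplete part-sets timed out and were removed.
fn sweep_incomplete_mpps(claimable: &mut HashMap<[u8; 32], Vec<PartialHtlc>>) -> Vec<[u8; 32]> {
	let mut timed_out = Vec::new();
	claimable.retain(|payment_hash, htlcs| {
		if htlcs.is_empty() {
			return false; // mirrors the debug_assert!(false) branch above
		}
		// A complete set (parts summing to total_msat) is never timed out
		// here; it is waiting on the user to claim, not on more parts.
		let received: u64 = htlcs.iter().map(|h| h.value).sum();
		if received == htlcs[0].total_msat {
			return true;
		}
		// Age the parts. Like the code above, this short-circuits once one
		// part crosses the threshold, which is enough to expire the set.
		if htlcs.iter_mut().any(|h| {
			h.timer_ticks += 1;
			h.timer_ticks >= MPP_TIMEOUT_TICKS
		}) {
			timed_out.push(*payment_hash);
			return false;
		}
		true
	});
	timed_out
}

fn main() {
	let mut claimable = HashMap::new();
	claimable.insert([0u8; 32], vec![PartialHtlc { value: 100_000, total_msat: 200_000, timer_ticks: 0 }]);
	for tick in 1..=MPP_TIMEOUT_TICKS {
		let expired = sweep_incomplete_mpps(&mut claimable);
		println!("tick {}: {} payment(s) timed out", tick, expired.len());
	}
}

Note also the collect-then-act split in the PR itself: the sweep only records (prev_hop, payment_hash) pairs while the channel-state lock is held, and the fail_htlc_backwards_internal calls happen after that scope ends, each re-taking the lock.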
@@ -6239,6 +6271,7 @@ impl Readable for ClaimableHTLC {
 		};
 		Ok(Self {
 			prev_hop: prev_hop.0.unwrap(),
+			timer_ticks: 0,
 			value,
 			onion_payload,
 			cltv_expiry,
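One subtlety in the Readable change above: timer_ticks is an in-memory counter only, and deserialization resets it to zero, so a node restart restarts the three-tick clock for still-pending parts rather than expiring them immediately. A generic illustration of that transient-field pattern (hand-rolled byte (de)serialization for a toy struct, not LDK's ser framework):

struct Part {
	value: u64,
	timer_ticks: u8, // transient: never written out
}

fn write_part(p: &Part) -> Vec<u8> {
	p.value.to_be_bytes().to_vec() // timer_ticks intentionally omitted
}

fn read_part(buf: &[u8]) -> Part {
	let mut v = [0u8; 8];
	v.copy_from_slice(&buf[..8]);
	Part { value: u64::from_be_bytes(v), timer_ticks: 0 } // reset on load
}

fn main() {
	let bytes = write_part(&Part { value: 100_000, timer_ticks: 2 });
	assert_eq!(read_part(&bytes).timer_ticks, 0);
}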
74 changes: 73 additions & 1 deletion lightning/src/ln/payment_tests.rs
@@ -15,7 +15,7 @@ use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
-use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::ChannelMessageHandler;
@@ -199,6 +199,78 @@ fn mpp_retry() {
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
 }
 
+fn do_mpp_receive_timeout(send_partial_mpp: bool) {
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+
+	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
+	let path = route.paths[0].clone();
+	route.paths.push(path);
+	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+	route.paths[0][0].short_channel_id = chan_1_id;
+	route.paths[0][1].short_channel_id = chan_3_id;
+	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+	route.paths[1][0].short_channel_id = chan_2_id;
+	route.paths[1][1].short_channel_id = chan_4_id;
+
+	// Initiate the MPP payment.
+	let _ = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+	check_added_monitors!(nodes[0], 2); // one monitor per path
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 2);
+
+	// Pass half of the payment along the first path.
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+
+	if send_partial_mpp {
+		// Time out the partial MPP
+		for _ in 0..MPP_TIMEOUT_TICKS {
+			nodes[3].node.timer_tick_occurred();
+		}
+
+		// Failed HTLC from node 3 -> 1
+		expect_pending_htlcs_forwardable!(nodes[3]);
+		let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
+		assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
+		nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
+		check_added_monitors!(nodes[3], 1);
+		commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
+
+		// Failed HTLC from node 1 -> 0
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
+		check_added_monitors!(nodes[1], 1);
+		commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
+
+		expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
+	} else {
+		// Pass half of the payment along the second path.
+		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+
+		// Even after MPP_TIMEOUT_TICKS we should not time out the MPP if we have all the parts
+		for _ in 0..MPP_TIMEOUT_TICKS {
+			nodes[3].node.timer_tick_occurred();
+		}
+
+		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+	}
+}
+
+#[test]
+fn mpp_receive_timeout() {
+	do_mpp_receive_timeout(true);
+	do_mpp_receive_timeout(false);
+}

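The expected_htlc_error_data(23, &[][..]) assertion pins down the wire-level failure: BOLT 4 defines code 23 as mpp_timeout, carried with empty data and with none of the flag bits set, and mpp_parts_remain() reflects that the second path's HTLC is never delivered in this test, so the sender still considers one part in flight. A small decoder for reading such codes, using the flag constants from BOLT 4 (the describe helper itself is illustrative, not from the codebase):

// BOLT 4 failure-code flag bits.
const BADONION: u16 = 0x8000;
const PERM: u16 = 0x4000;
const NODE: u16 = 0x2000;
const UPDATE: u16 = 0x1000;

fn describe(code: u16) -> String {
	format!(
		"code {} (badonion={}, perm={}, node={}, update={})",
		code & 0x0fff,
		code & BADONION != 0,
		code & PERM != 0,
		code & NODE != 0,
		code & UPDATE != 0,
	)
}

fn main() {
	// 23 == mpp_timeout: the receiver gave up waiting for the remaining
	// parts, and the sender may retry the payment with fresh parts.
	println!("{}", describe(23));
}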
 #[test]
 fn retry_expired_payment() {
 	let chanmon_cfgs = create_chanmon_cfgs(3);