
Commit 6542294

Fail HTLC backwards before upstream claims on-chain
Fail inbound HTLCs if they expire within `TIMEOUT_FAIL_BACK_BUFFER` blocks of the current height. If we haven't seen the preimage for a forwarded HTLC by the time the previous hop's timeout expires, we've lost that HTLC, so we might as well fail it back instead of having our counterparty force-close the channel.
1 parent d8edaad commit 6542294
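
The heart of the change is a single height check against the inbound HTLC's CLTV expiry. A minimal sketch of that predicate (illustrative only: `ForwardedHtlc` and `should_fail_back` are hypothetical names, not part of this patch):

    // Sketch of the check this commit adds to ChannelMonitor, assuming the
    // buffer value from the diff below.
    const TIMEOUT_FAIL_BACK_BUFFER: u32 = 3;

    /// Hypothetical stand-in for a forwarded HTLC; the real code reads the
    /// expiry out of `HTLCSource::PreviousHopData`.
    struct ForwardedHtlc {
        inbound_cltv_expiry: u32,
    }

    fn should_fail_back(htlc: &ForwardedHtlc, best_block_height: u32) -> bool {
        // Fail a few blocks early so that slow block processing can't let the
        // upstream counterparty's timeout pass before we fail back.
        htlc.inbound_cltv_expiry <= best_block_height + TIMEOUT_FAIL_BACK_BUFFER
    }

    fn main() {
        let htlc = ForwardedHtlc { inbound_cltv_expiry: 103 };
        assert!(!should_fail_back(&htlc, 99));  // 103 > 99 + 3: still safe
        assert!(should_fail_back(&htlc, 100)); // 103 <= 100 + 3: fail back now
    }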

File tree

2 files changed (+137 -18 lines)


lightning/src/chain/channelmonitor.rs (+82 -15)

@@ -37,7 +37,7 @@ use crate::ln::{PaymentHash, PaymentPreimage};
 use crate::ln::msgs::DecodeError;
 use crate::ln::chan_utils;
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction};
-use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
+use crate::ln::channelmanager::{HTLCSource, SentHTLCId, HTLCPreviousHopData};
 use crate::chain;
 use crate::chain::{BestBlock, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator};
@@ -236,6 +236,15 @@ pub const ANTI_REORG_DELAY: u32 = 6;
 /// in a race condition between the user connecting a block (which would fail it) and the user
 /// providing us the preimage (which would claim it).
 pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
+/// Number of blocks before an inbound HTLC expires at which we fail it backwards.
+///
+/// If we have forwarded an HTLC to a peer and they have not claimed it on or off-chain by the
+/// time the previous hop's HTLC timeout expires, we're probably going to lose the inbound HTLC.
+/// Instead of also losing the channel, we fail the HTLC backwards.
+///
+/// To give us some buffer in case we're slow to process blocks, we fail a few blocks before the
+/// timeout officially expires to ensure we fail back before our counterparty force closes.
+pub(crate) const TIMEOUT_FAIL_BACK_BUFFER: u32 = 3;
 
 // TODO(devrandom) replace this with HolderCommitmentTransaction
 #[derive(Clone, PartialEq, Eq)]
@@ -3309,6 +3318,60 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			}
 		}
 
+		// Fail back HTLCs on backwards channels if they expire within `TIMEOUT_FAIL_BACK_BUFFER`
+		// blocks. If we haven't seen the preimage for an HTLC by the time the previous hop's
+		// timeout expires, we've lost that HTLC, so we might as well fail it back instead of
+		// having our counterparty force-close the channel.
+		let height = self.best_block.height();
+		macro_rules! fail_soon_to_expire_htlcs {
+			($htlcs: expr) => {{
+				for (htlc, source_opt) in $htlcs {
+					// Only check forwarded HTLCs' previous hops.
+					let source = match source_opt {
+						Some(source) => source,
+						None => continue,
+					};
+					let cltv_expiry = match source {
+						HTLCSource::PreviousHopData(HTLCPreviousHopData { cltv_expiry: Some(cltv_expiry), .. }) => *cltv_expiry,
+						_ => continue,
+					};
+					if cltv_expiry <= height + TIMEOUT_FAIL_BACK_BUFFER {
+						let duplicate_event = self.pending_monitor_events.iter().any(
+							|update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
+								upd.source == *source
+							} else { false });
+						if !duplicate_event {
+							log_debug!(logger, "Failing back HTLC {} upstream to preserve the \
+								channel as the forward HTLC hasn't resolved and our backward HTLC \
+								expires soon at {}", log_bytes!(htlc.payment_hash.0), cltv_expiry);
+							self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
+								source: source.clone(),
+								payment_preimage: None,
+								payment_hash: htlc.payment_hash,
+								htlc_value_satoshis: Some(htlc.amount_msat / 1000),
+							}));
+						}
+					}
+				}
+			}}
+		}
+
+		let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter()
+			.map(|&(ref a, _, ref b)| (a, b.as_ref()));
+		fail_soon_to_expire_htlcs!(current_holder_htlcs);
+
+		if let Some(ref txid) = self.current_counterparty_commitment_txid {
+			if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(txid) {
+				fail_soon_to_expire_htlcs!(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))));
+			}
+		}
+
+		if let Some(ref txid) = self.prev_counterparty_commitment_txid {
+			if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(txid) {
+				fail_soon_to_expire_htlcs!(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))));
+			}
+		}
+
 		// Find which on-chain events have reached their confirmation threshold.
 		let onchain_events_awaiting_threshold_conf =
 			self.onchain_events_awaiting_threshold_conf.drain(..).collect::<Vec<_>>();
@@ -3353,20 +3416,24 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 						matured_htlcs.push(source.clone());
 					}
 
-					log_debug!(logger, "HTLC {} failure update in {} has got enough confirmations to be passed upstream",
-						log_bytes!(payment_hash.0), entry.txid);
-					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-						payment_hash,
-						payment_preimage: None,
-						source: source.clone(),
-						htlc_value_satoshis,
-					}));
-					self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
-						commitment_tx_output_idx,
-						resolving_txid: Some(entry.txid),
-						resolving_tx: entry.transaction,
-						payment_preimage: None,
-					});
+					let duplicate_event = self.pending_monitor_events.iter().any(
+						|update| if let &MonitorEvent::HTLCEvent(ref upd) = update { upd.source == *source } else { false });
+					if !duplicate_event {
+						log_debug!(logger, "HTLC {} failure update in {} has got enough confirmations to be passed upstream",
+							log_bytes!(payment_hash.0), entry.txid);
+						self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
+							payment_hash,
+							payment_preimage: None,
+							source: source.clone(),
+							htlc_value_satoshis,
+						}));
+						self.htlcs_resolved_on_chain.push(IrrevocablyResolvedHTLC {
+							commitment_tx_output_idx,
+							resolving_txid: Some(entry.txid),
+							resolving_tx: entry.transaction,
+							payment_preimage: None,
+						});
+					}
 				},
 				OnchainEvent::MaturingOutput { descriptor } => {
 					log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));

lightning/src/ln/functional_tests.rs (+55 -3)

@@ -14,7 +14,7 @@
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
-use crate::chain::channelmonitor;
+use crate::chain::channelmonitor::{self, TIMEOUT_FAIL_BACK_BUFFER};
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
@@ -2211,6 +2211,58 @@ fn channel_reserve_in_flight_removes() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
 }
 
+#[test]
+fn test_fail_back_before_backwards_timeout() {
+	// Test that we fail an HTLC upstream if we are still waiting for confirmation downstream
+	// by the time the upstream timeout expires.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+
+	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+
+	// Force close the downstream channel and broadcast the HTLC-timeout transaction.
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+	check_added_monitors!(nodes[1], 1);
+	check_closed_broadcast!(nodes[1], true);
+
+	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
+	test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+
+	// Nothing is confirmed for a while.
+	connect_blocks(&nodes[1], MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - TIMEOUT_FAIL_BACK_BUFFER);
+
+	// Check that nodes[1] fails the HTLC upstream.
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+	check_added_monitors!(nodes[1], 1);
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_fail, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+			assert!(update_add_htlcs.is_empty());
+			assert!(update_fulfill_htlcs.is_empty());
+			assert_eq!(update_fail_htlcs.len(), 1);
+			assert!(update_fail_malformed_htlcs.is_empty());
+			assert!(update_fee.is_none());
+			(update_fail_htlcs[0].clone(), commitment_signed.clone())
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail);
+	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+	expect_payment_failed_conditions(&nodes[0], payment_hash_1, false, PaymentFailedConditions::new().blamed_chan_closed(true));
+}
+
 #[test]
 fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -2307,7 +2359,7 @@ fn channel_monitor_network_test() {
 	let node2_commitment_txid;
 	{
 		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
-		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
+		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 		test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
 		node2_commitment_txid = node_txn[0].txid();
 

@@ -2992,7 +3044,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
 	mine_transaction(&nodes[1], &commitment_tx[0]);
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
-	connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[1], 100 - nodes[2].best_block_info().1);
 	let timeout_tx = {
 		let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
 		if nodes[1].connect_style.borrow().skips_blocks() {
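
The block counts in the new test are tuned so that nodes[1]'s backward HTLC enters the fail-back window without actually expiring. A back-of-the-envelope check, treating expiries as offsets from the height at which the payment was routed (the constant values below are assumptions matching the crate's test defaults at the time, not taken from this diff):

    const TEST_FINAL_CLTV: u32 = 70;              // assumed
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;   // assumed
    const MIN_CLTV_EXPIRY_DELTA: u32 = 72;        // assumed
    const TIMEOUT_FAIL_BACK_BUFFER: u32 = 3;      // from this commit

    fn main() {
        // Expiries relative to when the payment was routed:
        let downstream_expiry = TEST_FINAL_CLTV;                       // B -> C HTLC
        let upstream_expiry = TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA; // A -> B HTLC

        // The first batch of blocks puts us past the downstream expiry, so B
        // broadcasts its HTLC-timeout transaction.
        let after_first_batch = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;
        assert!(after_first_batch > downstream_expiry);

        // Second batch: nothing confirms, and the upstream HTLC drifts into
        // the fail-back window...
        let after_second_batch = after_first_batch
            + (MIN_CLTV_EXPIRY_DELTA - LATENCY_GRACE_PERIOD_BLOCKS - TIMEOUT_FAIL_BACK_BUFFER);
        assert!(upstream_expiry <= after_second_batch + TIMEOUT_FAIL_BACK_BUFFER);

        // ...but has not yet expired, so nodes[1] fails back before nodes[0]
        // would force-close on it.
        assert!(after_second_batch < upstream_expiry);
    }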
