
Commit 3044493

Update tests to test re-claiming of forwarded HTLCs on startup
Because some of these tests require connecting blocks without calling `get_and_clear_pending_msg_events`, we need to split up the block connection utilities so that their sanity checks are only run optionally.
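A rough sketch of what "optionally run sanity checks" can look like in a block-connection helper; the names and the `TestNode` type below are stand-ins for illustration, not the actual test-framework API:

    // Hypothetical illustration of the split described above: the normal helper keeps
    // its sanity check, while a *_without_consistency_checks variant skips it so blocks
    // can be connected before the test has drained pending message events.
    struct TestNode { pending_msg_events: Vec<String> }

    impl TestNode {
        fn connect_block(&mut self, block_data: &[u8]) {
            // Usual path: assert the test has drained all message events, then connect.
            assert!(self.pending_msg_events.is_empty(), "undrained message events");
            self.connect_block_without_consistency_checks(block_data);
        }

        fn connect_block_without_consistency_checks(&mut self, _block_data: &[u8]) {
            // Startup-style path: feed the block in without asserting anything about
            // queued message events, mirroring how a node processes blocks on reload.
        }
    }

The new test below relies on exactly this kind of variant when it calls `mine_transaction_without_consistency_checks`.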

5 files changed: +272 −51 lines

lightning/src/chain/channelmonitor.rs (+2 −2)

@@ -67,7 +67,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
     pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
@@ -487,7 +487,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,

 );

-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
     LatestHolderCommitmentTXInfo {
         commitment_tx: HolderCommitmentTransaction,

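The `Debug` additions here (and in the files below) exist so test code can print and compare these types; `assert_eq!`, for instance, only compiles when both operands implement `Debug`, since it needs to render them on failure. A small, hypothetical test fragment (not part of this commit) showing the kind of assertion this enables:

    use lightning::chain::channelmonitor::ChannelMonitorUpdate;

    // With Debug (plus the existing PartialEq/Eq) derived on ChannelMonitorUpdate,
    // a failing assertion prints both updates instead of aborting without context.
    fn assert_same_update(a: &ChannelMonitorUpdate, b: &ChannelMonitorUpdate) {
        assert_eq!(a, b);
    }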
lightning/src/ln/chan_utils.rs (+4 −4)

@@ -450,7 +450,7 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
 /// channel basepoints via the new function, or they were obtained via
 /// CommitmentTransaction.trust().keys() because we trusted the source of the
 /// pre-calculated keys.
-#[derive(PartialEq, Eq, Clone)]
+#[derive(PartialEq, Eq, Clone, Debug)]
 pub struct TxCreationKeys {
     /// The broadcaster's per-commitment public key which was used to derive the other keys.
     pub per_commitment_point: PublicKey,
@@ -1028,7 +1028,7 @@ impl<'a> DirectedChannelTransactionParameters<'a> {
 /// Information needed to build and sign a holder's commitment transaction.
 ///
 /// The transaction is only signed once we are ready to broadcast.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct HolderCommitmentTransaction {
     inner: CommitmentTransaction,
     /// Our counterparty's signature for the transaction
@@ -1134,7 +1134,7 @@ impl HolderCommitmentTransaction {
 }

 /// A pre-built Bitcoin commitment transaction and its txid.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct BuiltCommitmentTransaction {
     /// The commitment transaction
     pub transaction: Transaction,
@@ -1305,7 +1305,7 @@ impl<'a> TrustedClosingTransaction<'a> {
 ///
 /// This class can be used inside a signer implementation to generate a signature given the relevant
 /// secret key.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct CommitmentTransaction {
     commitment_number: u64,
     to_broadcaster_value_sat: u64,

lightning/src/ln/chanmon_update_fail_tests.rs (+238 −35)

@@ -3102,7 +3102,7 @@ fn test_blocked_chan_preimage_release() {
     expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }

-fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
+fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
     // When we forward a payment and receive an `update_fulfill_htlc` message from the downstream
     // channel, we immediately claim the HTLC on the upstream channel, before even doing a
     // `commitment_signed` dance on the downstream channel. This implies that our
@@ -3130,6 +3130,10 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
     let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);

     let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+    let mut manager_b = Vec::new();
+    if !with_latest_manager {
+        manager_b = nodes[1].node.encode();
+    }

     nodes[2].node.claim_funds(payment_preimage);
     check_added_monitors(&nodes[2], 1);
@@ -3166,63 +3170,262 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
     }

     // Now reload node B
-    let manager_b = nodes[1].node.encode();
+    if with_latest_manager {
+        manager_b = nodes[1].node.encode();
+    }

     let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
     reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);

     nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
     nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

-    // If we used the latest ChannelManager to reload from, we should have both channels still
-    // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
-    // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
-    // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
-    // complete after reconnecting to our peers.
-    persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-    nodes[1].node.timer_tick_occurred();
-    check_added_monitors(&nodes[1], 1);
-    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+    if with_latest_manager {
+        // If we used the latest ChannelManager to reload from, we should have both channels still
+        // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+        // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+        // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+        // complete after reconnecting to our peers.
+        persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+        nodes[1].node.timer_tick_occurred();
+        check_added_monitors(&nodes[1], 1);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

-    // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
-    // the end go ahead and do that, though the
-    // `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
-    // expect to *not* receive the final RAA ChannelMonitorUpdate.
-    if complete_bc_commitment_dance {
-        reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+        // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+        // the end go ahead and do that, though the
+        // `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
+        // expect to *not* receive the final RAA ChannelMonitorUpdate.
+        if complete_bc_commitment_dance {
+            reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+        } else {
+            let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+            reconnect_args.pending_responding_commitment_signed.1 = true;
+            reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+            reconnect_args.pending_raa = (false, true);
+            reconnect_nodes(reconnect_args);
+        }
+
+        reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+        // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+        // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+        // process.
+        let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+        nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+        // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
+        // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+        // channel.
     } else {
-        let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
-        reconnect_args.pending_responding_commitment_signed.1 = true;
-        reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-        reconnect_args.pending_raa = (false, true);
-        reconnect_nodes(reconnect_args);
-    }
+        // If the ChannelManager used in the reload was stale, check that the B <-> C channel was
+        // closed.
+        //
+        // Note that this will also process the ChannelMonitorUpdates which were queued up when we
+        // reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
+        // force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
+        // commitment update will be allowed to go out.
+        check_added_monitors(&nodes[1], 0);
+        persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+        persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+        check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
+        check_added_monitors(&nodes[1], 2);
+
+        nodes[1].node.timer_tick_occurred();
+        check_added_monitors(&nodes[1], 0);

-    reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+        // Don't bother to reconnect B to C - that channel has been closed. We don't need to
+        // exchange any messages here even though there's a pending commitment update because the
+        // ChannelMonitorUpdate hasn't yet completed.
+        reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

-    // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
-    // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
-    // process.
-    let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-    nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+        let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+        nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+        // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
+        // preimage (as it was a replay of the original ChannelMonitorUpdate from before we
+        // restarted). When we go to fetch the commitment transaction updates we'll poll the
+        // ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
+        // with the actual commitment transaction, which will allow us to fulfill the HTLC with
+        // node A.
+    }

-    // When we fetch B's HTLC update messages here (now that the ChannelMonitorUpdate has
-    // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
-    // channel.
     let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
     check_added_monitors(&nodes[1], 1);

     nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
     do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);

-    expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, false);
+    expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);

     // Finally, check that the payment was, ultimately, seen as sent by node A.
     expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
 }

 #[test]
 fn test_inverted_mon_completion_order() {
-    do_test_inverted_mon_completion_order(true);
-    do_test_inverted_mon_completion_order(false);
+    do_test_inverted_mon_completion_order(true, true);
+    do_test_inverted_mon_completion_order(true, false);
+    do_test_inverted_mon_completion_order(false, true);
+    do_test_inverted_mon_completion_order(false, false);
+}
+
+fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
+    // Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
+    // is force-closed between when we generate the update on reload and when we go to handle the
+    // update or prior to generating the update at all.
+
+    if !close_chans_before_reload && close_only_a {
+        // If we're not closing, it makes no sense to "only close A"
+        panic!();
+    }
+
+    let chanmon_cfgs = create_chanmon_cfgs(3);
+    let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+    let persister;
+    let new_chain_monitor;
+    let nodes_1_deserialized;
+
+    let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+    let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+    let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+    let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+    // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+    // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+    // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+    let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+    let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+    nodes[2].node.claim_funds(payment_preimage);
+    check_added_monitors(&nodes[2], 1);
+    expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+    chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+    let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+    nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+    // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+    // for it since the monitor update is marked in-progress.
+    check_added_monitors(&nodes[1], 1);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
+    // the preimage when the nodes reconnect, at which point we have to ensure we get it from the
+    // ChannelMonitor.
+    nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+    check_added_monitors(&nodes[1], 1);
+    let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+    let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+
+    if close_chans_before_reload {
+        if !close_only_a {
+            chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+            nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+            check_closed_broadcast(&nodes[1], 1, true);
+            check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
+        }
+
+        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+        nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+        check_closed_broadcast(&nodes[1], 1, true);
+        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+    }
+
+    // Now reload node B
+    let manager_b = nodes[1].node.encode();
+    reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+    nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+    nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+    if close_chans_before_reload {
+        // If the channels were already closed, B will rebroadcast its closing transactions here.
+        let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+        if close_only_a {
+            assert_eq!(bs_close_txn.len(), 2);
+        } else {
+            assert_eq!(bs_close_txn.len(), 3);
+        }
+    }
+
+    nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+    check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+    let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+    assert_eq!(as_closing_tx.len(), 1);
+
+    // In order to give A's closing transaction to B without processing background events first,
+    // use the _without_consistency_checks utility method. This is similar to connecting blocks
+    // during startup prior to the node being fully initialized.
+    mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+
+    // After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
+    // ChannelMonitor (possibly twice), even though the channel has since been closed.
+    check_added_monitors(&nodes[1], 0);
+    let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
+    if hold_post_reload_mon_update {
+        for _ in 0..mons_added {
+            persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+        }
+    }
+    nodes[1].node.timer_tick_occurred();
+    check_added_monitors(&nodes[1], mons_added);
+
+    // Finally, check that B created a payment preimage transaction and close out the payment.
+    let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+    assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
+    let bs_preimage_tx = &bs_txn[0];
+    check_spends!(bs_preimage_tx, as_closing_tx[0]);
+
+    if !close_chans_before_reload {
+        check_closed_broadcast(&nodes[1], 1, true);
+        check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+    } else {
+        // While we forwarded the payment a while ago, we don't want to process events too early or
+        // we'll run background tasks we wanted to test individually.
+        expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
+    }
+
+    mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
+    check_closed_broadcast(&nodes[0], 1, true);
+    expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+
+    if !close_chans_before_reload || close_only_a {
+        // Make sure the B<->C channel is still alive and well by sending a payment over it.
+        let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+        reconnect_args.pending_responding_commitment_signed.1 = true;
+        if !close_chans_before_reload {
+            // TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
+            // will consider the forwarded payment complete and allow the B<->C
+            // `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
+            // be allowed, and needs fixing.
+            reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+        }
+        reconnect_args.pending_raa.1 = true;
+
+        reconnect_nodes(reconnect_args);
+        let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
+        expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
+        if !close_chans_before_reload {
+            // Once we call `process_pending_events` the final `ChannelMonitorUpdate` for the B<->C
+            // channel will fly, removing the payment preimage from it.
+            check_added_monitors(&nodes[1], 1);
+        }
+        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+        send_payment(&nodes[1], &[&nodes[2]], 100_000);
+    }
+}
+
+#[test]
+fn test_durable_preimages_on_closed_channel() {
+    do_test_durable_preimages_on_closed_channel(true, true, true);
+    do_test_durable_preimages_on_closed_channel(true, true, false);
+    do_test_durable_preimages_on_closed_channel(true, false, true);
+    do_test_durable_preimages_on_closed_channel(true, false, false);
+    do_test_durable_preimages_on_closed_channel(false, false, true);
+    do_test_durable_preimages_on_closed_channel(false, false, false);
 }

lightning/src/ln/channelmanager.rs (+3 −3)

@@ -177,7 +177,7 @@ pub(super) enum HTLCForwardInfo {
 }

 /// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
     // Note that this may be an outbound SCID alias for the associated channel.
     short_channel_id: u64,
@@ -282,7 +282,7 @@ impl Readable for InterceptId {
     }
 }

-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 /// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
 pub(crate) enum SentHTLCId {
     PreviousHopData { short_channel_id: u64, htlc_id: u64 },
@@ -313,7 +313,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,

 /// Tracks the inbound corresponding to an outbound HTLC
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum HTLCSource {
     PreviousHopData(HTLCPreviousHopData),
     OutboundRoute {
