@@ -2213,6 +2213,11 @@ fn channel_reserve_in_flight_removes() {
 
 #[test]
 fn test_fail_back_before_backwards_timeout() {
+	do_test_fail_back_before_backwards_timeout(false);
+	do_test_fail_back_before_backwards_timeout(true);
+}
+
+fn do_test_fail_back_before_backwards_timeout(forward_claims: bool) {
 	// Test that we fail an HTLC upstream if we are still waiting for confirmation downstream
 	// just before the upstream timeout expires
 	let chanmon_cfgs = create_chanmon_cfgs(3);
@@ -2227,21 +2232,21 @@ fn test_fail_back_before_backwards_timeout() {
 	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
 	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
 
-	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
 	// Force close downstream with timeout
 	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
 
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
-	test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+	let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
+	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
 
 	// Nothing is confirmed for a while
 	connect_blocks(&nodes[1], MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - TIMEOUT_FAIL_BACK_BUFFER);
 
-	// Check that node 2 fails the HTLC upstream
+	// Check that nodes[1] fails the HTLC upstream
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -2260,7 +2265,30 @@ fn test_fail_back_before_backwards_timeout() {
 
 	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail);
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
-	expect_payment_failed_conditions(&nodes[0], payment_hash_1, false, PaymentFailedConditions::new().blamed_chan_closed(true));
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_chan_closed(true));
+
+	// Make sure we don't generate duplicate fails from monitor events
+	if forward_claims {
+		// Claim and force close as nodes[2]
+		nodes[2].node.claim_funds(payment_preimage);
+		expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+		check_added_monitors!(nodes[2], 1);
+		get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+
+		connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
+		let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS);
+		check_closed_broadcast!(nodes[2], true);
+		check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false);
+		check_added_monitors!(nodes[2], 1);
+
+		// Confirm nodes[2]'s claim with preimage
+		mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
+		mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
+	} else {
+		// Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
+		mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
+		mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
+	}
 }
 
 #[test]