@@ -15,11 +15,12 @@ use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::transaction::Version;
 
 use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
+use crate::chain::ChannelMonitorUpdateStatus;
 use crate::events::bump_transaction::WalletSource;
-use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-use crate::ln::functional_test_utils::*;
+use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use crate::ln::{functional_test_utils::*, msgs};
 use crate::ln::msgs::ChannelMessageHandler;
-use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
 use crate::util::test_channel_signer::SignerOp;
 
 #[test]
@@ -329,6 +330,222 @@ fn test_async_commitment_signature_for_peer_disconnect() {
 	}
 }
 
+#[test]
+fn test_async_commitment_signature_ordering_reestablish() {
+	do_test_async_commitment_signature_ordering(false);
+}
+
+#[test]
+fn test_async_commitment_signature_ordering_monitor_restored() {
+	do_test_async_commitment_signature_ordering(true);
+}
+
+fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) {
+	// Across disconnects we may end up in a situation where we need to send a
+	// commitment_signed and then revoke_and_ack. We need to make sure that if
+	// the signer is pending for commitment_signed but not revoke_and_ack, we don't
+	// screw up the order by sending the revoke_and_ack first.
+	//
+	// We test this for both the case where we send messages after a channel
+	// reestablish, as well as restoring a channel after persisting
+	// a monitor update.
+	//
+	// The set up for this test is based on
+	// `test_drop_messages_peer_disconnect_dual_htlc`.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	// Start to send the second update_add_htlc + commitment_signed, but don't actually make it
+	// to the peer.
+	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+	nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_1.len(), 1);
+	match events_1[0] {
+		MessageSendEvent::UpdateHTLCs { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+
+	// Send back update_fulfill_htlc + commitment_signed for the first payment.
+	nodes[1].node.claim_funds(payment_preimage_1);
+	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
+	check_added_monitors!(nodes[1], 1);
+
+	// Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the
+	// commitment_signed.
+	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_2.len(), 1);
+	match events_2[0] {
+		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			assert!(update_add_htlcs.is_empty());
+			assert_eq!(update_fulfill_htlcs.len(), 1);
+			assert!(update_fail_htlcs.is_empty());
+			assert!(update_fail_malformed_htlcs.is_empty());
+			assert!(update_fee.is_none());
+
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+			let events_3 = nodes[0].node.get_and_clear_pending_events();
+			assert_eq!(events_3.len(), 1);
+			match events_3[0] {
+				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
+					assert_eq!(*payment_preimage, payment_preimage_1);
+					assert_eq!(*payment_hash, payment_hash_1);
+				},
+				_ => panic!("Unexpected event"),
+			}
+
+			if monitor_update_failure {
+				chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+			}
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+			if monitor_update_failure {
+				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+			} else {
+				let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+			}
+			// No commitment_signed so get_event_msg's assert(len == 1) passes
+			check_added_monitors!(nodes[0], 1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	// Disconnect and reconnect the peers so that nodes[0] will
+	// need to re-send the commitment update *and then* revoke_and_ack.
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert_eq!(reestablish_1.len(), 1);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_2.len(), 1);
+
+	// With a fully working signer, here we would send a commitment_signed,
+	// and then revoke_and_ack. With commitment_signed disabled, since
+	// our ordering is CS then RAA, we should make sure we don't send the RAA.
+	nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert!(as_resp.0.is_none());
+	assert!(as_resp.1.is_none());
+	assert!(as_resp.2.is_none());
+
+	if monitor_update_failure {
+		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+		let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+		nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+		check_added_monitors!(nodes[0], 0);
+	}
+
+	// Make sure that on signer_unblocked we have the same behavior (even though RAA is ready,
+	// we don't send CS yet).
+	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
+	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert!(as_resp.0.is_none());
+	assert!(as_resp.1.is_none());
+	assert!(as_resp.2.is_none());
+
+	nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
+	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
+
+	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	assert!(as_resp.0.is_none());
+	assert!(bs_resp.0.is_none());
+
+	assert!(bs_resp.1.is_none());
+	assert!(bs_resp.2.is_none());
+
+	assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
+
+	// Now that everything is restored, get the CS + RAA and handle them.
+	assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
+	assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
+	assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+	assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+	assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
+	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
+	let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
+	assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
+	assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
+	assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
+	assert!(bs_second_commitment_signed.update_fee.is_none());
+	check_added_monitors!(nodes[1], 1);
+
+	// The rest of this is boilerplate for resolving the previous state.
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+	let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	assert!(as_commitment_signed.update_add_htlcs.is_empty());
+	assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
+	assert!(as_commitment_signed.update_fail_htlcs.is_empty());
+	assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
+	assert!(as_commitment_signed.update_fee.is_none());
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
+	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
+	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	let events_5 = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events_5.len(), 1);
+	match events_5[0] {
+		Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
+			assert_eq!(payment_hash_2, *payment_hash);
+			match &purpose {
+				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
+					assert!(payment_preimage.is_none());
+					assert_eq!(payment_secret_2, *payment_secret);
+				},
+				_ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
+			}
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[0], 1);
+
+	expect_payment_path_successful!(nodes[0]);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
 fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) {
 	// Ensures that we can obtain holder signatures for commitment and HTLC transactions
 	// asynchronously by allowing their retrieval to fail and retrying via
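The core pattern the new test relies on is the harness's ability to gate individual signer operations. Below is a minimal sketch (not part of this commit) of that pattern, using only helpers that appear in the diff above; the `Node` and `ChannelId` types and lifetime parameters are assumed to come from this crate's `functional_test_utils` harness.

```rust
// Sketch of the async-signer gating pattern, under the assumptions above.
fn sketch_signer_gating<'a, 'b, 'c>(nodes: &[Node<'a, 'b, 'c>], chan_id: ChannelId) {
	let peer = nodes[1].node.get_our_node_id();

	// While SignCounterpartyCommitment is disabled, the channel treats the
	// signature as pending and queues its outbound commitment_signed. Because
	// the required order here is commitment_signed before revoke_and_ack, the
	// RAA must be held back along with it.
	nodes[0].disable_channel_signer_op(&peer, &chan_id, SignerOp::SignCounterpartyCommitment);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Once the signature is available again, re-enable the op and notify the
	// channel manager; queued messages are released in protocol order.
	nodes[0].enable_channel_signer_op(&peer, &chan_id, SignerOp::SignCounterpartyCommitment);
	nodes[0].node.signer_unblocked(Some((peer, chan_id)));
}
```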