@@ -15,11 +15,12 @@ use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::transaction::Version;
 
 use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
+use crate::chain::ChannelMonitorUpdateStatus;
 use crate::events::bump_transaction::WalletSource;
-use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-use crate::ln::functional_test_utils::*;
+use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
+use crate::ln::{functional_test_utils::*, msgs};
 use crate::ln::msgs::ChannelMessageHandler;
-use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
 use crate::util::test_channel_signer::SignerOp;
 
 #[test]
@@ -272,7 +273,28 @@ fn test_async_commitment_signature_for_funding_signed_0conf() {
 }
 
 #[test]
-fn test_async_commitment_signature_for_peer_disconnect() {
+fn test_async_commitment_signature_peer_disconnect() {
+	do_test_async_commitment_signature_peer_disconnect(0);
+}
+
+#[test]
+fn test_async_commitment_signature_peer_disconnect_signer_restored_before_monitor_completion() {
+	// This tests that if we were pending a monitor update completion across a disconnect,
+	// and needed to send a CS, then if our signer becomes available before the monitor
+	// update completes, we don't send duplicate messages upon calling `signer_unblocked`
+	// after the monitor update completes.
+	do_test_async_commitment_signature_peer_disconnect(1);
+}
+
+#[test]
+fn test_async_commitment_signature_peer_disconnect_signer_restored_before_reestablish() {
+	// This tests that if we tried to send a commitment_signed but our signer was blocked, and
+	// we then disconnect, reconnect, the signer becomes available, and we handle the
+	// channel_reestablish, we don't send duplicate messages upon calling `signer_unblocked`.
+	do_test_async_commitment_signature_peer_disconnect(2);
+}
+
+fn do_test_async_commitment_signature_peer_disconnect(test_case: u8) {
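+	// test_case semantics (as exercised by the wrapper tests above):
+	//  0 - signer re-enabled only after reconnect, via `signer_unblocked`
+	//  1 - monitor update held `InProgress`; signer re-enabled before it completes
+	//  2 - signer re-enabled before handling `channel_reestablish`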
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -298,35 +320,236 @@ fn test_async_commitment_signature_for_peer_disconnect() {
 
 	dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]);
 
+	if test_case == 1 {
+		// Fail to persist the monitor update when handling the commitment_signed.
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	}
+
 	// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
 	// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
 	dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
 	dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
 	check_added_monitors(dst, 1);
 
-	get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
+	if test_case != 1 {
+		get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
+	}
 
 	// Now disconnect and reconnect the peers.
 	src.node.peer_disconnected(&dst.node.get_our_node_id());
 	dst.node.peer_disconnected(&src.node.get_our_node_id());
-	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-	reconnect_args.send_channel_ready = (false, false);
-	reconnect_args.pending_raa = (true, false);
-	reconnect_nodes(reconnect_args);
+
+	// Reconnect the peers and collect their channel_reestablish messages.
+	src.node.peer_connected(&dst.node.get_our_node_id(), &msgs::Init {
+		features: dst.node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(src, dst);
+	assert_eq!(reestablish_1.len(), 1);
+	dst.node.peer_connected(&src.node.get_our_node_id(), &msgs::Init {
+		features: src.node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(dst, src);
+	assert_eq!(reestablish_2.len(), 1);
+
+	if test_case == 2 {
+		// Reenable the signer before the reestablish.
+		dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
+	}
+
+	dst.node.handle_channel_reestablish(&src.node.get_our_node_id(), &reestablish_1[0]);
+
+	if test_case == 1 {
+		dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
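+		// Simulate the async monitor persist for the earlier commitment_signed finally completing.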
+		let (outpoint, latest_update, _) = dst.chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+		dst.chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+		check_added_monitors!(dst, 0);
+	}
+
+	// Expect the RAA
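+	// (`handle_chan_reestablish_msgs!` yields (channel_ready, revoke_and_ack, commitment_update, order).)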
+	let (_, revoke_and_ack, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src);
+	assert!(revoke_and_ack.is_some());
+	if test_case == 0 {
+		assert!(commitment_signed.is_none());
+	} else {
+		assert!(commitment_signed.is_some());
+	}
 
 	// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
 	dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
 	dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
 
-	{
-		let events = dst.node.get_and_clear_pending_msg_events();
-		assert_eq!(events.len(), 1, "expected one message, got {}", events.len());
-		if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] {
-			assert_eq!(node_id, &src.node.get_our_node_id());
-		} else {
-			panic!("expected UpdateHTLCs message, not {:?}", events[0]);
-		};
+	if test_case == 0 {
+		let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src);
+		assert!(commitment_signed.is_some());
+	} else {
+		// Make sure we don't double send the CS.
+		let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src);
+		assert!(commitment_signed.is_none());
+	}
+}
+
+#[test]
+fn test_async_commitment_signature_ordering_reestablish() {
+	do_test_async_commitment_signature_ordering(false);
+}
+
+#[test]
+fn test_async_commitment_signature_ordering_monitor_restored() {
+	do_test_async_commitment_signature_ordering(true);
+}
+
+fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) {
+	// Across disconnects we may end up in a situation where we need to send a
+	// commitment_signed and then revoke_and_ack. We need to make sure that if
+	// the signer is pending for commitment_signed but not revoke_and_ack, we don't
+	// screw up the order by sending the revoke_and_ack first.
+	//
+	// We test this both for the case where we send messages after a channel
+	// reestablish, and for the case where we restore a channel after persisting
+	// a monitor update.
+	//
+	// The setup for this test is based on
+	// `test_drop_messages_peer_disconnect_dual_htlc`.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let (_, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	// Start to send the second update_add_htlc + commitment_signed, but don't actually make it
+	// to the peer.
+	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+	nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+
+	// Send back update_fulfill_htlc + commitment_signed for the first payment.
+	nodes[1].node.claim_funds(payment_preimage_1);
+	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
+	check_added_monitors!(nodes[1], 1);
+
+	// Handle the update_fulfill_htlc, but fail to persist the monitor update when handling the
+	// commitment_signed.
+	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_2.len(), 1);
+	match events_2[0] {
+		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_fulfill_htlcs, ref commitment_signed, .. } } => {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+			expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
+			if monitor_update_failure {
+				chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+			}
+			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+			if monitor_update_failure {
+				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+			} else {
+				let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+			}
+			// No commitment_signed so get_event_msg's assert(len == 1) passes
+			check_added_monitors!(nodes[0], 1);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	// Disconnect and reconnect the peers so that nodes[0] will
+	// need to re-send the commitment update *and then* revoke_and_ack.
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert_eq!(reestablish_1.len(), 1);
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
+	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+	assert_eq!(reestablish_2.len(), 1);
+
+	// With a fully working signer, here we would send a commitment_signed,
+	// and then revoke_and_ack. With commitment_signed disabled, since
+	// our ordering is CS then RAA, we should make sure we don't send the RAA.
+	nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert!(as_resp.0.is_none());
+	assert!(as_resp.1.is_none());
+	assert!(as_resp.2.is_none());
+
+	if monitor_update_failure {
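+		// Let the monitor update we deliberately failed to persist above complete now.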
+		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+		let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+		nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+		check_added_monitors!(nodes[0], 0);
 	}
+
+	// Make sure that on signer_unblocked we have the same behavior: even though the RAA is
+	// ready, we still send nothing, as the CS must come first and the signer is still blocked.
+	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
+	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	assert!(as_resp.0.is_none());
+	assert!(as_resp.1.is_none());
+	assert!(as_resp.2.is_none());
+
+	nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
+	nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
+
+	let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	assert!(as_resp.0.is_none());
+	assert!(bs_resp.0.is_none());
+
+	assert!(bs_resp.1.is_none());
+	assert!(bs_resp.2.is_none());
+
+	assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
+
+	// Now that everything is restored, get the CS + RAA and handle them.
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
+	let (bs_revoke_and_ack, bs_second_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	check_added_monitors!(nodes[1], 2);
+
+	// The rest of this is boilerplate for resolving the previous state.
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+	let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed);
+	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
+	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[0], 1);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	let events_5 = nodes[1].node.get_and_clear_pending_events();
+	check_payment_claimable(&events_5[0], payment_hash_2, payment_secret_2, 1_000_000, None, nodes[1].node.get_our_node_id());
+
+	expect_payment_path_successful!(nodes[0]);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
 
 fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) {