@@ -16,7 +16,7 @@ use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERI
16
16
use chain:: transaction:: OutPoint ;
17
17
use chain:: keysinterface:: KeysInterface ;
18
18
use ln:: channel:: EXPIRE_PREV_CONFIG_TICKS ;
19
- use ln:: channelmanager:: { BREAKDOWN_TIMEOUT , ChannelManager , ChannelManagerReadArgs , MPP_TIMEOUT_TICKS , PaymentId , PaymentSendFailure } ;
19
+ use ln:: channelmanager:: { BREAKDOWN_TIMEOUT , ChannelManager , ChannelManagerReadArgs , MPP_TIMEOUT_TICKS , MIN_CLTV_EXPIRY_DELTA , PaymentId , PaymentSendFailure } ;
20
20
use ln:: features:: { InitFeatures , InvoiceFeatures } ;
21
21
use ln:: msgs;
22
22
use ln:: msgs:: ChannelMessageHandler ;
@@ -563,6 +563,228 @@ fn retry_with_no_persist() {
563
563
do_retry_with_no_persist ( false ) ;
564
564
}
565
565
566
/// Test helper: verifies that a payment which completed (failed or claimed) off-chain
/// cannot be retried again after a `ChannelManager` reload, across three successive
/// serialize/reload cycles of nodes[0].
fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
	// Test that an off-chain completed payment is not retryable on restart. This was previously
	// broken for dust payments, but we test for both dust and non-dust payments.
	//
	// `use_dust` switches to using a dust HTLC, which results in the HTLC not having an on-chain
	// output at all.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

	// nodes[1] must manually accept inbound channels so open_zero_conf_channel can
	// negotiate 0conf acceptance below.
	let mut manually_accept_config = test_default_channel_config();
	manually_accept_config.manually_accept_inbound_channels = true;

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]);

	// Deserialized managers/monitors must outlive `nodes`, so declare them first
	// (drop order is reverse declaration order). One set per reload cycle.
	let first_persister: test_utils::TestPersister;
	let first_new_chain_monitor: test_utils::TestChainMonitor;
	let first_nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
	let second_persister: test_utils::TestPersister;
	let second_new_chain_monitor: test_utils::TestChainMonitor;
	let second_nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
	let third_persister: test_utils::TestPersister;
	let third_new_chain_monitor: test_utils::TestChainMonitor;
	let third_nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;

	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
	confirm_transaction(&nodes[0], &funding_tx);
	confirm_transaction(&nodes[1], &funding_tx);
	// Ignore the announcement_signatures messages
	nodes[0].node.get_and_clear_pending_msg_events();
	nodes[1].node.get_and_clear_pending_msg_events();
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;

	// Serialize the ChannelManager prior to sending payments
	let mut nodes_0_serialized = nodes[0].node.encode();

	let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0;
	let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 });

	// The ChannelMonitor should always be the latest version, as we're required to persist it
	// during the `commitment_signed_dance!()`.
	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();

	let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());

	// Reloads nodes[0] from `nodes_0_serialized` plus the serialized monitor(s),
	// storing the fresh persister/monitor/manager in the given bindings. The second
	// monitor is only deserialized/watched once `chan_1_monitor_serialized` has been
	// populated (i.e. after chan_id_3 exists in the later part of the test).
	macro_rules! reload_node {
		($chain_monitor: ident, $chan_manager: ident, $persister: ident) => {
			$persister = test_utils::TestPersister::new();
			let keys_manager = &chanmon_cfgs[0].keys_manager;
			$chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &$persister, keys_manager);
			nodes[0].chain_monitor = &$chain_monitor;
			let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
			let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
				&mut chan_0_monitor_read, keys_manager).unwrap();
			assert!(chan_0_monitor_read.is_empty());

			let mut channel_monitors = HashMap::new();
			channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);

			let mut chan_1_monitor = None;
			if !chan_1_monitor_serialized.0.is_empty() {
				let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..];
				chan_1_monitor = Some(<(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
					&mut chan_1_monitor_read, keys_manager).unwrap().1);
				assert!(chan_1_monitor_read.is_empty());
				channel_monitors.insert(chan_1_monitor.as_ref().unwrap().get_funding_txo().0, chan_1_monitor.as_mut().unwrap());
			}

			let mut nodes_0_read = &nodes_0_serialized[..];
			let (_, nodes_0_deserialized_tmp) = {
				<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
					default_config: test_default_channel_config(),
					keys_manager,
					fee_estimator: node_cfgs[0].fee_estimator,
					chain_monitor: nodes[0].chain_monitor,
					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
					logger: nodes[0].logger,
					channel_monitors,
				}).unwrap()
			};
			$chan_manager = nodes_0_deserialized_tmp;
			assert!(nodes_0_read.is_empty());

			assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
			if !chan_1_monitor_serialized.0.is_empty() {
				let funding_txo = chan_1_monitor.as_ref().unwrap().get_funding_txo().0;
				assert!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()).is_ok());
			}
			nodes[0].node = &$chan_manager;
			check_added_monitors!(nodes[0], if !chan_1_monitor_serialized.0.is_empty() { 2 } else { 1 });

			// A restart implies the peer connection dropped; mirror that on nodes[1].
			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
		}
	}

	reload_node!(first_new_chain_monitor, first_nodes_0_deserialized, first_persister);

	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
	// force-close the channel.
	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[0].node.has_pending_payments());
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
	// error, as the channel has hit the chain.
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
	let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_err.len(), 1);
	let bs_commitment_tx;
	match as_err[0] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
			check_added_monitors!(nodes[1], 1);
			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_broadcast!(nodes[1], false);

	// Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
	// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
	// incoming HTLCs with the same payment hash later.
	nodes[2].node.fail_htlc_backwards(&payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[2], 1);

	let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
		[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);

	// Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming
	// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
	// after the commitment transaction, so always connect the commitment transaction.
	mine_transaction(&nodes[0], &bs_commitment_tx[0]);
	mine_transaction(&nodes[1], &bs_commitment_tx[0]);
	if !use_dust {
		// Advance to the HTLC's expiry so nodes[0] broadcasts its HTLC-Timeout claim.
		connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
		connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
		let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
		assert_eq!(as_htlc_timeout.len(), 1);

		mine_transaction(&nodes[0], &as_htlc_timeout[0]);
		// nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
		mine_transaction(&nodes[1], &as_htlc_timeout[0]);
	}

	// Create a new channel on which to retry the payment before we fail the payment via the
	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
	// connecting several blocks while creating the channel (implying time has passed).
	// We do this with a zero-conf channel to avoid connecting blocks as a side-effect.
	let (_, chan_id_3) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);

	// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
	// confirming, we will fail as its considered still-pending...
	let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
	// again. We serialize the node first as we'll then test retrying the HTLC after a restart
	// (which should also still work).
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
	// We set mpp_parts_remain to avoid having abandon_payment called
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
	chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
	get_monitor!(nodes[0], chan_id_3).write(&mut chan_1_monitor_serialized).unwrap();
	nodes_0_serialized = nodes[0].node.encode();

	// The failed HTLC is now resolved, so a retry is accepted...
	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// ...but we reload from the state serialized *before* the retry, so the retry must
	// still be possible after restart.
	reload_node!(second_new_chain_monitor, second_nodes_0_deserialized, second_persister);
	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	// Now resend the payment, delivering the HTLC through to nodes[2] and claiming it there.
	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
	check_added_monitors!(nodes[0], 1);
	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);

	// Once claimed, the payment is completed and must no longer be retryable.
	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
	chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
	get_monitor!(nodes[0], chan_id_3).write(&mut chan_1_monitor_serialized).unwrap();
	nodes_0_serialized = nodes[0].node.encode();

	// Reload once more from post-claim state: the completed payment must remain
	// non-retryable across restart (the behavior this test exists to pin down).
	reload_node!(third_new_chain_monitor, third_nodes_0_deserialized, third_persister);
	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
780
+
781
#[test]
fn test_completed_payment_not_retryable_on_reload() {
	// Run the dust variant first, then the non-dust variant.
	for &use_dust in &[true, false] {
		do_test_completed_payment_not_retryable_on_reload(use_dust);
	}
}
786
+
787
+
566
788
fn do_test_dup_htlc_onchain_fails_on_reload ( persist_manager_post_event : bool , confirm_commitment_tx : bool , payment_timeout : bool ) {
567
789
// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
568
790
// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
0 commit comments