//! payments thereafter.

use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::keysinterface::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
@@ -23,7 +23,7 @@ use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::outbound_payment::Retry;
use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
-use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
+use crate::routing::router::{get_route, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters};
use crate::routing::scoring::ChannelUsage;
use crate::util::test_utils;
use crate::util::errors::APIError;
@@ -2838,3 +2838,138 @@ fn no_missing_sent_on_midpoint_reload() {
	do_no_missing_sent_on_midpoint_reload(false);
	do_no_missing_sent_on_midpoint_reload(true);
}
+
+fn do_claim_from_closed_chan(fail_payment: bool) {
+	// Previously, LDK would refuse to claim a payment if a channel on which the payment was
+	// received had been closed between when the HTLC was received and when we went to claim it.
+	// This makes sense in the payment case - why pay an on-chain fee to claim the HTLC when
+	// presumably the sender may retry later. Long ago it also reduced total code in the claim
+	// pipeline.
+	//
+	// However, this doesn't make sense if you're trying to do an atomic swap or some other
+	// protocol that requires atomicity with some other action - if your money got claimed
+	// elsewhere you need to be able to claim the HTLC in lightning no matter what. Further, this
+	// is an over-optimization - there should be a very, very low likelihood that a channel closes
+	// between when we receive the last HTLC for a payment and the user goes to claim the payment.
+	// Since we now have code to handle this anyway we should allow it.
+
+	// Build 4 nodes and send an MPP payment across two paths. By building the route manually we
+	// set the CLTVs on the two paths to different values, resulting in different claim deadlines.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+	let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+	create_announced_chan_between_nodes(&nodes, 2, 3);
+
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
+	let mut route_params = RouteParameters {
+		payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+			.with_features(nodes[1].node.invoice_features()),
+		final_value_msat: 10_000_000,
+	};
+	let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
+		None, &nodes[0].node.compute_inflight_htlcs()).unwrap();
+	// Make sure the route is ordered as the B->D path before C->D
+	route.paths.sort_by(|a, _| if a[0].pubkey == nodes[1].node.get_our_node_id() {
+		std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater });
+
+	// Note that we add an extra 1 in the send pipeline to compensate for any blocks found while
+	// the HTLC is being relayed.
+	route.paths[0][1].cltv_expiry_delta = TEST_FINAL_CLTV + 8;
+	route.paths[1][1].cltv_expiry_delta = TEST_FINAL_CLTV + 12;
+	let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1;
+
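+	// Send the payment and deliver each path's HTLCs to node D, sorting the resulting messages
+	// so the path through node B is handled first.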
+	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret),
+		PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap();
+	check_added_monitors(&nodes[0], 2);
+	let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+	send_msgs.sort_by(|a, _| {
+		let a_node_id =
+			if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() };
+		let node_b_id = nodes[1].node.get_our_node_id();
+		if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }
+	});
+
+	assert_eq!(send_msgs.len(), 2);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000,
+		payment_hash, Some(payment_secret), send_msgs.remove(0), false, None);
+	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000,
+		payment_hash, Some(payment_secret), send_msgs.remove(0), true, None);
+
+	// Ensure that the claim_deadline is correct, with the payment failing at exactly the given
+	// height.
+	connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1
+		- if fail_payment { 0 } else { 2 });
+	if fail_payment {
+		// We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
+		// and expire both immediately, though, by connecting another 4 blocks.
+		let reason = HTLCDestination::FailedPayment { payment_hash };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]);
+		connect_blocks(&nodes[3], 4);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]);
+		pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
+	} else {
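+		// Have node B force-close the B<->D channel and confirm its commitment transaction on
+		// node D, so the HTLC on the B->D path now only exists on-chain.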
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id()).unwrap();
+		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+		check_closed_broadcast(&nodes[1], 1, true);
+		let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(bs_tx.len(), 1);
+
+		mine_transaction(&nodes[3], &bs_tx[0]);
+		check_added_monitors(&nodes[3], 1);
+		check_closed_broadcast(&nodes[3], 1, true);
+		check_closed_event(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false);
+
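+		// Even though the channel to D has closed, the payment should still be claimable: node D
+		// claims the closed-channel HTLC on-chain by spending B's commitment transaction.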
+		nodes[3].node.claim_funds(payment_preimage);
+		check_added_monitors(&nodes[3], 2);
+		expect_payment_claimed!(nodes[3], payment_hash, 10_000_000);
+
+		let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(ds_tx.len(), 1);
+		check_spends!(&ds_tx[0], &bs_tx[0]);
+
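+		// Once node B sees D's on-chain claim it learns the preimage and claims the inbound HTLC
+		// back from node A off-chain over the still-open A<->B channel.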
+		mine_transactions(&nodes[1], &[&bs_tx[0], &ds_tx[0]]);
+		check_added_monitors(&nodes[1], 1);
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, true);
+
+		let bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
+		check_added_monitors(&nodes[1], 1);
+		assert_eq!(bs_claims.len(), 1);
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
+		} else { panic!(); }
+
+		expect_payment_sent!(nodes[0], payment_preimage);
+
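+		// The C<->D channel is still open, so node D claims the second HTLC off-chain and the
+		// fulfill propagates back through node C to node A.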
+		let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+		assert_eq!(ds_claim_msgs.len(), 1);
+		let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] {
+			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+			check_added_monitors(&nodes[2], 1);
+			commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+			expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+			cs_claim_msgs
+		} else { panic!(); };
+
+		assert_eq!(cs_claim_msgs.len(), 1);
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true);
+		} else { panic!(); }
+
+		expect_payment_path_successful!(nodes[0]);
+	}
+}
+
+#[test]
+fn claim_from_closed_chan() {
+	do_claim_from_closed_chan(true);
+	do_claim_from_closed_chan(false);
+}