@@ -62,7 +62,7 @@ use util::logger::Logger;
 use util::time::Time;
 
 use prelude::*;
-use core::fmt;
+use core::{cmp, fmt};
 use core::cell::{RefCell, RefMut};
 use core::convert::TryInto;
 use core::ops::{Deref, DerefMut};
@@ -436,6 +436,16 @@ pub struct ProbabilisticScoringParameters {
 	/// [`liquidity_penalty_amount_multiplier_msat`]: Self::liquidity_penalty_amount_multiplier_msat
 	pub historical_liquidity_penalty_amount_multiplier_msat: u64,
 
+	/// If we aren't learning any new datapoints for a channel, the historical liquidity bounds
+	/// tracking can simply live on with increasingly stale data. Instead, when a channel has not
+	/// seen a liquidity estimate update for this amount of time, the historical datapoints are
+	/// decayed by half.
+	///
+	/// Note that after 16 or more half lives all historical data will be completely gone.
+	///
+	/// Default value: 14 days
+	pub historical_no_updates_half_life: Duration,
+
 	/// Manual penalties used for the given nodes. Allows to set a particular penalty for a given
 	/// node. Note that a manual penalty of `u64::max_value()` means the node would not ever be
 	/// considered during path finding.
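As a sanity check on the half-life behaviour documented above — a minimal standalone sketch, not LDK code: each half-life right-shifts every bucket once (the actual decay lands in `HistoricalBucketRangeTracker::time_decay_data` in the next hunk), and since buckets are `u16` values with each datapoint worth 32, sixteen shifts always reach zero, which is what the "16 or more half lives" note means.

    // Standalone sketch of the shift-based decay; `buckets` stands in for
    // HistoricalBucketRangeTracker's internal [u16; 8] array.
    fn time_decay_data(buckets: &mut [u16; 8], half_lives: u32) {
        for e in buckets.iter_mut() {
            // checked_shr returns None once the shift count reaches u16's bit
            // width (16), so absurdly large decay counts safely map to zero.
            *e = e.checked_shr(half_lives).unwrap_or(0);
        }
    }

    fn main() {
        let mut buckets = [32u16, 0, 0, 0, 0, 0, 0, u16::max_value()];
        time_decay_data(&mut buckets, 1);
        assert_eq!(buckets[0], 16); // one half-life halves every bucket
        time_decay_data(&mut buckets, 15);
        // 16 half-lives in total: even a saturated u16 bucket is fully gone.
        assert_eq!(buckets, [0u16; 8]);
    }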
@@ -509,10 +519,89 @@ impl HistoricalBucketRangeTracker {
 			self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
 		}
 	}
+	/// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+	/// datapoints as we receive newer information.
+	fn time_decay_data(&mut self, half_lives: u32) {
+		for e in self.buckets.iter_mut() {
+			*e = e.checked_shr(half_lives).unwrap_or(0);
+		}
+	}
 }
 
 impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
 
+struct HistoricalMinMaxBuckets<'a> {
+	min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+	max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
+}
+
+impl HistoricalMinMaxBuckets<'_> {
+	#[inline]
+	fn calculate_success_probability_times_billion(&self, required_decays: u32, payment_amt_64th_bucket: u8) -> Option<u64> {
+		// If historical penalties are enabled, calculate the penalty by walking the set of
+		// historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
+		// each, calculate the probability of success given our payment amount, then total the
+		// weighted average probability of success.
+		//
+		// We use a sliding scale to decide which point within a given bucket will be compared to
+		// the amount being sent - for lower-bounds, the amount being sent is compared to the lower
+		// edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
+		// bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
+		// comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
+		// of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
+		// penalties to channels at the edges.
+		//
+		// If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
+		// such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
+		// for a 1 BTC channel!).
+		//
+		// If we used the middle of each bucket we'd never assign any penalty at all when sending
+		// less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
+		let mut total_valid_points_tracked = 0;
+
+		// Rather than actually decaying the individual buckets, which would lose precision, we
+		// simply track whether all buckets would be decayed to zero, in which case we treat it as
+		// if we had no data.
+		let mut is_fully_decayed = true;
+		let mut check_track_bucket_contains_undecayed_points =
+			|bucket_val: u16| if bucket_val.checked_shr(required_decays).unwrap_or(0) > 0 { is_fully_decayed = false; };
+
+		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+			check_track_bucket_contains_undecayed_points(*min_bucket);
+			for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
+				total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+				check_track_bucket_contains_undecayed_points(*max_bucket);
+			}
+		}
+		// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
+		// it as if we were fully decayed.
+		if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32 * 32 || is_fully_decayed {
+			return None;
+		}
+
+		let mut cumulative_success_prob_times_billion = 0;
+		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+			for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
+				let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
+					* 1024 * 1024 / total_valid_points_tracked;
+				let min_64th_bucket = min_idx as u8 * 9;
+				let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
+				if payment_amt_64th_bucket > max_64th_bucket {
+					// Success probability 0, the payment amount is above the max liquidity
+				} else if payment_amt_64th_bucket <= min_64th_bucket {
+					cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
+				} else {
+					cumulative_success_prob_times_billion += bucket_prob_times_million *
+						((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
+						((max_64th_bucket - min_64th_bucket) as u64);
+				}
+			}
+		}
+
+		Some(cumulative_success_prob_times_billion)
+	}
+}
+
 /// Accounting for channel liquidity balance uncertainty.
 ///
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
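To make the fixed-point bucket math above concrete, here is a hand-worked case as a sketch (the variable names mirror the patch; the scenario is invented): one datapoint in min bucket 0 and one in max bucket 0 — a channel whose liquidity could be anywhere — with a payment of half the channel's capacity should come out at a 50% success estimate.

    fn main() {
        // One datapoint each in min bucket 0 and max bucket 0; a datapoint is
        // worth 32, so this single (min, max) pair is exactly 1.0 valid points.
        let (min_bucket, max_bucket) = (32u64, 32u64);
        let total_valid_points_tracked = min_bucket * max_bucket; // 32 * 32 = 1024

        // Sliding-scale comparison points for (min_idx, max_idx) = (0, 0): the
        // lower bound compares at 0/64ths, the upper bound at 64/64ths.
        let min_64th_bucket = 0u64 * 9;
        let max_64th_bucket = (7 - 0u64) * 9 + 1; // 64

        // A payment of half the channel's capacity lands in 64th-bucket 32.
        let payment_amt_64th_bucket = 32u64;

        let bucket_prob_times_million =
            min_bucket * max_bucket * 1024 * 1024 / total_valid_points_tracked;
        let success_prob_times_billion = bucket_prob_times_million
            * (max_64th_bucket - payment_amt_64th_bucket) * 1024
            / (max_64th_bucket - min_64th_bucket);

        // "billion" is 2^30 in this scheme; an unknown channel gives ~50%.
        assert_eq!(success_prob_times_billion, (1u64 << 30) / 2);
    }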
@@ -645,6 +734,7 @@ impl ProbabilisticScoringParameters {
 			liquidity_penalty_amount_multiplier_msat: 0,
 			historical_liquidity_penalty_multiplier_msat: 0,
 			historical_liquidity_penalty_amount_multiplier_msat: 0,
+			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
 			manual_node_penalties: HashMap::new(),
 			anti_probing_penalty_msat: 0,
 			considered_impossible_penalty_msat: 0,
@@ -670,6 +760,7 @@ impl Default for ProbabilisticScoringParameters {
 			liquidity_penalty_amount_multiplier_msat: 192,
 			historical_liquidity_penalty_multiplier_msat: 10_000,
 			historical_liquidity_penalty_amount_multiplier_msat: 64,
+			historical_no_updates_half_life: Duration::from_secs(60 * 60 * 24 * 14),
 			manual_node_penalties: HashMap::new(),
 			anti_probing_penalty_msat: 250,
 			considered_impossible_penalty_msat: 1_0000_0000_000,
@@ -791,35 +882,27 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 
 		if params.historical_liquidity_penalty_multiplier_msat != 0 ||
 			params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
-			// If historical penalties are enabled, calculate the penalty by walking the set of
-			// historical liquidity bucket (min, max) combinations (where min_idx < max_idx)
-			// and, for each, calculate the probability of success given our payment amount, then
-			// total the weighted average probability of success.
-			//
-			// We use a sliding scale to decide which point within a given bucket will be compared
-			// to the amount being sent - for lower-bounds, the amount being sent is compared to
-			// the lower edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of
-			// the last bucket (i.e. 9 times the index, or 63), with each bucket in between
-			// increasing the comparison point by 1/64th. For upper-bounds, the same applies,
-			// however with an offset of 1/64th (i.e. starting at one and ending at 64). This
-			// avoids failing to assign penalties to channels at the edges.
-			//
-			// If we used the bottom edge of buckets, we'd end up never assigning any penalty at
-			// all to such a channel when sending less than ~0.19% of the channel's capacity (e.g.
-			// ~200k sats for a 1 BTC channel!).
-			//
-			// If we used the middle of each bucket we'd never assign any penalty at all when
-			// sending less than 1/16th of a channel's capacity, or 1/8th if we used the top of the
-			// bucket.
-			let mut total_valid_points_tracked = 0;
-			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-				for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
-					total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-				}
-			}
-			if total_valid_points_tracked == 0 {
-				// If we don't have any valid points, redo the non-historical calculation with no
-				// liquidity bounds tracked and the historical penalty multipliers.
+			let required_decays = self.now.duration_since(*self.last_updated).as_secs()
+				.checked_div(params.historical_no_updates_half_life.as_secs())
+				.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+			let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
+			debug_assert!(payment_amt_64th_bucket <= 64);
+			if payment_amt_64th_bucket > 64 { return res; }
+
+			let buckets = HistoricalMinMaxBuckets {
+				min_liquidity_offset_history: &self.min_liquidity_offset_history,
+				max_liquidity_offset_history: &self.max_liquidity_offset_history,
+			};
+			if let Some(cumulative_success_prob_times_billion) = buckets
+				.calculate_success_probability_times_billion(required_decays, payment_amt_64th_bucket as u8) {
+				let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
+				res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
+					historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
+					params.historical_liquidity_penalty_amount_multiplier_msat));
+			} else {
+				// If we don't have any valid points (or, once decayed, we have less than a full
+				// point), redo the non-historical calculation with no liquidity bounds tracked and
+				// the historical penalty multipliers.
 				let max_capacity = self.capacity_msat.saturating_sub(amount_msat).saturating_add(1);
 				let negative_log10_times_2048 =
 					approx::negative_log10_times_2048(max_capacity, self.capacity_msat.saturating_add(1));
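The `required_decays` expression above divides the time since the last update by the configured half-life and saturates rather than overflowing; a minimal sketch of the same arithmetic, with the free function invented for illustration:

    use core::cmp;
    use core::time::Duration;

    // Elapsed time since the last update, in half-lives, saturating at u32::MAX.
    // checked_div returns None for a zero half-life, which is treated the same
    // as an enormous elapsed time: everything decays.
    fn required_decays(since_last_update: Duration, half_life: Duration) -> u32 {
        since_last_update.as_secs()
            .checked_div(half_life.as_secs())
            .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32)
    }

    fn main() {
        let half_life = Duration::from_secs(60 * 60 * 24 * 14); // the 14-day default
        assert_eq!(required_decays(Duration::from_secs(60 * 60 * 24 * 13), half_life), 0);
        assert_eq!(required_decays(Duration::from_secs(60 * 60 * 24 * 28), half_life), 2);
        assert_eq!(required_decays(Duration::from_secs(1), Duration::from_secs(0)), u32::max_value());
    }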
@@ -828,33 +911,6 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 					params.historical_liquidity_penalty_amount_multiplier_msat));
 				return res;
 			}
-
-			let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
-			debug_assert!(payment_amt_64th_bucket <= 64);
-			if payment_amt_64th_bucket > 64 { return res; }
-
-			let mut cumulative_success_prob_times_billion = 0;
-			for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-				for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
-					let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
-						* 1024 * 1024 / total_valid_points_tracked;
-					let min_64th_bucket = min_idx as u64 * 9;
-					let max_64th_bucket = (7 - max_idx as u64) * 9 + 1;
-					if payment_amt_64th_bucket > max_64th_bucket {
-						// Success probability 0, the payment amount is above the max liquidity
-					} else if payment_amt_64th_bucket <= min_64th_bucket {
-						cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
-					} else {
-						cumulative_success_prob_times_billion += bucket_prob_times_million *
-							(max_64th_bucket - payment_amt_64th_bucket) * 1024 /
-							(max_64th_bucket - min_64th_bucket);
-					}
-				}
-			}
-			let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
-			res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
-				historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
-				params.historical_liquidity_penalty_amount_multiplier_msat));
 		}
 
 		res
@@ -927,6 +983,12 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>,
 	}
 
 	fn update_history_buckets(&mut self) {
+		let half_lives = self.now.duration_since(*self.last_updated).as_secs()
+			.checked_div(self.params.historical_no_updates_half_life.as_secs())
+			.map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
+		self.min_liquidity_offset_history.time_decay_data(half_lives);
+		self.max_liquidity_offset_history.time_decay_data(half_lives);
+
 		debug_assert!(*self.min_liquidity_offset_msat <= self.capacity_msat);
 		self.min_liquidity_offset_history.track_datapoint(
 			// Ensure the bucket index we pass is in the range [0, 7], even if the liquidity offset
@@ -949,8 +1011,8 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>,
 		} else {
 			self.decayed_offset_msat(*self.max_liquidity_offset_msat)
 		};
-		*self.last_updated = self.now;
 		self.update_history_buckets();
+		*self.last_updated = self.now;
 	}
 
 	/// Adjusts the upper bound of the channel liquidity balance in this direction.
@@ -961,8 +1023,8 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTracker>,
 		} else {
 			self.decayed_offset_msat(*self.min_liquidity_offset_msat)
 		};
-		*self.last_updated = self.now;
 		self.update_history_buckets();
+		*self.last_updated = self.now;
 	}
 }
 
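Note the reordering in the two hunks above: `update_history_buckets()` now derives its decay count from `self.now.duration_since(*self.last_updated)`, so the `*self.last_updated = self.now;` write has to move after the call — stamping the new time first would always compute zero elapsed half-lives and the history buckets would never decay.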
@@ -2479,6 +2541,7 @@ mod tests {
 		let params = ProbabilisticScoringParameters {
 			historical_liquidity_penalty_multiplier_msat: 1024,
 			historical_liquidity_penalty_amount_multiplier_msat: 1024,
+			historical_no_updates_half_life: Duration::from_secs(10),
 			..ProbabilisticScoringParameters::zero_penalty()
 		};
 		let mut scorer = ProbabilisticScorer::new(params, &network_graph, &logger);
@@ -2500,6 +2563,11 @@ mod tests {
 		// still remember that there was some failure in the past, and assign a non-0 penalty.
 		scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::<Vec<_>>(), 43);
 		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+
+		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
+		// gone), and check that we're back to where we started.
+		SinceEpoch::advance(Duration::from_secs(10 * 16));
+		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
 	}
 
 	#[test]