@@ -731,15 +731,34 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
	/// Note that this writes roughly one line per channel for which we have a liquidity estimate,
	/// which may be a substantial amount of log output.
	pub fn debug_log_liquidity_stats(&self) {
+		let now = T::now();
+
		let graph = self.network_graph.read_only();
		for (scid, liq) in self.channel_liquidities.iter() {
			if let Some(chan_debug) = graph.channels().get(scid) {
				let log_direction = |source, target| {
					if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
						let amt = directed_info.effective_capacity().as_msat();
						let dir_liq = liq.as_directed(source, target, amt, &self.params);
-						log_debug!(self.logger, "Liquidity from {:?} to {:?} via {} is in the range ({}, {})",
-							source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat());
+
+						let buckets = HistoricalMinMaxBuckets {
+							min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
+							max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
+						};
+						let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now,
+							*dir_liq.last_updated, self.params.historical_no_updates_half_life);
+
+						log_debug!(self.logger, core::concat!(
+							"Liquidity from {} to {} via {} is in the range ({}, {}).\n",
+							"\tHistorical min liquidity octile relative probabilities: {} {} {} {} {} {} {} {}\n",
+							"\tHistorical max liquidity octile relative probabilities: {} {} {} {} {} {} {} {}"),
+							source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat(),
+							min_buckets[0], min_buckets[1], min_buckets[2], min_buckets[3],
+							min_buckets[4], min_buckets[5], min_buckets[6], min_buckets[7],
+							// Note that the liquidity buckets are an offset from the edge, so we
+							// reverse the max order to get the probabilities from zero.
+							max_buckets[7], max_buckets[6], max_buckets[5], max_buckets[4],
+							max_buckets[3], max_buckets[2], max_buckets[1], max_buckets[0]);
					} else {
						log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
					}
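
Both histograms store their octile buckets as offsets from their own edge of the channel: the min-liquidity buckets count up from zero, while the max-liquidity buckets count down from full capacity. Printing the max buckets in reverse, as the new log statement does, puts both rows on the same zero-to-capacity axis. A minimal sketch of that reindexing, assuming plain 8-element `u16` arrays like those logged above:

    // Hypothetical max-offset histogram: index 0 is the bucket nearest full
    // capacity, index 7 the bucket nearest zero liquidity.
    let max_buckets: [u16; 8] = [0, 0, 0, 0, 0, 0, 0, 32];
    // Reversing orders the buckets from zero liquidity upwards, matching the
    // ordering of the min-liquidity histogram.
    let mut from_zero = max_buckets;
    from_zero.reverse();
    assert_eq!(from_zero, [32, 0, 0, 0, 0, 0, 0, 0]);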
@@ -770,6 +789,53 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
		None
	}

+	/// Query the historical estimated minimum and maximum liquidity available for sending a
+	/// payment over the channel with `scid` towards the given `target` node.
+	///
+	/// Returns two sets of 8 buckets. The first set describes the octiles for lower-bound
+	/// liquidity estimates; the second set describes the octiles for upper-bound liquidity
+	/// estimates. Each bucket describes the relative frequency at which we've seen a liquidity
+	/// bound in the octile relative to the channel's total capacity, on an arbitrary scale.
+	/// Because the values are slowly decayed, more recent data points are weighted more heavily
+	/// than older data points.
+	///
+	/// When scoring, the estimated probability that an upper-/lower-bound lies in a given octile
+	/// relative to the channel's total capacity is calculated by dividing that bucket's value by
+	/// the total of all buckets for the given bound.
+	///
+	/// For example, a value of `[0, 0, 0, 0, 0, 0, 0, 32]` indicates that we believe the
+	/// probability of a bound being in the top octile to be 100%, and have never (recently) seen
+	/// it in any other octile. A value of `[31, 0, 0, 0, 0, 0, 0, 32]` indicates we've seen the
+	/// bound in both the top and bottom octiles, with roughly similar (recent) frequency.
+	///
+	/// Because the data points are decayed slowly over time, values will eventually decay to
+	/// `Some(([0; 8], [0; 8]))`.
+	pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
+	-> Option<([u16; 8], [u16; 8])> {
+		let graph = self.network_graph.read_only();
+
+		if let Some(chan) = graph.channels().get(&scid) {
+			if let Some(liq) = self.channel_liquidities.get(&scid) {
+				if let Some((directed_info, source)) = chan.as_directed_to(target) {
+					let amt = directed_info.effective_capacity().as_msat();
+					let dir_liq = liq.as_directed(source, target, amt, &self.params);
+
+					let buckets = HistoricalMinMaxBuckets {
+						min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
+						max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
+					};
+					let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
+						*dir_liq.last_updated, self.params.historical_no_updates_half_life);
+					// Note that the liquidity buckets are an offset from the edge, so we reverse
+					// the max order to get the probabilities from zero.
+					max_buckets.reverse();
+					return Some((min_buckets, max_buckets));
+				}
+			}
+		}
+		None
+	}
+
	/// Marks the node with the given `node_id` as banned, i.e.,
	/// it will be avoided during path finding.
	pub fn add_banned(&mut self, node_id: &NodeId) {
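
As a usage sketch for the new accessor (hypothetical setup: `scorer`, `scid`, and `target` are assumed to already exist; constructing a `ProbabilisticScorer` is elided), the per-octile probabilities described in the docs fall out of dividing each bucket by the bound's total:

    if let Some((min_buckets, _max_buckets)) =
        scorer.historical_estimated_channel_liquidity_probabilities(scid, &target)
    {
        // Sum the min-bound histogram; each bucket divided by this total is the
        // estimated probability of the lower bound lying in that octile.
        let total: u32 = min_buckets.iter().map(|b| u32::from(*b)).sum();
        if total != 0 {
            // For the doc example [31, 0, 0, 0, 0, 0, 0, 32] this yields
            // roughly 49% for the bottom octile and 51% for the top one.
            let octile_probs: Vec<f64> = min_buckets.iter()
                .map(|b| f64::from(*b) / f64::from(total))
                .collect();
            println!("min-bound octile probabilities: {:?}", octile_probs);
        }
    }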
@@ -2684,19 +2750,32 @@ mod tests {
		};
		// With no historical data the normal liquidity penalty calculation is used.
		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			None);

		scorer.payment_path_failed(&payment_path_for_amount(1).iter().collect::<Vec<_>>(), 42);
		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 2048);
+		// The "it failed" increment is 32, and the probability should lie entirely in the first
+		// octile.
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0])));

		// Even after we tell the scorer we definitely have enough available liquidity, it will
		// still remember that there was some failure in the past, and assign a non-0 penalty.
		scorer.payment_path_failed(&payment_path_for_amount(1000).iter().collect::<Vec<_>>(), 43);
		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 198);
+		// The first octile should have decayed just slightly, and the last octile gains a new
+		// data point.
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));

		// Advance the time forward 16 half-lives (which the docs claim will ensure all data is
		// gone), and check that we're back to where we started.
		SinceEpoch::advance(Duration::from_secs(10 * 16));
		assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage), 47);
+		// Once fully decayed we still have data, but it's all-0s. In the future we may remove
+		// the data entirely instead.
+		assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
+			Some(([0; 8], [0; 8])));
	}

	#[test]
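
One note on the 16-half-life figure the test leans on: the buckets are `u16` values, so sixteen successive halvings drive even `u16::MAX` to zero. A sketch of that arithmetic, assuming each elapsed half-life simply halves every bucket (the scorer's exact fixed-point decay may differ in detail):

    let mut bucket = u16::MAX; // worst case: 65535
    for _ in 0..16 {
        bucket /= 2; // 65535 -> 32767 -> 16383 -> ... -> 1 -> 0
    }
    assert_eq!(bucket, 0);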