
Commit 30060c1

Calc decayed buckets to decide if we have valid historical points
When deciding whether the historical data tracker still has enough data to assign a score once the not-yet-applied decays are taken into account, we previously calculated the decayed point totals while walking the buckets, since we don't use the decayed buckets themselves anyway (to avoid losing precision). That is fine, except that, as written, it decayed individual buckets additional times.

Instead, here we actually calculate the full set of decayed buckets and use those to decide whether we have valid points. This costs some additional stack space and may in fact be slower, but it will be useful in the next commit and shouldn't be a huge change.
1 parent 869b71d commit 30060c1
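
To make the precision concern in the commit message concrete, here is a toy example in plain Rust (not LDK code; all values are made up): right-shifting individual u16 buckets drops low bits, so decaying before multiplying can give a different answer than multiplying first and decaying the combined total once.

```rust
// Toy illustration (not LDK code) of the precision loss the commit message
// refers to: right-shifting drops low bits, so the order of "decay" and
// "multiply" changes the result.
fn main() {
    let min_bucket: u16 = 3;
    let max_bucket: u16 = 5;
    let decays = 1u32;

    // Decay each bucket first, then multiply: (3 >> 1) * (5 >> 1) = 1 * 2 = 2.
    let decay_then_multiply =
        (min_bucket >> decays) as u64 * (max_bucket >> decays) as u64;

    // Multiply first, then decay the product (a product of two decayed values
    // carries twice the shift): (3 * 5) >> 2 = 15 >> 2 = 3.
    let multiply_then_decay = (min_bucket as u64 * max_bucket as u64) >> (2 * decays);

    assert_eq!(decay_then_multiply, 2);
    assert_eq!(multiply_then_decay, 3);
}
```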

File tree: 1 file changed (+27, −14)

lightning/src/routing/scoring.rs

```diff
@@ -597,7 +597,22 @@ struct HistoricalMinMaxBuckets<'a> {
 
 impl HistoricalMinMaxBuckets<'_> {
 	#[inline]
-	fn calculate_success_probability_times_billion(&self, required_decays: u32, payment_amt_64th_bucket: u8) -> Option<u64> {
+	fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+	-> ([u16; 8], [u16; 8], u32) {
+		let required_decays = now.duration_since(last_updated).as_secs()
+			.checked_div(half_life.as_secs())
+			.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+		let mut min_buckets = *self.min_liquidity_offset_history;
+		min_buckets.time_decay_data(required_decays);
+		let mut max_buckets = *self.max_liquidity_offset_history;
+		max_buckets.time_decay_data(required_decays);
+		(min_buckets.buckets, max_buckets.buckets, required_decays)
+	}
+
+	#[inline]
+	fn calculate_success_probability_times_billion<T: Time>(
+		&self, now: T, last_updated: T, half_life: Duration, payment_amt_64th_bucket: u8)
+	-> Option<u64> {
 		// If historical penalties are enabled, calculate the penalty by walking the set of
 		// historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
 		// each, calculate the probability of success given our payment amount, then total the
```
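
For reference, a minimal self-contained sketch of the decay scheme the new get_decayed_buckets helper relies on, assuming (as the surrounding code suggests) that each elapsed half-life halves every bucket via a right shift. ToyTracker and its time_decay_data method are hypothetical stand-ins, not the real HistoricalBucketRangeTracker:

```rust
use std::time::Duration;

// Hypothetical stand-in for HistoricalBucketRangeTracker (not the real type):
// eight u16 buckets, decayed by right-shifting once per elapsed half-life.
#[derive(Clone, Copy)]
struct ToyTracker { buckets: [u16; 8] }

impl ToyTracker {
    // Each half-life halves every bucket; shifts of 16 or more clear the
    // bucket entirely (checked_shr returns None for oversized shifts).
    fn time_decay_data(&mut self, half_lives: u32) {
        for b in self.buckets.iter_mut() {
            *b = b.checked_shr(half_lives).unwrap_or(0);
        }
    }
}

fn main() {
    let elapsed = Duration::from_secs(3 * 3600);
    let half_life = Duration::from_secs(3600);
    // Same saturating division as the diff: elapsed seconds / half-life
    // seconds, capped at u32::max_value() (and saturating if half_life is 0).
    let required_decays = elapsed.as_secs()
        .checked_div(half_life.as_secs())
        .map_or(u32::max_value(), |d| d.min(u32::max_value() as u64) as u32);

    let mut tracker = ToyTracker { buckets: [32, 16, 8, 4, 2, 1, 0, 0] };
    tracker.time_decay_data(required_decays);
    assert_eq!(tracker.buckets, [4, 2, 1, 0, 0, 0, 0, 0]);
}
```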
```diff
@@ -619,23 +634,22 @@ impl HistoricalMinMaxBuckets<'_> {
 		// less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
 		let mut total_valid_points_tracked = 0;
 
-		// Rather than actually decaying the individual buckets, which would lose precision, we
-		// simply track whether all buckets would be decayed to zero, in which case we treat it as
-		// if we had no data.
-		let mut is_fully_decayed = true;
-		let mut check_track_bucket_contains_undecayed_points =
-			|bucket_val: u16| if bucket_val.checked_shr(required_decays).unwrap_or(0) > 0 { is_fully_decayed = false; };
+		// Check if all our buckets are zero, once decayed and treat it as if we had no data. We
+		// don't actually use the decayed buckets, though, as that would lose precision.
+		let (decayed_min_buckets, decayed_max_buckets, required_decays) =
+			self.get_decayed_buckets(now, last_updated, half_life);
+		if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
+			return None;
+		}
 
 		for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-			check_track_bucket_contains_undecayed_points(*min_bucket);
 			for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
 				total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-				check_track_bucket_contains_undecayed_points(*max_bucket);
 			}
 		}
 		// If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
 		// it as if we were fully decayed.
-		if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 || is_fully_decayed {
+		if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
 			return None;
 		}
 
```
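
To see why the cutoff is 32*32: per the comment above, a single bucket value of 32 represents 1.0 in the fixed-point scheme, so one full point in min-times-max product space is 32 * 32 = 1024. A hedged toy version of the surviving check (the function name below is made up):

```rust
// Toy version of the surviving check (has_valid_points is a made-up name).
// It assumes the fixed-point scheme the comments describe: a bucket value of
// 32 represents 1.0, so one full point in min*max product space is 32 * 32.
fn has_valid_points(min_buckets: &[u16; 8], max_buckets: &[u16; 8], required_decays: u32) -> bool {
    let mut total_valid_points_tracked: u64 = 0;
    for (min_idx, min_bucket) in min_buckets.iter().enumerate() {
        // Only (min, max) combinations that can coexist are walked, hence the
        // shrinking take(8 - min_idx) window over the max buckets.
        for max_bucket in max_buckets.iter().take(8 - min_idx) {
            total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
        }
    }
    // Decay the total once, then require at least 1.0 points (32 * 32).
    total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) >= 32 * 32
}

fn main() {
    let min = [64u16, 0, 0, 0, 0, 0, 0, 0];
    let max = [32u16, 0, 0, 0, 0, 0, 0, 0];
    // 64 * 32 = 2048 raw points: enough after one decay (1024), not two (512).
    assert!(has_valid_points(&min, &max, 1));
    assert!(!has_valid_points(&min, &max, 2));
}
```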
```diff
@@ -942,9 +956,6 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 
 		if params.historical_liquidity_penalty_multiplier_msat != 0 ||
 			params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
-			let required_decays = self.now.duration_since(*self.last_updated).as_secs()
-				.checked_div(params.historical_no_updates_half_life.as_secs())
-				.map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
 			let payment_amt_64th_bucket = amount_msat * 64 / self.capacity_msat;
 			debug_assert!(payment_amt_64th_bucket <= 64);
 			if payment_amt_64th_bucket > 64 { return res; }
```
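
For the lines this hunk keeps, a quick worked example of the 64th-bucket arithmetic (values below are made up):

```rust
// Worked example (made-up values) of the bucket math this hunk keeps: the
// payment amount is mapped into 64ths of channel capacity by integer division.
fn main() {
    let capacity_msat: u64 = 1_000_000;
    let amount_msat: u64 = 250_000; // a quarter of the channel
    let payment_amt_64th_bucket = amount_msat * 64 / capacity_msat;
    assert_eq!(payment_amt_64th_bucket, 16); // a quarter of 64
    // A payment equal to full capacity lands exactly on the 64 cap that the
    // debug_assert in the diff guards.
    assert_eq!(capacity_msat * 64 / capacity_msat, 64);
}
```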
```diff
@@ -954,7 +965,9 @@
 				max_liquidity_offset_history: &self.max_liquidity_offset_history,
 			};
 			if let Some(cumulative_success_prob_times_billion) = buckets
-				.calculate_success_probability_times_billion(required_decays, payment_amt_64th_bucket as u8) {
+				.calculate_success_probability_times_billion(self.now, *self.last_updated,
+					params.historical_no_updates_half_life, payment_amt_64th_bucket as u8)
+			{
 				let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
 				res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
 					historical_negative_log10_times_2048, params.historical_liquidity_penalty_multiplier_msat,
```
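
Finally, a rough sketch of what happens with the returned probability, assuming the semantics the names suggest: negative_log10_times_2048 approximates -log10(p) in 1/2048 units, and combined_penalty_msat scales it by the configured multipliers. The float helper and the multiplier value below are illustrative stand-ins, not LDK's integer implementation:

```rust
// Illustrative stand-in (floating point, not LDK's integer approximation in
// the approx module) for -log10(numerator / denominator) in 1/2048 units.
fn neg_log10_times_2048(numerator: u64, denominator: u64) -> u64 {
    let prob = numerator as f64 / denominator as f64;
    (-prob.log10() * 2048.0) as u64
}

fn main() {
    // A ~50% cumulative success probability, expressed in parts per 2^30 as
    // in the diff's 1024 * 1024 * 1024 denominator.
    let prob_billionths: u64 = 1 << 29;
    let log_term = neg_log10_times_2048(prob_billionths + 1, 1024 * 1024 * 1024);
    // -log10(0.5) is about 0.301, so roughly 616 in 1/2048 units.
    assert!((610..625).contains(&log_term));

    // A hypothetical multiplier, scaled back down by 2048 to a penalty in
    // msat, in the spirit of combined_penalty_msat.
    let multiplier_msat: u64 = 10_000;
    let penalty_msat = log_term * multiplier_msat / 2048;
    println!("penalty ~{penalty_msat} msat"); // roughly 3_000 msat
}
```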
