Skip to content

Commit e354c73

Browse files
committed
uwu
1 parent cb088a7 commit e354c73

File tree

1 file changed

+57
-11
lines changed
  • compiler/rustc_type_ir/src/search_graph

1 file changed

+57
-11
lines changed

compiler/rustc_type_ir/src/search_graph/mod.rs

+57-11
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,9 @@ impl AvailableDepth {
179179
}
180180
}
181181

182-
/// All cycle heads a given goal depends on.
182+
/// All cycle heads a given goal depends on, ordered by their stack depth.
183+
///
184+
/// We therefore pop the cycle heads from highest to lowest.
183185
#[derive(Clone, Debug, PartialEq, Eq, Default)]
184186
struct CycleHeads {
185187
heads: BTreeSet<StackDepth>,
@@ -217,6 +219,9 @@ impl CycleHeads {
217219
}
218220
}
219221

222+
/// Update the cycle heads of a goal at depth `this` given the cycle heads
223+
/// of a nested goal. This merges the heads after filtering the parent goal
224+
/// itself.
220225
fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
221226
for &head in child.heads.iter() {
222227
match head.cmp(&this) {
@@ -264,6 +269,12 @@ impl<X: Cx> NestedGoals<X> {
264269
}
265270
}
266271

272+
/// Adds the nested goals of a nested goal, given that `step_kind` is the path from this
273+
/// goal to the nested goal.
274+
///
275+
/// If the path from this goal to the nested goal is inductive, the paths from this goal
276+
/// to all nested goals of that nested goal are also inductive. Otherwise the paths are
277+
/// the same as for the child.
267278
fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
268279
#[allow(rustc::potential_query_instability)]
269280
for (input, path_from_entry) in nested_goals.iter() {
@@ -332,8 +343,13 @@ struct StackEntry<X: Cx> {
332343
/// goals still on the stack.
333344
#[derive_where(Debug; X: Cx)]
334345
struct ProvisionalCacheEntry<X: Cx> {
346+
/// Whether evaluating the goal encountered overflow. This is used to
347+
/// disable the cache entry except if the last goal on the stack is
348+
/// already involved in this cycle.
335349
encountered_overflow: bool,
350+
/// All cycle heads this cache entry depends on.
336351
heads: CycleHeads,
352+
/// The path from the highest cycle head to this goal.
337353
path_from_head: PathKind,
338354
nested_goals: NestedGoals<X>,
339355
result: X::Result,
@@ -345,6 +361,10 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
345361
///
346362
/// An element is *deeper* in the stack if its index is *lower*.
347363
stack: IndexVec<StackDepth, StackEntry<X>>,
364+
/// The provisional cache contains entries for already computed goals which
365+
/// still depend on goals higher-up in the stack. We don't move them to the
366+
/// global cache and track them locally instead. A provisional cache entry
367+
/// is only valid until the result of one of its cycle heads changes.
348368
provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
349369

350370
_marker: PhantomData<D>,
@@ -589,6 +609,17 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
589609
/// provisional cache entry is involved in would stay the same when computing the
590610
/// goal without its cycle head on the stack. For more details, see the relevant
591611
/// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
612+
///
613+
/// This can be thought of as rotating the sub-tree of this provisional result and changing
614+
/// its entry point while making sure that all paths through this sub-tree stay the same.
615+
///
616+
///
617+
/// In case the popped cycle head failed to reach a fixpoint anything which depends on
618+
/// its provisional result is invalid. Actually discarding provisional cache entries in
619+
/// this case would cause hangs, so we instead change the result of dependent provisional
620+
/// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
621+
/// goals whose result doesn't actually depend on this cycle head, but that's acceptable
622+
/// to me.
592623
fn rebase_provisional_cache_entries(
593624
&mut self,
594625
cx: X,
@@ -614,31 +645,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
614645
// to the cache entry is not coinductive or if the path from
615646
// the cache entry to the current head is not coinductive.
616647
//
617-
// Both of these constraints could be lowered, but by only
648+
// Both of these constraints could be weakened, but by only
618649
// accepting coinductive paths we don't have to worry about
619650
// changing the cycle kind of the remaining cycles. We can
620651
// extend this in the future once there's a known issue
621652
// caused by it.
622-
if *path_from_head != PathKind::Coinductive {
623-
return false;
624-
}
625-
626-
if nested_goals.get(stack_entry.input).unwrap()
627-
!= UsageKind::Single(PathKind::Coinductive)
653+
if *path_from_head != PathKind::Coinductive
654+
|| nested_goals.get(stack_entry.input).unwrap()
655+
!= UsageKind::Single(PathKind::Coinductive)
628656
{
629657
return false;
630658
}
631659

660+
// Merge the cycle heads of the provisional cache entry and the
661+
// popped head. If the popped cycle head was a root, discard all
662+
// provisional cache entries which depend on it.
632663
heads.remove_highest_cycle_head();
633664
heads.merge(&stack_entry.heads);
634-
// If the popped cycle head was a root, discard all provisional
635-
// cache entries.
636665
let Some(head) = heads.opt_highest_cycle_head() else {
637666
return false;
638667
};
639668

669+
// As we've made sure that the path from the new highest cycle
670+
// head to the uses of the popped cycle head are fully coinductive,
671+
// we can be sure that the paths to all nested goals of the popped
672+
// cycle head remain the same. We can simply merge them.
640673
nested_goals.merge(&stack_entry.nested_goals);
674+
// We now care about the path from the next highest cycle head to the
675+
// provisional cache entry.
641676
*path_from_head = Self::stack_path_kind(cx, &self.stack, head);
677+
// Mutate the result of the provisional cache entry in case we did
678+
// not reach a fixpoint.
642679
*result = mutate_result(input, *result);
643680
true
644681
});
@@ -677,6 +714,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
677714
}
678715
}
679716

717+
// A provisional cache entry is only valid if the current path from its
718+
// highest cycle head to the goal is the same.
680719
if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
681720
// While we don't have to track the full depth of the provisional cache entry,
682721
// we do have to increment the required depth by one as we'd have already failed
@@ -718,11 +757,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
718757
return true;
719758
}
720759

760+
// If a nested goal of the global cache entry is on the stack, we would
761+
// definitely encounter a cycle.
721762
if stack.iter().any(|e| nested_goals.contains(e.input)) {
722763
debug!("cache entry not applicable due to stack");
723764
return false;
724765
}
725766

767+
// The global cache entry is also invalid if there's a provisional cache entry which
768+
// would apply for any of its nested goals.
726769
#[allow(rustc::potential_query_instability)]
727770
for (input, path_from_global_entry) in nested_goals.iter() {
728771
let Some(entries) = provisional_cache.get(&input) else {
@@ -746,6 +789,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
746789
continue;
747790
}
748791

792+
// A provisional cache entry only applies if the path from its highest head
793+
// matches the path when encountering the goal.
749794
let head = heads.highest_cycle_head();
750795
let full_path = match Self::stack_path_kind(cx, stack, head) {
751796
PathKind::Coinductive => path_from_global_entry,
@@ -817,7 +862,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
817862
// its state is the same regardless of whether we've used the
818863
// global cache or not.
819864
let reached_depth = self.stack.next_index().plus(additional_depth);
820-
// We don't move cycle participants to the global cache.
865+
// We don't move cycle participants to the global cache, so the
866+
// cycle heads are always empty.
821867
let heads = Default::default();
822868
Self::update_parent_goal(
823869
cx,

0 commit comments

Comments
 (0)