@@ -179,7 +179,9 @@ impl AvailableDepth {
     }
 }
 
-/// All cycle heads a given goal depends on.
+/// All cycle heads a given goal depends on, ordered by their stack depth.
+///
+/// We therefore pop the cycle heads from highest to lowest.
 #[derive(Clone, Debug, PartialEq, Eq, Default)]
 struct CycleHeads {
     heads: BTreeSet<StackDepth>,
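The ordering guarantee in the new doc comment comes from `BTreeSet` iterating in ascending order. A minimal sketch, assuming `StackDepth` behaves like a plain integer index (a hypothetical simplification, not the rustc type):

```rust
use std::collections::BTreeSet;

fn main() {
    // Cycle heads keyed by stack depth; BTreeSet keeps them sorted ascending.
    let mut heads: BTreeSet<usize> = [3, 1, 7].into_iter().collect();
    // `pop_last` removes the highest depth first, so heads are popped
    // from highest to lowest: 7, then 3, then 1.
    while let Some(head) = heads.pop_last() {
        println!("popping cycle head at depth {head}");
    }
}
```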
@@ -217,6 +219,9 @@ impl CycleHeads {
         }
     }
 
+    /// Update the cycle heads of a goal at depth `this` given the cycle heads
+    /// of a nested goal. This merges the heads after filtering the parent goal
+    /// itself.
     fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) {
         for &head in child.heads.iter() {
             match head.cmp(&this) {
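A hedged sketch of what this merge does, again modeling `StackDepth` as a plain `usize`: heads deeper than the parent are inherited, the parent itself is filtered out, and heads above the parent cannot occur because the nested goal sits directly on top of it.

```rust
use std::cmp::Ordering;
use std::collections::BTreeSet;

fn extend_from_child(heads: &mut BTreeSet<usize>, this: usize, child: &BTreeSet<usize>) {
    for &head in child {
        match head.cmp(&this) {
            // A head deeper in the stack than the parent: the parent
            // now transitively depends on it as well.
            Ordering::Less => {
                heads.insert(head);
            }
            // The parent goal itself is filtered out.
            Ordering::Equal => {}
            // The nested goal cannot depend on goals pushed after it.
            Ordering::Greater => unreachable!(),
        }
    }
}
```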
@@ -264,6 +269,12 @@ impl<X: Cx> NestedGoals<X> {
         }
     }
 
+    /// Adds the nested goals of a nested goal, given that the path from this goal
+    /// to that nested goal is `step_kind`.
+    ///
+    /// If the path from this goal to the nested goal is inductive, the paths from this goal
+    /// to all nested goals of that nested goal are also inductive. Otherwise the paths are
+    /// the same as for the child.
     fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) {
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_entry) in nested_goals.iter() {
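The path rule in the new doc comment can be expressed as a small combinator. A sketch under the assumption that `PathKind` has exactly the two variants used in this file:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PathKind {
    Coinductive,
    Inductive,
}

// Combine the step from the parent to the child with a path recorded
// inside the child: one inductive step makes the whole path inductive,
// while a coinductive step leaves the child's path unchanged.
fn extend_path(step_kind: PathKind, path_from_entry: PathKind) -> PathKind {
    match step_kind {
        PathKind::Inductive => PathKind::Inductive,
        PathKind::Coinductive => path_from_entry,
    }
}
```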
@@ -332,8 +343,13 @@ struct StackEntry<X: Cx> {
 /// goals still on the stack.
 #[derive_where(Debug; X: Cx)]
 struct ProvisionalCacheEntry<X: Cx> {
+    /// Whether evaluating the goal encountered overflow. This is used to
+    /// disable the cache entry except if the last goal on the stack is
+    /// already involved in this cycle.
     encountered_overflow: bool,
+    /// All cycle heads this cache entry depends on.
     heads: CycleHeads,
+    /// The path from the highest cycle head to this goal.
     path_from_head: PathKind,
     nested_goals: NestedGoals<X>,
     result: X::Result,
@@ -345,6 +361,10 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
     ///
     /// An element is *deeper* in the stack if its index is *lower*.
     stack: IndexVec<StackDepth, StackEntry<X>>,
+    /// The provisional cache contains entries for already computed goals which
+    /// still depend on goals higher-up in the stack. We don't move them to the
+    /// global cache but instead track them locally. A provisional cache entry
+    /// is only valid until the result of one of its cycle heads changes.
     provisional_cache: HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
 
     _marker: PhantomData<D>,
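A condensed model of how these two fields relate, using hypothetical stand-in types rather than the actual `Cx` machinery: the cache is keyed by goal input, and each entry records the cycle heads whose results it is conditional on.

```rust
use std::collections::{BTreeSet, HashMap};

// Stand-ins: goal inputs and results are plain strings, depths plain usizes.
struct ProvisionalEntry {
    encountered_overflow: bool,
    heads: BTreeSet<usize>, // depths of the cycle heads this entry depends on
    result: String,
}

struct SearchGraphModel {
    // In-progress goals; deeper entries have lower indices.
    stack: Vec<String>,
    // Finished goals which still depend on goals on the stack. Entries
    // become invalid once the result of one of their heads changes.
    provisional_cache: HashMap<String, Vec<ProvisionalEntry>>,
}
```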
@@ -589,6 +609,17 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
     /// provisional cache entry is involved in would stay the same when computing the
     /// goal without its cycle head on the stack. For more details, see the relevant
     /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html).
+    ///
+    /// This can be thought of as rotating the sub-tree of this provisional result and
+    /// changing its entry point while making sure that all paths through this sub-tree
+    /// stay the same.
+    ///
+    /// In case the popped cycle head failed to reach a fixpoint, anything which depends on
+    /// its provisional result is invalid. Actually discarding provisional cache entries in
+    /// this case would cause hangs, so we instead change the result of dependent provisional
+    /// cache entries to also be ambiguous. This causes some undesirable ambiguity for nested
+    /// goals whose result doesn't actually depend on this cycle head, but that's acceptable
+    /// to me.
     fn rebase_provisional_cache_entries(
         &mut self,
         cx: X,
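A minimal sketch of the "force ambiguity instead of discarding" strategy from the doc comment above, with a hypothetical three-valued result type standing in for `X::Result`:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum SolverResult {
    Yes,
    No,
    Ambiguous,
}

// When the popped cycle head failed to reach a fixpoint, every rebased
// entry keeps existing but its result is overwritten with ambiguity;
// dropping the entries instead could reintroduce hangs.
fn rebase_result(fixpoint_reached: bool, result: SolverResult) -> SolverResult {
    if fixpoint_reached { result } else { SolverResult::Ambiguous }
}
```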
@@ -614,31 +645,37 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 // to the cache entry is not coinductive or if the path from
                 // the cache entry to the current head is not coinductive.
                 //
-                // Both of these constraints could be lowered, but by only
+                // Both of these constraints could be weakened, but by only
                 // accepting coinductive paths we don't have to worry about
                 // changing the cycle kind of the remaining cycles. We can
                 // extend this in the future once there's a known issue
                 // caused by it.
-                if *path_from_head != PathKind::Coinductive {
-                    return false;
-                }
-
-                if nested_goals.get(stack_entry.input).unwrap()
-                    != UsageKind::Single(PathKind::Coinductive)
+                if *path_from_head != PathKind::Coinductive
+                    || nested_goals.get(stack_entry.input).unwrap()
+                        != UsageKind::Single(PathKind::Coinductive)
                 {
                     return false;
                 }
 
+                // Merge the cycle heads of the provisional cache entry and the
+                // popped head. If the popped cycle head was a root, discard all
+                // provisional cache entries which depend on it.
                 heads.remove_highest_cycle_head();
                 heads.merge(&stack_entry.heads);
-                // If the popped cycle head was a root, discard all provisional
-                // cache entries.
                 let Some(head) = heads.opt_highest_cycle_head() else {
                     return false;
                 };
 
+                // As we've made sure that the paths from the new highest cycle
+                // head to the uses of the popped cycle head are fully coinductive,
+                // we can be sure that the paths to all nested goals of the popped
+                // cycle head remain the same. We can simply merge them.
                 nested_goals.merge(&stack_entry.nested_goals);
+                // We now care about the path from the next highest cycle head to the
+                // provisional cache entry.
                 *path_from_head = Self::stack_path_kind(cx, &self.stack, head);
+                // Mutate the result of the provisional cache entry in case we did
+                // not reach a fixpoint.
                 *result = mutate_result(input, *result);
                 true
             });
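The combined condition introduced in this hunk, restated as a standalone predicate (a sketch; `UsageKind` is assumed to track whether a head was only ever used along a single path kind or along mixed ones):

```rust
#[derive(Clone, Copy, PartialEq, Eq)]
enum PathKind {
    Coinductive,
    Inductive,
}

#[derive(Clone, Copy, PartialEq, Eq)]
enum UsageKind {
    Single(PathKind),
    Mixed,
}

// An entry may only be rebased if the path from its highest cycle head
// to the entry is coinductive *and* all of its uses of the popped head
// are coinductive; otherwise rebasing could change the kind of the
// remaining cycles.
fn can_rebase(path_from_head: PathKind, usage_of_popped_head: UsageKind) -> bool {
    path_from_head == PathKind::Coinductive
        && usage_of_popped_head == UsageKind::Single(PathKind::Coinductive)
}
```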
@@ -677,6 +714,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 }
             }
 
+            // A provisional cache entry is only valid if the current path from its
+            // highest cycle head to the goal is the same.
             if path_from_head == Self::stack_path_kind(cx, &self.stack, head) {
                 // While we don't have to track the full depth of the provisional cache entry,
                 // we do have to increment the required depth by one as we'd have already failed
@@ -718,11 +757,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             return true;
         }
 
+        // If a nested goal of the global cache entry is on the stack, we would
+        // definitely encounter a cycle.
         if stack.iter().any(|e| nested_goals.contains(e.input)) {
             debug!("cache entry not applicable due to stack");
             return false;
         }
 
+        // The global cache entry is also invalid if there's a provisional cache entry
+        // which would apply for any of its nested goals.
         #[allow(rustc::potential_query_instability)]
         for (input, path_from_global_entry) in nested_goals.iter() {
             let Some(entries) = provisional_cache.get(&input) else {
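The first rejection rule above reduces to a disjointness check between the stack and the entry's nested goals. A sketch with goal inputs as plain strings (hypothetical stand-ins):

```rust
// A global cache entry may only be used if none of its nested goals are
// currently being evaluated: otherwise using the entry would hide a cycle.
fn applicable_so_far(stack: &[&str], nested_goals: &[&str]) -> bool {
    !stack.iter().any(|goal| nested_goals.contains(goal))
}

fn main() {
    assert!(applicable_so_far(&["A", "B"], &["C", "D"]));
    assert!(!applicable_so_far(&["A", "B"], &["B"]));
}
```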
@@ -746,6 +789,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                 continue;
             }
 
+            // A provisional cache entry only applies if the path from its highest head
+            // matches the path when encountering the goal.
             let head = heads.highest_cycle_head();
             let full_path = match Self::stack_path_kind(cx, stack, head) {
                 PathKind::Coinductive => path_from_global_entry,
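The `full_path` computation reuses the path combinator from earlier, lifted to `UsageKind` (same hypothetical enums as in the previous sketches):

```rust
#[derive(Clone, Copy, PartialEq, Eq)]
enum PathKind { Coinductive, Inductive }

#[derive(Clone, Copy, PartialEq, Eq)]
enum UsageKind { Single(PathKind), Mixed }

// The path recorded in the global cache entry only survives as-is if the
// current stack path from the provisional entry's highest head is
// coinductive; an inductive stack path makes the full path inductive.
fn full_path(stack_path: PathKind, path_from_global_entry: UsageKind) -> UsageKind {
    match stack_path {
        PathKind::Coinductive => path_from_global_entry,
        PathKind::Inductive => UsageKind::Single(PathKind::Inductive),
    }
}
```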
@@ -817,7 +862,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         // its state is the same regardless of whether we've used the
         // global cache or not.
         let reached_depth = self.stack.next_index().plus(additional_depth);
-        // We don't move cycle participants to the global cache.
+        // We don't move cycle participants to the global cache, so the
+        // cycle heads are always empty.
         let heads = Default::default();
         Self::update_parent_goal(
             cx,
cx,
0 commit comments