@@ -459,16 +459,52 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         for scope in self.scopes.iter_mut().rev() {
             let this_scope = scope.extent == extent;
-            // We must invalidate all the caches leading up to the scope we’re looking for, because
-            // the cached blocks will branch into build of scope not containing the new drop. If we
-            // add stuff to the currently inspected scope, then in some cases the non-unwind caches
-            // may become invalid, therefore we should invalidate these as well. The unwind caches
-            // will stay correct, because the already generated unwind blocks cannot be influenced
-            // by just added drop.
+            // When building drops, we try to cache chains of drops in such a way that these
+            // drops can be reused by the drops which branch into the cached (already built)
+            // blocks. This, however, means that whenever we add a drop into a scope which
+            // already had some blocks built (and thus cached) for it, we must invalidate all
+            // the caches which might branch into that scope. This is necessary, because
+            // otherwise some other code might use a cache to branch into an already built
+            // chain of drops, essentially ignoring the newly added drop.
             //
-            // If we’re scheduling cleanup for non-droppable type (i.e. DropKind::Storage), then we
-            // do not need to invalidate unwind branch, because DropKind::Storage does not end up
-            // built in the unwind branch currently.
+            // For example, consider two scopes with a drop in each. These are built and thus
+            // the caches are filled:
+            //
+            // +------------------------------------------------------------+
+            // | +-----------------------------------+                      |
+            // | | +--------+     +-------------+    |   +---------------+  |
+            // | | | return | <-+ | drop(outer) |  <-+   | drop(middle)  |  |
+            // | | +--------+     +-------------+    |   +---------------+  |
+            // | +------------|outer_scope cache|----+                      |
+            // +------------------------------|middle_scope cache|----------+
+            //
+            // Now, a new, inner-most scope is added along with a new drop into both the
+            // inner-most and the outer-most scopes:
+            //
+            // +-------------------------------------------------------------+
+            // | +----------------------------------+                        |
+            // | | +--------+    +-------------+    |   +---------------+    | +-------------+
+            // | | | return | <+ | drop(new)   |  <-+   | drop(middle)  | <--+ | drop(inner) |
+            // | | +--------+  | | drop(outer) |    |   +---------------+    | +-------------+
+            // | |            +-+ +-------------+   |                        |
+            // | +---|invalid outer_scope cache|----+                        |
+            // +----=----------------|invalid middle_scope cache|------------+
+            //
+            // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
+            // outer_scope and middle_scope, then, when building drops for the inner
+            // (right-most) scope, the old cached blocks, which lack `drop(new)`, will get
+            // used, producing the wrong results.
+            //
+            // The cache and its invalidation for the unwind branch are somewhat special. The
+            // cache is per-drop rather than per-scope, which has several implications. Most
+            // notably, adding a new drop to a scope does not invalidate the cached blocks of
+            // the prior drops in that scope, because none of the already existing drops has
+            // an edge into the block with the newly added drop.
+            //
+            // Note that this code iterates scopes from the inner-most to the outer-most,
+            // invalidating the caches of each scope visited. This way only the bare minimum
+            // of caches gets invalidated: e.g. if a new drop is added into the middle scope,
+            // the cache of the outer scope stays intact.
             let invalidate_unwind = needs_drop && !this_scope;
             scope.invalidate_cache(invalidate_unwind);
             if this_scope {
@@ -497,9 +533,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                              value: &Lvalue<'tcx>,
                              item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
-            // We must invalidate all the caches leading up to and including the scope we’re
-            // looking for, because otherwise some of the blocks in the chain will become
-            // incorrect and must be rebuilt.
+            // See the comment in schedule_drop above. The primary difference is that we
+            // invalidate the unwind blocks unconditionally. That’s because the box free may
+            // be considered the outer-most cleanup within the scope.
             scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
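
To make the scheme concrete, here is a minimal, self-contained sketch of the invalidation strategy the new comment describes. Everything below (`Scope`, `DropData`, `BasicBlock`, and the `cached_exit`/`cached_unwind` fields) is a simplified stand-in invented for illustration, not the actual rustc MIR-build data structures; only the shape of the `schedule_drop` loop mirrors the diff above.

```rust
// Simplified stand-ins for illustration; NOT the real rustc types.
#[derive(Clone, Copy, PartialEq)]
struct BasicBlock(usize);

struct DropData {
    needs_drop: bool,
    // Per-drop unwind cache: adding a new drop to this scope leaves these
    // intact, because no existing drop has an edge into the new drop's block.
    cached_unwind: Option<BasicBlock>,
}

struct Scope {
    extent: usize,
    drops: Vec<DropData>,
    // Per-scope cache for the already-built non-unwind drop chain.
    cached_exit: Option<BasicBlock>,
}

impl Scope {
    fn invalidate_cache(&mut self, invalidate_unwind: bool) {
        // The non-unwind chain might branch around the newly added drop,
        // so its cache is always cleared.
        self.cached_exit = None;
        if invalidate_unwind {
            for data in &mut self.drops {
                data.cached_unwind = None;
            }
        }
    }
}

fn schedule_drop(scopes: &mut Vec<Scope>, extent: usize, needs_drop: bool) {
    // Iterate scopes from the inner-most to the outer-most, invalidating
    // the caches of each scope visited; scopes outside the target scope
    // keep their caches intact.
    for scope in scopes.iter_mut().rev() {
        let this_scope = scope.extent == extent;
        // Unwind caches are per-drop: the scope receiving the new drop
        // keeps them, while inner scopes that branch into it do not.
        let invalidate_unwind = needs_drop && !this_scope;
        scope.invalidate_cache(invalidate_unwind);
        if this_scope {
            scope.drops.push(DropData { needs_drop, cached_unwind: None });
            return;
        }
    }
    panic!("no enclosing scope found for extent {}", extent);
}

fn main() {
    // Outer-most scope first, inner-most last, as in the builder's stack.
    let mut scopes = vec![
        Scope { extent: 0, drops: vec![], cached_exit: Some(BasicBlock(1)) },
        Scope { extent: 1, drops: vec![], cached_exit: Some(BasicBlock(2)) },
    ];
    // Scheduling a drop in the outer scope must also invalidate the inner
    // scope's caches, since its cached chain branches into the outer one.
    schedule_drop(&mut scopes, 0, true);
    assert!(scopes[0].cached_exit.is_none());
    assert!(scopes[1].cached_exit.is_none());
}
```

The sketch keeps the two cache granularities separate on purpose: the per-scope `cached_exit` is cleared for every scope that could branch into the modified chain, while the per-drop `cached_unwind` entries survive in the scope that received the new drop, matching the `needs_drop && !this_scope` condition in the diff.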