@@ -198,8 +198,11 @@ impl<'tcx> Scope<'tcx> {
     ///
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
-    fn invalidate_cache(&mut self) {
-        self.cached_exits = FnvHashMap();
+    ///
+    /// `unwind` controls whether caches for the unwind branch are also invalidated.
+    fn invalidate_cache(&mut self, unwind: bool) {
+        self.cached_exits.clear();
+        if !unwind { return; }
         for dropdata in &mut self.drops {
             if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                 *cached_block = None;
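For illustration, here is a minimal standalone sketch of the invalidation behaviour the hunk above introduces. Everything in it is a simplified stand-in, not the real MIR builder API: a plain `std::collections::HashMap` in place of `FnvHashMap`, and toy `BasicBlock`/`DropKind`/`Scope` types. The point it demonstrates: exit caches are always cleared, while the per-drop unwind caches survive unless `unwind` is true.

```rust
use std::collections::HashMap;

// Toy stand-ins for the real MIR types (hypothetical, for illustration only).
#[derive(Clone, Copy, Debug, PartialEq)]
struct BasicBlock(usize);

#[allow(dead_code)]
enum DropKind {
    Value { cached_block: Option<BasicBlock> },
    Storage,
}

struct DropData {
    kind: DropKind,
}

struct Scope {
    cached_exits: HashMap<usize, BasicBlock>,
    drops: Vec<DropData>,
}

impl Scope {
    // Mirrors the patched invalidate_cache: the exit caches are always
    // cleared, while the per-drop unwind caches are kept unless `unwind`
    // is true.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                *cached_block = None;
            }
        }
    }
}

fn main() {
    let mut scope = Scope {
        cached_exits: HashMap::from([(0, BasicBlock(7))]),
        drops: vec![DropData {
            kind: DropKind::Value { cached_block: Some(BasicBlock(3)) },
        }],
    };
    scope.invalidate_cache(false);
    assert!(scope.cached_exits.is_empty());
    // The unwind cache survived, because we passed `unwind: false`.
    if let DropKind::Value { cached_block } = &scope.drops[0].kind {
        assert_eq!(*cached_block, Some(BasicBlock(3)));
    }
}
```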
@@ -455,25 +458,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         };
 
         for scope in self.scopes.iter_mut().rev() {
-            if scope.extent == extent {
+            let this_scope = scope.extent == extent;
+            // We must invalidate all the caches leading up to the scope we’re looking for, because
+            // the cached blocks would otherwise branch into a build of the scope that does not yet
+            // contain the new drop. When we add the drop to the currently inspected scope itself,
+            // its non-unwind caches may become invalid as well, so we invalidate those too; its
+            // unwind caches stay correct, because the already generated unwind blocks cannot be
+            // influenced by a drop appended after them.
+            //
+            // If we’re scheduling cleanup for a non-droppable type (i.e. DropKind::Storage), we do
+            // not need to invalidate the unwind branch at all, because DropKind::Storage is
+            // currently never built into the unwind branch.
+            let invalidate_unwind = needs_drop && !this_scope;
+            scope.invalidate_cache(invalidate_unwind);
+            if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
                 }
-
-                // No need to invalidate any caches here. The just-scheduled drop will branch into
-                // the drop that comes before it in the vector.
                 scope.drops.push(DropData {
                     span: span,
                     location: lvalue.clone(),
                     kind: drop_kind
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re
-                // looking for, because all of the blocks in the chain will become incorrect.
-                if let DropKind::Value { .. } = drop_kind {
-                    scope.invalidate_cache()
-                }
             }
         }
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
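And a similarly simplified sketch of the scheduling loop from the hunk above, showing which caches each scope loses when a drop is scheduled into an enclosing scope. The types are again hypothetical toys, with booleans standing in for the actual cached blocks, and the real code pushes a `DropData` where the sketch returns:

```rust
// Toy model: each scope records whether its exit/unwind caches are still
// valid, so we can watch which ones the scheduling loop invalidates.
struct Scope {
    extent: usize,
    exits_cached: bool,
    unwind_cached: bool,
}

impl Scope {
    fn invalidate_cache(&mut self, unwind: bool) {
        self.exits_cached = false;
        if unwind {
            self.unwind_cached = false;
        }
    }
}

// Walks scopes innermost-first, mirroring the patched schedule_drop: scopes
// inside the target lose their unwind caches too (their unwind chains branch
// into the target's drop chain), the target scope keeps its unwind caches,
// and DropKind::Storage (needs_drop == false) never touches the unwind
// branch at all.
fn schedule_drop(scopes: &mut [Scope], extent: usize, needs_drop: bool) {
    for scope in scopes.iter_mut().rev() {
        let this_scope = scope.extent == extent;
        let invalidate_unwind = needs_drop && !this_scope;
        scope.invalidate_cache(invalidate_unwind);
        if this_scope {
            return; // the real code pushes a DropData here
        }
    }
    panic!("extent {} not in scope", extent);
}

fn main() {
    // Outermost scope first; the loop scans from the back (innermost).
    let mut scopes = vec![
        Scope { extent: 0, exits_cached: true, unwind_cached: true },
        Scope { extent: 1, exits_cached: true, unwind_cached: true },
        Scope { extent: 2, exits_cached: true, unwind_cached: true },
    ];
    schedule_drop(&mut scopes, 1, true);
    // Scope 2 (inner): both caches gone. Scope 1 (target): unwind kept.
    // Scope 0 (outer): untouched, the loop returned before reaching it.
    assert!(!scopes[2].exits_cached && !scopes[2].unwind_cached);
    assert!(!scopes[1].exits_cached && scopes[1].unwind_cached);
    assert!(scopes[0].exits_cached && scopes[0].unwind_cached);
}
```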
@@ -490,11 +497,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                              value: &Lvalue<'tcx>,
                              item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
+            // We must invalidate all the caches leading up to and including the scope we’re
+            // looking for, because otherwise some of the blocks in the chain will become
+            // incorrect and must be rebuilt.
+            scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                // We also must invalidate the caches in the scope for which the free is scheduled
-                // because the drops must branch into the free we schedule here.
-                scope.invalidate_cache();
                 scope.needs_cleanup = true;
                 scope.free = Some(FreeData {
                     span: span,
@@ -503,11 +511,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     cached_block: None
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re looking
-                // for, because otherwise some/most of the blocks in the chain will become
-                // incorrect.
-                scope.invalidate_cache();
             }
         }
         span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);