
Commit 2d36642

Properly invalidate the early exit cache
Fixes #35737
1 parent e25542c commit 2d36642

2 files changed: +60 -20 lines changed

src/librustc_mir/build/scope.rs (+23 -20)
@@ -198,8 +198,11 @@ impl<'tcx> Scope<'tcx> {
     ///
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
-    fn invalidate_cache(&mut self) {
-        self.cached_exits = FnvHashMap();
+    ///
+    /// `unwind` controls whether caches for the unwind branch are also invalidated.
+    fn invalidate_cache(&mut self, unwind: bool) {
+        self.cached_exits.clear();
+        if !unwind { return; }
         for dropdata in &mut self.drops {
             if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                 *cached_block = None;
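To make the semantics of the new flag easier to see in isolation, here is a minimal standalone sketch. The types below (Scope, DropData, the usize block ids) are stand-ins invented for this example, not the compiler's own; the point is only that the early-exit cache is always cleared, while the per-drop unwind caches survive unless `unwind` is true.

use std::collections::HashMap;

type BasicBlock = usize; // stand-in for a MIR basic-block id

struct DropData {
    cached_block: Option<BasicBlock>, // cached unwind block for this drop, if already built
}

struct Scope {
    cached_exits: HashMap<u32, BasicBlock>, // cached early-exit blocks
    drops: Vec<DropData>,
}

impl Scope {
    // Mirrors the new signature: always forget the early-exit blocks; forget the
    // unwind blocks only when asked to.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            dropdata.cached_block = None;
        }
    }
}

fn main() {
    let mut exits = HashMap::new();
    exits.insert(0u32, 7);
    let mut scope = Scope {
        cached_exits: exits,
        drops: vec![DropData { cached_block: Some(3) }],
    };

    scope.invalidate_cache(false);
    assert!(scope.cached_exits.is_empty());
    assert_eq!(scope.drops[0].cached_block, Some(3)); // unwind cache untouched

    scope.invalidate_cache(true);
    assert_eq!(scope.drops[0].cached_block, None); // unwind cache cleared too
}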
@@ -455,25 +458,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         };

         for scope in self.scopes.iter_mut().rev() {
-            if scope.extent == extent {
+            let this_scope = scope.extent == extent;
+            // We must invalidate all the caches leading up to the scope we’re looking for, because
+            // the cached blocks will branch into build of scope not containing the new drop. If we
+            // add stuff to the currently inspected scope, then in some cases the non-unwind caches
+            // may become invalid, therefore we should invalidate these as well. The unwind caches
+            // will stay correct, because the already generated unwind blocks cannot be influenced
+            // by just added drop.
+            //
+            // If we’re scheduling cleanup for non-droppable type (i.e. DropKind::Storage), then we
+            // do not need to invalidate unwind branch, because DropKind::Storage does not end up
+            // built in the unwind branch currently.
+            let invalidate_unwind = needs_drop && !this_scope;
+            scope.invalidate_cache(invalidate_unwind);
+            if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
                 }
-
-                // No need to invalidate any caches here. The just-scheduled drop will branch into
-                // the drop that comes before it in the vector.
                 scope.drops.push(DropData {
                     span: span,
                     location: lvalue.clone(),
                     kind: drop_kind
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re
-                // looking for, because all of the blocks in the chain will become incorrect.
-                if let DropKind::Value { .. } = drop_kind {
-                    scope.invalidate_cache()
-                }
             }
         }
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
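The flag computation in the hunk above can be restated on its own. The following is a hypothetical, self-contained rendering (the indices and the helper name are invented for illustration): walking scopes from the innermost outwards, every scope on the way to the target has its early-exit cache invalidated, but its unwind caches only if the scheduled cleanup is a real drop and the scope is strictly inside the target.

// `target_from_innermost` is how many scopes lie strictly inside the scope the drop
// is scheduled into; `needs_drop` is false for DropKind::Storage-style cleanup.
fn unwind_invalidation_flags(depth: usize,
                             target_from_innermost: usize,
                             needs_drop: bool) -> Vec<bool> {
    let mut flags = Vec::new();
    for i in 0..depth {
        let this_scope = i == target_from_innermost;
        // Early-exit caches are invalidated for every scope visited; the unwind
        // caches only for scopes strictly inside the target, and only for real drops.
        flags.push(needs_drop && !this_scope);
        if this_scope { break; }
    }
    flags
}

fn main() {
    // A droppable value scheduled two scopes up: the two inner scopes lose their
    // unwind caches, the target scope keeps them.
    assert_eq!(unwind_invalidation_flags(5, 2, true), vec![true, true, false]);
    // Storage-only cleanup never invalidates unwind caches.
    assert_eq!(unwind_invalidation_flags(5, 2, false), vec![false, false, false]);
}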
@@ -490,11 +497,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                              value: &Lvalue<'tcx>,
                              item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
+            // We must invalidate all the caches leading up to and including the scope we’re
+            // looking for, because otherwise some of the blocks in the chain will become
+            // incorrect and must be rebuilt.
+            scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                // We also must invalidate the caches in the scope for which the free is scheduled
-                // because the drops must branch into the free we schedule here.
-                scope.invalidate_cache();
                 scope.needs_cleanup = true;
                 scope.free = Some(FreeData {
                     span: span,
@@ -503,11 +511,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     cached_block: None
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re looking
-                // for, because otherwise some/most of the blocks in the chain will become
-                // incorrect.
-                scope.invalidate_cache();
             }
         }
         span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
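Why the invalidation matters at all: the caches remember already-built exit blocks, that is, chains of drops that early exits (break, return) and unwind paths jump into. Below is a rough standalone illustration, with invented types rather than compiler code, of how such a cache goes stale when a drop is scheduled after the exit block was built, which is the class of bug the regression test below exercises.

// An "exit block" is modelled as the list of drops it performs.
#[derive(Clone, PartialEq, Debug)]
struct ExitBlock { drops_performed: Vec<&'static str> }

struct Scope {
    drops: Vec<&'static str>,
    cached_exit: Option<ExitBlock>,
}

impl Scope {
    // Build the exit block for this scope, or reuse the cached one if present.
    fn exit_block(&mut self) -> ExitBlock {
        if let Some(ref cached) = self.cached_exit {
            return cached.clone();
        }
        let block = ExitBlock { drops_performed: self.drops.clone() };
        self.cached_exit = Some(block.clone());
        block
    }
}

fn main() {
    let mut scope = Scope { drops: vec!["a"], cached_exit: None };
    let first = scope.exit_block();   // builds and caches the exit for ["a"]
    scope.drops.push("conn");         // a new drop is scheduled later
    let stale = scope.exit_block();   // without invalidation: still only ["a"]
    assert_eq!(first, stale);
    scope.cached_exit = None;         // what invalidate_cache achieves
    assert_eq!(scope.exit_block().drops_performed, vec!["a", "conn"]);
}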
New test file (+37 -0)
@@ -0,0 +1,37 @@
1+
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2+
// file at the top-level directory of this distribution and at
3+
// http://rust-lang.org/COPYRIGHT.
4+
//
5+
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6+
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7+
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8+
// option. This file may not be copied, modified, or distributed
9+
// except according to those terms.
10+
11+
static mut DROP: bool = false;
12+
13+
struct ConnWrap(Conn);
14+
impl ::std::ops::Deref for ConnWrap {
15+
type Target=Conn;
16+
fn deref(&self) -> &Conn { &self.0 }
17+
}
18+
19+
struct Conn;
20+
impl Drop for Conn {
21+
fn drop(&mut self) { unsafe { DROP = true; } }
22+
}
23+
24+
fn inner() {
25+
let conn = &*match Some(ConnWrap(Conn)) {
26+
Some(val) => val,
27+
None => return,
28+
};
29+
return;
30+
}
31+
32+
fn main() {
33+
inner();
34+
unsafe {
35+
assert_eq!(DROP, true);
36+
}
37+
}
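The test exercises early exits (the `None => return` arm and the trailing `return;`) from a function in which a match scrutinee temporary with a destructor must still be dropped: `main` asserts that `Conn::drop` has in fact run by the time `inner` returns, which is the behaviour reported broken in #35737 and fixed by this commit.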
