Skip to content

Commit 5638847

Browse files
committed
Address nits on build/scope.rs
1 parent ebf6341 commit 5638847

File tree

1 file changed

+141
-101
lines changed

1 file changed

+141
-101
lines changed

src/librustc_mir/build/scope.rs

+141-101
Original file line numberDiff line numberDiff line change
@@ -101,11 +101,18 @@ pub struct Scope<'tcx> {
101101
// A scope may only have one associated free, because:
102102
// 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`;
103103
// 2. It only makes sense to have it translated into the diverge-path.
104+
//
105+
// This kind of drop will be run *after* all the regular drops scheduled onto this scope,
106+
// because drops may have dependencies on the allocated memory.
107+
//
108+
// This is expected to go away once `box EXPR` becomes a sugar for placement protocol and gets
109+
// desugared in some earlier stage.
104110
free: Option<FreeData<'tcx>>,
105111
}
106112

107113
struct DropData<'tcx> {
108114
value: Lvalue<'tcx>,
115+
// NB: per-drop “cache” is necessary for the build_scope_drops function below.
109116
/// The cached block for the cleanups-on-diverge path. This block contains code to run the
110117
/// current drop and all the preceding drops (i.e. those having lower index in Drop’s
111118
/// Scope drop array)
@@ -137,28 +144,30 @@ pub struct LoopScope {
137144
}
138145

139146
impl<'tcx> Scope<'tcx> {
140-
/// Invalidate cached blocks in the scope. Should always be run for all inner scopes when a
141-
/// drop is pushed into some scope enclosing a larger extent of code.
142-
fn invalidate_cache(&mut self, only_free: bool) {
147+
/// Invalidate all the cached blocks in the scope.
148+
///
149+
/// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
150+
/// larger extent of code.
151+
fn invalidate_cache(&mut self) {
152+
for dropdata in &mut self.drops {
153+
dropdata.cached_block = None;
154+
}
143155
if let Some(ref mut freedata) = self.free {
144156
freedata.cached_block = None;
145157
}
146-
if !only_free {
147-
for dropdata in &mut self.drops {
148-
dropdata.cached_block = None;
149-
}
150-
}
151158
}
152159

153160
/// Returns the cached block for this scope.
154161
///
155162
/// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
156163
/// this method to work correctly.
157164
fn cached_block(&self) -> Option<BasicBlock> {
158-
if let Some(ref free_data) = self.free {
159-
free_data.cached_block
165+
if let Some(data) = self.drops.last() {
166+
Some(data.cached_block.expect("drop cache is not filled"))
167+
} else if let Some(ref data) = self.free {
168+
Some(data.cached_block.expect("free cache is not filled"))
160169
} else {
161-
self.drops.last().iter().flat_map(|dd| dd.cached_block).next()
170+
None
162171
}
163172
}
164173
}
@@ -297,18 +306,17 @@ impl<'a,'tcx> Builder<'a,'tcx> {
297306
}
298307
for scope in self.scopes.iter_mut().rev() {
299308
if scope.extent == extent {
300-
// We only invalidate cached block of free here; all other drops’ cached blocks do
301-
// not become invalid (this drop will branch into them).
302-
scope.invalidate_cache(true);
309+
// No need to invalidate any caches here. The just-scheduled drop will branch into
310+
// the drop that comes before it in the vector.
303311
scope.drops.push(DropData {
304312
value: lvalue.clone(),
305313
cached_block: None
306314
});
307315
return;
308316
} else {
309317
// We must invalidate all the cached_blocks leading up to the scope we’re
310-
// looking for, because all of the blocks in the chain become incorrect.
311-
scope.invalidate_cache(false)
318+
// looking for, because all of the blocks in the chain will become incorrect.
319+
scope.invalidate_cache()
312320
}
313321
}
314322
self.hir.span_bug(span,
@@ -328,6 +336,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
328336
for scope in self.scopes.iter_mut().rev() {
329337
if scope.extent == extent {
330338
assert!(scope.free.is_none(), "scope already has a scheduled free!");
339+
// We also must invalidate the caches in the scope for which the free is scheduled
340+
// because the drops must branch into the free we schedule here.
341+
scope.invalidate_cache();
331342
scope.free = Some(FreeData {
332343
span: span,
333344
value: value.clone(),
@@ -337,9 +348,9 @@ impl<'a,'tcx> Builder<'a,'tcx> {
337348
return;
338349
} else {
339350
// We must invalidate all the cached_blocks leading up to the scope we’re looking
340-
// for, because otherwise some/most of the blocks in the chain might become
351+
// for, because otherwise some/most of the blocks in the chain will become
341352
// incorrect.
342-
scope.invalidate_cache(false);
353+
scope.invalidate_cache();
343354
}
344355
}
345356
self.hir.span_bug(span,
@@ -357,95 +368,20 @@ impl<'a,'tcx> Builder<'a,'tcx> {
357368
if self.scopes.is_empty() {
358369
return None;
359370
}
360-
361-
let unit_tmp = self.get_unit_temp();
362-
let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self;
363-
364-
let tcx = hir.tcx();
371+
let unit_temp = self.get_unit_temp();
372+
let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self;
365373
let mut next_block = None;
366374

367375
// Given an array of scopes, we generate these from the outermost scope to the innermost
368376
// one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will
369377
// generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks
370378
// always ends up at a block with the Resume terminator.
371-
//
372-
// Similarly to the scopes, we translate drops so:
373-
// * Scheduled free drop is executed first;
374-
// * Drops are executed after the free drop in the decreasing order (decreasing as
375-
// from higher index in drops array to lower index);
376-
//
377-
// NB: We do the building backwards here. We will first produce a block containing the
378-
// Resume terminator (which is executed last for any given chain of cleanups) and then go
379-
// on building the drops from the outermost one to the innermost one. Similar note applies
380-
// to the drops within the scope too.
381-
{
382-
let iter = scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some());
383-
for scope in iter {
384-
// Try using the cached free drop if any…
385-
if let Some(FreeData { cached_block: Some(cached_block), .. }) = scope.free {
386-
next_block = Some(cached_block);
387-
continue;
388-
}
389-
// otherwise look for the cached regular drop (fast path)…
390-
if let Some(&DropData { cached_block: Some(cached_block), .. }) = scope.drops.last() {
391-
next_block = Some(cached_block);
392-
continue;
393-
}
394-
// otherwise build the blocks…
395-
for drop_data in scope.drops.iter_mut() {
396-
// skipping them if they’re already built…
397-
if let Some(cached_block) = drop_data.cached_block {
398-
next_block = Some(cached_block);
399-
continue;
400-
}
401-
let block = cfg.start_new_cleanup_block();
402-
let target = next_block.unwrap_or_else(|| {
403-
let b = cfg.start_new_cleanup_block();
404-
cfg.terminate(b, Terminator::Resume);
405-
b
406-
});
407-
cfg.terminate(block, Terminator::Drop {
408-
value: drop_data.value.clone(),
409-
target: target,
410-
unwind: None
411-
});
412-
drop_data.cached_block = Some(block);
413-
next_block = Some(block);
414-
}
415-
416-
if let Some(ref mut free_data) = scope.free {
417-
// The free was not cached yet. It must be translated last and will be executed
418-
// first.
419-
let free_func = tcx.lang_items.box_free_fn()
420-
.expect("box_free language item is missing");
421-
let substs = tcx.mk_substs(Substs::new(
422-
VecPerParamSpace::new(vec![], vec![], vec![free_data.item_ty]),
423-
VecPerParamSpace::new(vec![], vec![], vec![])
424-
));
425-
let block = cfg.start_new_cleanup_block();
426-
let target = next_block.unwrap_or_else(|| {
427-
let b = cfg.start_new_cleanup_block();
428-
cfg.terminate(b, Terminator::Resume);
429-
b
430-
});
431-
cfg.terminate(block, Terminator::Call {
432-
func: Operand::Constant(Constant {
433-
span: free_data.span,
434-
ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
435-
literal: Literal::Item {
436-
def_id: free_func,
437-
kind: ItemKind::Function,
438-
substs: substs
439-
}
440-
}),
441-
args: vec![Operand::Consume(free_data.value.clone())],
442-
destination: Some((unit_tmp.clone(), target)),
443-
cleanup: None
444-
});
445-
free_data.cached_block = Some(block);
446-
next_block = Some(block);
447-
}
448-
}
379+
for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) {
380+
next_block = Some(build_diverge_scope(hir.tcx(),
381+
cfg,
382+
unit_temp.clone(),
383+
scope,
384+
next_block));
449385
}
450386
scopes.iter().rev().flat_map(|x| x.cached_block()).next()
451387
}
@@ -462,6 +398,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
462398
next_target.unit()
463399
}
464400

401+
465402
// Panicking
466403
// =========
467404
// FIXME: should be moved into their own module
@@ -596,3 +533,106 @@ fn build_scope_drops<'tcx>(mut block: BasicBlock,
596533
}
597534
block.unit()
598535
}
536+
537+
fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>,
538+
cfg: &mut CFG<'tcx>,
539+
unit_temp: Lvalue<'tcx>,
540+
scope: &mut Scope<'tcx>,
541+
target: Option<BasicBlock>)
542+
-> BasicBlock {
543+
debug_assert!(!scope.drops.is_empty() || scope.free.is_some());
544+
545+
// First, we build the drops, iterating the drops array in reverse. We do that so that as soon
546+
// as we find a `cached_block`, we know that we’re finished and don’t need to do anything else.
547+
let mut previous = None;
548+
let mut last_drop_block = None;
549+
for drop_data in scope.drops.iter_mut().rev() {
550+
if let Some(cached_block) = drop_data.cached_block {
551+
if let Some((previous_block, previous_value)) = previous {
552+
cfg.terminate(previous_block, Terminator::Drop {
553+
value: previous_value,
554+
target: cached_block,
555+
unwind: None
556+
});
557+
return last_drop_block.unwrap();
558+
} else {
559+
return cached_block;
560+
}
561+
} else {
562+
let block = cfg.start_new_cleanup_block();
563+
drop_data.cached_block = Some(block);
564+
if let Some((previous_block, previous_value)) = previous {
565+
cfg.terminate(previous_block, Terminator::Drop {
566+
value: previous_value,
567+
target: block,
568+
unwind: None
569+
});
570+
} else {
571+
last_drop_block = Some(block);
572+
}
573+
previous = Some((block, drop_data.value.clone()));
574+
}
575+
}
576+
577+
// Prepare the end target for this chain.
578+
let mut target = target.unwrap_or_else(||{
579+
let b = cfg.start_new_cleanup_block();
580+
cfg.terminate(b, Terminator::Resume);
581+
b
582+
});
583+
584+
// Then, build the free branching into the prepared target.
585+
if let Some(ref mut free_data) = scope.free {
586+
target = if let Some(cached_block) = free_data.cached_block {
587+
cached_block
588+
} else {
589+
let t = build_free(tcx, cfg, unit_temp, free_data, target);
590+
free_data.cached_block = Some(t);
591+
t
592+
}
593+
};
594+
595+
if let Some((previous_block, previous_value)) = previous {
596+
// Finally, branch into that just-built `target` from the `previous_block`.
597+
cfg.terminate(previous_block, Terminator::Drop {
598+
value: previous_value,
599+
target: target,
600+
unwind: None
601+
});
602+
last_drop_block.unwrap()
603+
} else {
604+
// If `previous.is_none()`, there were no drops in this scope – we return the
605+
// target.
606+
target
607+
}
608+
}
609+
610+
fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>,
611+
cfg: &mut CFG<'tcx>,
612+
unit_temp: Lvalue<'tcx>,
613+
data: &FreeData<'tcx>,
614+
target: BasicBlock)
615+
-> BasicBlock {
616+
let free_func = tcx.lang_items.box_free_fn()
617+
.expect("box_free language item is missing");
618+
let substs = tcx.mk_substs(Substs::new(
619+
VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
620+
VecPerParamSpace::new(vec![], vec![], vec![])
621+
));
622+
let block = cfg.start_new_cleanup_block();
623+
cfg.terminate(block, Terminator::Call {
624+
func: Operand::Constant(Constant {
625+
span: data.span,
626+
ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
627+
literal: Literal::Item {
628+
def_id: free_func,
629+
kind: ItemKind::Function,
630+
substs: substs
631+
}
632+
}),
633+
args: vec![Operand::Consume(data.value.clone())],
634+
destination: Some((unit_temp, target)),
635+
cleanup: None
636+
});
637+
block
638+
}

0 commit comments

Comments
 (0)