@@ -89,6 +89,7 @@ use rustc_index::{IndexSlice, IndexVec};
 use rustc_middle::middle::region;
 use rustc_middle::mir::*;
 use rustc_middle::thir::{ExprId, LintLevel};
+use rustc_middle::ty::{self, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_session::lint::Level;
 use rustc_span::source_map::Spanned;
@@ -880,22 +881,45 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         block.unit()
     }
 
+    fn is_async_drop_impl(
+        tcx: TyCtxt<'tcx>,
+        local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
+        param_env: ty::ParamEnv<'tcx>,
+        local: Local,
+    ) -> bool {
+        let ty = local_decls[local].ty;
+        if ty.is_async_drop(tcx, param_env) || ty.is_coroutine() {
+            return true;
+        }
+        ty.needs_async_drop(tcx, param_env)
+    }
+    fn is_async_drop(&self, local: Local) -> bool {
+        Self::is_async_drop_impl(self.tcx, &self.local_decls, self.param_env, local)
+    }
+
     fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
         let needs_cleanup = self.scopes.scopes.last().is_some_and(|scope| scope.needs_cleanup());
         let is_coroutine = self.coroutine.is_some();
         let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
 
+        let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
+        let has_async_drops = is_coroutine
+            && scope.drops.iter().any(|v| v.kind == DropKind::Value && self.is_async_drop(v.local));
+        let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
         let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
         build_scope_drops(
             &mut self.cfg,
             &mut self.scopes.unwind_drops,
+            &mut self.scopes.coroutine_drops,
             scope,
             block,
             unwind_to,
+            dropline_to,
             is_coroutine && needs_cleanup,
             self.arg_count,
+            |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, self.param_env, v),
         )
         .into_block()
     }
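
The two helpers above decide whether a local must be dropped on the coroutine drop path: the type is itself an async-drop future, it is a coroutine, or it transitively needs async drop. The exact semantics live behind rustc's type-system queries (`Ty::is_async_drop`, `Ty::is_coroutine`, `Ty::needs_async_drop`); the toy model below is only a sketch of the same three-way decision, with a hypothetical `ToyTy` standing in for `Ty<'tcx>`:

```rust
// Illustrative only: `ToyTy` and `needs_async_drop` are hypothetical
// stand-ins for `Ty<'tcx>` and rustc's type-system queries.
enum ToyTy {
    Coroutine,
    AsyncDropFuture,
    Adt { fields: Vec<ToyTy> },
    Scalar,
}

fn needs_async_drop(ty: &ToyTy) -> bool {
    match ty {
        // Mirrors `ty.is_async_drop(..) || ty.is_coroutine()`: these
        // always take the dropline path.
        ToyTy::Coroutine | ToyTy::AsyncDropFuture => true,
        // Mirrors `ty.needs_async_drop(..)`: a type needs async drop
        // if any of its components does.
        ToyTy::Adt { fields } => fields.iter().any(needs_async_drop),
        ToyTy::Scalar => false,
    }
}

fn main() {
    let ty = ToyTy::Adt { fields: vec![ToyTy::Scalar, ToyTy::AsyncDropFuture] };
    assert!(needs_async_drop(&ty)); // the nested future forces the async path
}
```

Note that `is_async_drop_impl` is an associated function rather than a method: it can then be called from a closure that captures only `self.tcx`, `self.local_decls`, and `self.param_env`, which matters at the `build_scope_drops` call in `leave_top_scope`, where `self.cfg` and `self.scopes` are simultaneously borrowed mutably.
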
@@ -1312,22 +1336,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         self.scopes.unwind_drops.add_entry_point(start, next_drop);
     }
 
-    /// Sets up a path that performs all required cleanup for dropping a
-    /// coroutine, starting from the given block that ends in
-    /// [TerminatorKind::Yield].
-    ///
-    /// This path terminates in CoroutineDrop.
-    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
+    /// Returns the [DropIdx] for the innermost drop of the dropline (coroutine drop path).
+    /// The `DropIdx` will be created if it doesn't already exist.
+    fn diverge_dropline(&mut self) -> DropIdx {
+        // It is okay to use a dummy span here: looking up the scope index
+        // of the topmost scope must always succeed.
+        self.diverge_dropline_target(self.scopes.topmost(), DUMMY_SP)
+    }
+
+    /// Similar to `diverge_cleanup_target`, but for the dropline (coroutine drop path).
+    fn diverge_dropline_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
         debug_assert!(
-            matches!(
-                self.cfg.block_data(yield_block).terminator().kind,
-                TerminatorKind::Yield { .. }
-            ),
-            "coroutine_drop_cleanup called on block with non-yield terminator."
+            self.coroutine.is_some(),
+            "diverge_dropline_target is valid only for coroutines"
         );
-        let (uncached_scope, mut cached_drop) = self
-            .scopes
-            .scopes
+        let target = self.scopes.scope_index(target_scope, span);
+        let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
             .iter()
             .enumerate()
             .rev()
@@ -1336,13 +1360,34 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             })
             .unwrap_or((0, ROOT_NODE));
 
-        for scope in &mut self.scopes.scopes[uncached_scope..] {
+        if uncached_scope > target {
+            return cached_drop;
+        }
+
+        for scope in &mut self.scopes.scopes[uncached_scope..=target] {
             for drop in &scope.drops {
                 cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
             }
             scope.cached_coroutine_drop_block = Some(cached_drop);
         }
 
+        cached_drop
+    }
+
+    /// Sets up a path that performs all required cleanup for dropping a
+    /// coroutine, starting from the given block that ends in
+    /// [TerminatorKind::Yield].
+    ///
+    /// This path terminates in CoroutineDrop.
+    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
+        debug_assert!(
+            matches!(
+                self.cfg.block_data(yield_block).terminator().kind,
+                TerminatorKind::Yield { .. }
+            ),
+            "coroutine_drop_cleanup called on block with non-yield terminator."
+        );
+        let cached_drop = self.diverge_dropline();
         self.scopes.coroutine_drops.add_entry_point(yield_block, cached_drop);
     }
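
`diverge_dropline_target` mirrors `diverge_cleanup_target`: walk outward from the target scope to find the innermost scope whose coroutine-drop chain is already cached, then append the drops of any still-uncached scopes and memoize the result on each scope as you go. The self-contained sketch below models that walk over toy types (the real `DropTree`, `Scope`, and `DropIdx` are rustc-internal; the seed node and `u32` locals here are illustrative):

```rust
// Minimal model of the caching walk. Nodes form a reverse-linked tree:
// each entry records the drop that runs *after* it.
type DropIdx = usize;
const ROOT_NODE: DropIdx = 0;

struct DropTree {
    drops: Vec<(u32, DropIdx)>, // (local to drop, next drop in the chain)
}

impl DropTree {
    fn add_drop(&mut self, local: u32, next: DropIdx) -> DropIdx {
        self.drops.push((local, next));
        self.drops.len() - 1
    }
}

struct Scope {
    drops: Vec<u32>,
    cached_coroutine_drop_block: Option<DropIdx>,
}

fn diverge_dropline_target(scopes: &mut [Scope], tree: &mut DropTree, target: usize) -> DropIdx {
    // Find the innermost scope at or below `target` with a cached chain.
    let (uncached_scope, mut cached_drop) = scopes[..=target]
        .iter()
        .enumerate()
        .rev()
        .find_map(|(i, s)| s.cached_coroutine_drop_block.map(|c| (i + 1, c)))
        .unwrap_or((0, ROOT_NODE));
    if uncached_scope > target {
        return cached_drop; // everything up to `target` was already cached
    }
    // Extend the chain through the uncached scopes, memoizing as we go.
    for scope in &mut scopes[uncached_scope..=target] {
        for &local in &scope.drops {
            cached_drop = tree.add_drop(local, cached_drop);
        }
        scope.cached_coroutine_drop_block = Some(cached_drop);
    }
    cached_drop
}

fn main() {
    let mut tree = DropTree { drops: vec![(0, ROOT_NODE)] }; // illustrative root
    let mut scopes = vec![
        Scope { drops: vec![1], cached_coroutine_drop_block: None },
        Scope { drops: vec![2, 3], cached_coroutine_drop_block: None },
    ];
    // Chain for locals 1 -> 2 -> 3 is appended after the root.
    assert_eq!(diverge_dropline_target(&mut scopes, &mut tree, 1), 3);
}
```
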
@@ -1436,18 +1481,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 /// * `unwind_to`, describes the drops that would occur at this point in the code if a
 ///    panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
 ///    instructions on unwinding)
+/// * `dropline_to`, describes the drops that would occur at this point in the code if a
+///    coroutine drop occurred.
 /// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
 /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
-fn build_scope_drops<'tcx>(
+fn build_scope_drops<'tcx, F>(
     cfg: &mut CFG<'tcx>,
     unwind_drops: &mut DropTree,
+    coroutine_drops: &mut DropTree,
     scope: &Scope,
     block: BasicBlock,
     unwind_to: DropIdx,
+    dropline_to: Option<DropIdx>,
     storage_dead_on_unwind: bool,
     arg_count: usize,
-) -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?})", block, scope);
+    is_async_drop: F,
+) -> BlockAnd<()>
+where
+    F: Fn(Local) -> bool,
+{
+    debug!("build_scope_drops({:?} -> {:?}), dropline_to={:?}", block, scope, dropline_to);
 
     // Build up the drops in evaluation order. The end result will
     // look like:
@@ -1480,6 +1533,9 @@ fn build_scope_drops<'tcx>(
     // will branch to `drops[n]`.
     let mut block = block;
 
+    // `dropline_to` indicates what needs to be dropped should a coroutine drop occur.
+    let mut dropline_to = dropline_to;
+
     for drop_data in scope.drops.iter().rev() {
         let source_info = drop_data.source_info;
         let local = drop_data.local;
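
`build_scope_drops` is a free function that already holds `&mut` borrows carved out of the builder (`cfg` plus both drop trees), so it cannot also take `&self`; the new `is_async_drop: F` parameter hands it just the predicate it needs, and `dropline_to` becomes a mutable cursor alongside `unwind_to`. A minimal sketch of this borrow-splitting pattern, on a hypothetical builder whose field names are illustrative and not rustc's:

```rust
struct Builder {
    cfg: Vec<u32>,          // stand-in for the MIR CFG
    unwind_drops: Vec<u32>, // stand-in for a drop tree
    locals: Vec<bool>,      // stand-in: "does local i need async drop?"
}

// The predicate arrives as `F: Fn(usize) -> bool`, so this free function
// never needs the whole builder, only the borrows it is handed.
fn build_scope_drops<F: Fn(usize) -> bool>(
    cfg: &mut Vec<u32>,
    unwind_drops: &mut Vec<u32>,
    is_async_drop: F,
) {
    cfg.push(0);
    unwind_drops.push(0);
    if is_async_drop(0) {
        // ...here the real code registers a coroutine-drop entry point...
    }
}

impl Builder {
    fn leave_top_scope(&mut self) {
        // Borrow only `locals` for the closure; `cfg` and `unwind_drops`
        // stay free for the simultaneous `&mut` borrows below.
        let locals = &self.locals;
        build_scope_drops(&mut self.cfg, &mut self.unwind_drops, |l| locals[l]);
    }
}

fn main() {
    let mut b = Builder { cfg: vec![], unwind_drops: vec![], locals: vec![true] };
    b.leave_top_scope();
    assert_eq!(b.cfg.len(), 1);
}
```
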
@@ -1496,6 +1552,12 @@ fn build_scope_drops<'tcx>(
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                 unwind_to = unwind_drops.drops[unwind_to].next;
 
+                if let Some(idx) = dropline_to {
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                }
+
                 // If the operand has been moved, and we are not on an unwind
                 // path, then don't generate the drop. (We only take this into
                 // account for non-unwind paths so as not to disturb the
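
Inside the per-drop loop, the dropline cursor advances in lockstep with the unwind cursor: each emitted drop moves both to their `next` link, so every path stays aligned on the same local, which is exactly what the debug assertions check. A toy sketch of the lockstep walk, with chains represented as `(local, next)` pairs instead of `DropTree` nodes:

```rust
// Each entry is (local, index of the next drop). Index 0 is the root.
fn main() {
    let unwind: Vec<(u32, usize)> = vec![(0, 0), (7, 0), (8, 1)];
    let dropline: Vec<(u32, usize)> = vec![(0, 0), (7, 0), (8, 1)];

    let mut unwind_to = 2; // start at the innermost drop
    let mut dropline_to = Some(2);

    for &local in &[8u32, 7] {
        // Both cursors must name the same local as the drop being emitted.
        assert_eq!(unwind[unwind_to].0, local);
        unwind_to = unwind[unwind_to].1;
        if let Some(idx) = dropline_to {
            assert_eq!(dropline[idx].0, local);
            dropline_to = Some(dropline[idx].1);
        }
    }
}
```
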
@@ -1505,6 +1567,12 @@ fn build_scope_drops<'tcx>(
                 }
 
                 unwind_drops.add_entry_point(block, unwind_to);
+                if let Some(to) = dropline_to
+                    && is_async_drop(local)
+                {
+                    coroutine_drops.add_entry_point(block, to);
+                }
+
                 let next = cfg.start_new_block();
                 cfg.terminate(
                     block,
@@ -1562,6 +1630,11 @@ fn build_scope_drops<'tcx>(
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                     unwind_to = unwind_drops.drops[unwind_to].next;
                 }
+                if let Some(idx) = dropline_to {
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                }
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > arg_count);
                 cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
@@ -1620,6 +1693,39 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
                 }
             }
         }
+        // Link the exit drop tree to the dropline drop tree (coroutine drop path) for async drops.
+        if is_coroutine
+            && drops.drops.iter().any(|DropNode { data, next: _ }| {
+                data.kind == DropKind::Value && self.is_async_drop(data.local)
+            })
+        {
+            let dropline_target = self.diverge_dropline_target(else_scope, span);
+            let mut dropline_indices = IndexVec::from_elem_n(dropline_target, 1);
+            for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+                match drop_data.data.kind {
+                    DropKind::Storage => {
+                        let coroutine_drop = self
+                            .scopes
+                            .coroutine_drops
+                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
+                        dropline_indices.push(coroutine_drop);
+                    }
+                    DropKind::Value => {
+                        let coroutine_drop = self
+                            .scopes
+                            .coroutine_drops
+                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
+                        if self.is_async_drop(drop_data.data.local) {
+                            self.scopes.coroutine_drops.add_entry_point(
+                                blocks[drop_idx].unwrap(),
+                                dropline_indices[drop_data.next],
+                            );
+                        }
+                        dropline_indices.push(coroutine_drop);
+                    }
+                }
+            }
+        }
         blocks[ROOT_NODE].map(BasicBlock::unit)
     }
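
This block replays the exit tree's nodes into the coroutine-drop tree while building `dropline_indices`, a table translating exit-tree indices into the new tree's indices; each node's `next` link must be remapped through that table before insertion, since the two trees number their nodes differently. A stripped-down sketch of the remapping with plain `Vec`s in place of `IndexVec` and `DropTree` (node layout and the seed value are illustrative):

```rust
type DropIdx = usize;

#[derive(Clone, Copy)]
enum DropKind { Storage, Value }

#[derive(Clone, Copy)]
struct DropNode { kind: DropKind, local: u32, next: DropIdx }

fn main() {
    // Exit tree: node 0 is the root; every node links to its parent.
    let exit_drops = vec![
        DropNode { kind: DropKind::Storage, local: 0, next: 0 }, // root
        DropNode { kind: DropKind::Value, local: 1, next: 0 },
        DropNode { kind: DropKind::Storage, local: 2, next: 1 },
    ];
    let mut coroutine_drops: Vec<DropNode> = Vec::new();

    // Seed with the dropline target the new chain hangs off of.
    let dropline_target: DropIdx = 0;
    let mut dropline_indices = vec![dropline_target];

    for drop_data in exit_drops.iter().skip(1) {
        // `drop_data.next` indexes the exit tree; only that link needs
        // translating through the table before the node is re-added.
        let remapped_next = dropline_indices[drop_data.next];
        coroutine_drops.push(DropNode { next: remapped_next, ..*drop_data });
        dropline_indices.push(coroutine_drops.len() - 1);
    }
    assert_eq!(dropline_indices, vec![0, 0, 1]);
}
```
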
@@ -1665,9 +1771,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         // to be captured by the coroutine. I'm not sure how important this
         // optimization is, but it is here.
         for (drop_idx, drop_node) in drops.drops.iter_enumerated() {
-            if let DropKind::Value = drop_node.data.kind {
+            if let DropKind::Value = drop_node.data.kind
+                && let Some(bb) = blocks[drop_idx]
+            {
                 debug_assert!(drop_node.next < drops.drops.next_index());
-                drops.entry_points.push((drop_node.next, blocks[drop_idx].unwrap()));
+                drops.entry_points.push((drop_node.next, bb));
             }
         }
         Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
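
This loop now skips drops whose block was never materialized instead of unwrapping unconditionally. The diff expresses it as a let-chain (`if let ... && let ...`); the same shape in plain stable pattern-matching style, as a tiny standalone illustration:

```rust
fn main() {
    let blocks: Vec<Option<u32>> = vec![Some(1), None, Some(3)];
    let mut entry_points = Vec::new();
    for (idx, slot) in blocks.iter().enumerate() {
        // Bind and test in one condition rather than `unwrap()`ing later;
        // absent blocks are simply skipped.
        if let Some(bb) = slot {
            entry_points.push((idx, *bb));
        }
    }
    assert_eq!(entry_points, vec![(0, 1), (2, 3)]);
}
```
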
@@ -1721,6 +1829,8 @@ impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
         let term = cfg.block_data_mut(from).terminator_mut();
         if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
             *drop = Some(to);
+        } else if let TerminatorKind::Drop { ref mut drop, .. } = term.kind {
+            *drop = Some(to);
         } else {
             span_bug!(
                 term.source_info.span,
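
With async drops, the coroutine-drop tree can be entered not only from a `Yield` terminator but also from a `Drop` terminator inside the coroutine, so the `CoroutineDrop` tree builder now patches either terminator's `drop` edge. A sketch of the widened patch with a toy enum in place of MIR's `TerminatorKind`:

```rust
#[derive(Debug)]
enum Term {
    Yield { drop: Option<u32> },
    Drop { drop: Option<u32> },
    Return,
}

fn link_entry_point(term: &mut Term, to: u32) {
    match term {
        // Both suspension points and in-coroutine drops may continue
        // into the coroutine-drop path.
        Term::Yield { drop } | Term::Drop { drop } => *drop = Some(to),
        other => panic!("cannot enter the coroutine drop tree from {other:?}"),
    }
}

fn main() {
    let mut t = Term::Drop { drop: None };
    link_entry_point(&mut t, 42);
    assert!(matches!(t, Term::Drop { drop: Some(42) }));
}
```
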