@@ -209,7 +209,7 @@ const ROOT_NODE: DropIdx = DropIdx::ZERO;
 #[derive(Debug)]
 struct DropTree {
     /// Nodes in the drop tree, containing drop data and a link to the next node.
-    drops: IndexVec<DropIdx, DropNode>,
+    drop_nodes: IndexVec<DropIdx, DropNode>,
     /// Map for finding the index of an existing node, given its contents.
     existing_drops_map: FxHashMap<DropNodeKey, DropIdx>,
     /// Edges into the `DropTree` that need to be added once it's lowered.
@@ -230,7 +230,6 @@ struct DropNode {
 struct DropNodeKey {
     next: DropIdx,
     local: Local,
-    kind: DropKind,
 }

 impl Scope {
@@ -278,8 +277,8 @@ impl DropTree {
         let fake_source_info = SourceInfo::outermost(DUMMY_SP);
         let fake_data =
             DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
-        let drops = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]);
-        Self { drops, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() }
+        let drop_nodes = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]);
+        Self { drop_nodes, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() }
     }

     /// Adds a node to the drop tree, consisting of drop data and the index of
@@ -288,20 +287,20 @@ impl DropTree {
     /// If there is already an equivalent node in the tree, nothing is added, and
     /// that node's index is returned. Otherwise, the new node's index is returned.
     fn add_drop(&mut self, data: DropData, next: DropIdx) -> DropIdx {
-        let drops = &mut self.drops;
+        let drop_nodes = &mut self.drop_nodes;
         *self
             .existing_drops_map
-            .entry(DropNodeKey { next, local: data.local, kind: data.kind })
+            .entry(DropNodeKey { next, local: data.local })
             // Create a new node, and also add its index to the map.
-            .or_insert_with(|| drops.push(DropNode { data, next }))
+            .or_insert_with(|| drop_nodes.push(DropNode { data, next }))
     }

     /// Registers `from` as an entry point to this drop tree, at `to`.
     ///
     /// During [`Self::build_mir`], `from` will be linked to the corresponding
     /// block within the drop tree.
     fn add_entry_point(&mut self, from: BasicBlock, to: DropIdx) {
-        debug_assert!(to < self.drops.next_index());
+        debug_assert!(to < self.drop_nodes.next_index());
         self.entry_points.push((to, from));
     }

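As an aside, the deduplication in `add_drop` is the standard map-entry pattern: look up a `DropNodeKey` and only push a new `DropNode` when no equivalent node already exists, otherwise return the stored index. Below is a minimal standalone sketch of that interning scheme, using `std::collections::HashMap`, a plain `Vec`, and made-up stand-in types in place of rustc's `FxHashMap`, `IndexVec<DropIdx, DropNode>`, and `Local`; it is illustrative only, not the compiler's code.

use std::collections::HashMap;

// Made-up stand-ins for the compiler's `Local` and `DropIdx` index types.
type Local = u32;
type DropIdx = usize;

#[derive(Clone, Copy, Debug)]
struct DropNode {
    local: Local,
    next: DropIdx,
}

#[derive(Default)]
struct DropTree {
    // The real structure reserves index 0 for a root sentinel; omitted here.
    drop_nodes: Vec<DropNode>,
    existing_drops_map: HashMap<(DropIdx, Local), DropIdx>,
}

impl DropTree {
    // Returns the index of an existing equivalent node, or pushes a new one.
    fn add_drop(&mut self, local: Local, next: DropIdx) -> DropIdx {
        let drop_nodes = &mut self.drop_nodes;
        *self.existing_drops_map.entry((next, local)).or_insert_with(|| {
            drop_nodes.push(DropNode { local, next });
            drop_nodes.len() - 1
        })
    }
}

fn main() {
    let mut tree = DropTree::default();
    let a = tree.add_drop(1, 0); // `0` stands in for the root sentinel
    let b = tree.add_drop(2, a);
    // An equivalent (local, next) pair reuses the existing node instead of growing the tree.
    assert_eq!(tree.add_drop(2, a), b);
    assert_eq!(tree.drop_nodes.len(), 2);
    println!("{:?}", tree.drop_nodes);
}
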
@@ -341,10 +340,10 @@ impl DropTree {
             Own,
         }

-        let mut blocks = IndexVec::from_elem(None, &self.drops);
+        let mut blocks = IndexVec::from_elem(None, &self.drop_nodes);
         blocks[ROOT_NODE] = root_node;

-        let mut needs_block = IndexVec::from_elem(Block::None, &self.drops);
+        let mut needs_block = IndexVec::from_elem(Block::None, &self.drop_nodes);
         if root_node.is_some() {
             // In some cases (such as drops for `continue`) the root node
             // already has a block. In this case, make sure that we don't
@@ -356,7 +355,7 @@ impl DropTree {
         let entry_points = &mut self.entry_points;
         entry_points.sort();

-        for (drop_idx, drop_node) in self.drops.iter_enumerated().rev() {
+        for (drop_idx, drop_node) in self.drop_nodes.iter_enumerated().rev() {
             if entry_points.last().is_some_and(|entry_point| entry_point.0 == drop_idx) {
                 let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
                 needs_block[drop_idx] = Block::Own;
@@ -396,7 +395,7 @@ impl DropTree {
         cfg: &mut CFG<'tcx>,
         blocks: &IndexSlice<DropIdx, Option<BasicBlock>>,
     ) {
-        for (drop_idx, drop_node) in self.drops.iter_enumerated().rev() {
+        for (drop_idx, drop_node) in self.drop_nodes.iter_enumerated().rev() {
             let Some(block) = blocks[drop_idx] else { continue };
             match drop_node.data.kind {
                 DropKind::Value => {
@@ -726,11 +725,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             drops
         };

-        let drop_idx = self.scopes.scopes[scope_index + 1..]
-            .iter()
-            .flat_map(|scope| &scope.drops)
-            .fold(ROOT_NODE, |drop_idx, &drop| drops.add_drop(drop, drop_idx));
-
+        let mut drop_idx = ROOT_NODE;
+        for scope in &self.scopes.scopes[scope_index + 1..] {
+            for drop in &scope.drops {
+                drop_idx = drops.add_drop(*drop, drop_idx);
+            }
+        }
         drops.add_entry_point(block, drop_idx);

         // `build_drop_trees` doesn't have access to our source_info, so we
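
For what it's worth, the replaced fold and the nested loops build the same chain: starting from `ROOT_NODE`, every drop of every enclosing scope is linked onto the previous index, so the final `drop_idx` names the head of a chain covering all the scopes being exited. Below is a small self-contained sketch of that equivalence; the integer indices, the toy `add_drop` closure, and the label values are invented for illustration and are not the compiler's API.

fn main() {
    // Each inner Vec stands in for one scope's `drops` list; values are just labels.
    let scopes: Vec<Vec<u32>> = vec![vec![1, 2], vec![3], vec![4, 5]];

    // Toy `add_drop`: record the (drop, previous index) link and return the new index.
    // Index 0 is reserved for the root sentinel, so the first node gets index 1.
    let mut nodes: Vec<(u32, usize)> = Vec::new();
    let mut add_drop = |drop: u32, prev: usize| -> usize {
        nodes.push((drop, prev));
        nodes.len()
    };

    const ROOT_NODE: usize = 0;

    // Equivalent to the removed fold:
    //   scopes.iter().flatten().fold(ROOT_NODE, |idx, &d| add_drop(d, idx))
    let mut drop_idx = ROOT_NODE;
    for scope in &scopes {
        for &drop in scope {
            drop_idx = add_drop(drop, drop_idx);
        }
    }

    assert_eq!(drop_idx, 5);
    assert_eq!(nodes, vec![(1, 0), (2, 1), (3, 2), (4, 3), (5, 4)]);
}
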
@@ -829,9 +829,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         // `unwind_to` should drop the value that we're about to
                         // schedule. If dropping this value panics, then we continue
                         // with the *next* value on the unwind path.
-                        debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
-                        debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
-                        unwind_to = unwind_drops.drops[unwind_to].next;
+                        debug_assert_eq!(
+                            unwind_drops.drop_nodes[unwind_to].data.local,
+                            drop_data.local
+                        );
+                        debug_assert_eq!(
+                            unwind_drops.drop_nodes[unwind_to].data.kind,
+                            drop_data.kind
+                        );
+                        unwind_to = unwind_drops.drop_nodes[unwind_to].next;

                         let mut unwind_entry_point = unwind_to;

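These assertions check that the unwind chain is walked in lock-step with the drops being emitted: `unwind_to` always names the node for the value about to be scheduled, and following `.next` moves one link up toward the root. A toy illustration of that parent-pointer walk, using a plain `Vec` and `usize` indices instead of `IndexVec<DropIdx, DropNode>` (the node layout and values below are invented for the sketch):

// Each node stores the local it drops and the index of the next (outer) node.
struct Node {
    local: u32,
    next: usize,
}

fn main() {
    // Index 0 is the root sentinel; indices 1..=3 form the chain 3 -> 2 -> 1 -> 0.
    let drop_nodes = vec![
        Node { local: 0, next: 0 }, // root
        Node { local: 10, next: 0 },
        Node { local: 11, next: 1 },
        Node { local: 12, next: 2 },
    ];

    // Drops are emitted innermost-first (locals 12, 11, 10); the unwind cursor
    // must point at the matching node before each one, then advance to `next`.
    let mut unwind_to = 3;
    for &local in &[12, 11, 10] {
        assert_eq!(drop_nodes[unwind_to].local, local);
        unwind_to = drop_nodes[unwind_to].next;
    }
    assert_eq!(unwind_to, 0); // back at the root once every drop is scheduled
}
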
@@ -1551,14 +1557,14 @@ where
                 //
                 // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
                 // because `drops[n]` should unwind to `drops[n-1]`.
-                debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
-                debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
-                unwind_to = unwind_drops.drops[unwind_to].next;
+                debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local);
+                debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
+                unwind_to = unwind_drops.drop_nodes[unwind_to].next;

                 if let Some(idx) = dropline_to {
-                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
-                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
-                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                    debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drop_nodes[idx].next);
                 }

                 // If the operand has been moved, and we are not on an unwind
@@ -1598,9 +1604,12 @@ where
                 // cases we emit things ALSO on the unwind path, so we need to adjust
                 // `unwind_to` in that case.
                 if storage_dead_on_unwind {
-                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
-                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
-                    unwind_to = unwind_drops.drops[unwind_to].next;
+                    debug_assert_eq!(
+                        unwind_drops.drop_nodes[unwind_to].data.local,
+                        drop_data.local
+                    );
+                    debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drop_nodes[unwind_to].next;
                 }

                 // If the operand has been moved, and we are not on an unwind
@@ -1629,14 +1638,17 @@ where
                 // the storage-dead has completed, we need to adjust the `unwind_to` pointer
                 // so that any future drops we emit will not register storage-dead.
                 if storage_dead_on_unwind {
-                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
-                    debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
-                    unwind_to = unwind_drops.drops[unwind_to].next;
+                    debug_assert_eq!(
+                        unwind_drops.drop_nodes[unwind_to].data.local,
+                        drop_data.local
+                    );
+                    debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drop_nodes[unwind_to].next;
                 }
                 if let Some(idx) = dropline_to {
-                    debug_assert_eq!(coroutine_drops.drops[idx].data.local, drop_data.local);
-                    debug_assert_eq!(coroutine_drops.drops[idx].data.kind, drop_data.kind);
-                    dropline_to = Some(coroutine_drops.drops[idx].next);
+                    debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local);
+                    debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.kind, drop_data.kind);
+                    dropline_to = Some(coroutine_drops.drop_nodes[idx].next);
                 }
                 // Only temps and vars need their storage dead.
                 assert!(local.index() > arg_count);
@@ -1663,10 +1675,10 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         let is_coroutine = self.coroutine.is_some();

         // Link the exit drop tree to unwind drop tree.
-        if drops.drops.iter().any(|drop_node| drop_node.data.kind == DropKind::Value) {
+        if drops.drop_nodes.iter().any(|drop_node| drop_node.data.kind == DropKind::Value) {
             let unwind_target = self.diverge_cleanup_target(else_scope, span);
             let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
-            for (drop_idx, drop_node) in drops.drops.iter_enumerated().skip(1) {
+            for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated().skip(1) {
                 match drop_node.data.kind {
                     DropKind::Storage | DropKind::ForLint => {
                         if is_coroutine {
@@ -1695,35 +1707,29 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         }
         // Link the exit drop tree to dropline drop tree (coroutine drop path) for async drops
         if is_coroutine
-            && drops.drops.iter().any(|DropNode { data, next: _ }| {
+            && drops.drop_nodes.iter().any(|DropNode { data, next: _ }| {
                 data.kind == DropKind::Value && self.is_async_drop(data.local)
             })
         {
             let dropline_target = self.diverge_dropline_target(else_scope, span);
             let mut dropline_indices = IndexVec::from_elem_n(dropline_target, 1);
-            for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+            for (drop_idx, drop_data) in drops.drop_nodes.iter_enumerated().skip(1) {
+                let coroutine_drop = self
+                    .scopes
+                    .coroutine_drops
+                    .add_drop(drop_data.data, dropline_indices[drop_data.next]);
                 match drop_data.data.kind {
-                    DropKind::Storage | DropKind::ForLint => {
-                        let coroutine_drop = self
-                            .scopes
-                            .coroutine_drops
-                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
-                        dropline_indices.push(coroutine_drop);
-                    }
+                    DropKind::Storage | DropKind::ForLint => {}
                     DropKind::Value => {
-                        let coroutine_drop = self
-                            .scopes
-                            .coroutine_drops
-                            .add_drop(drop_data.data, dropline_indices[drop_data.next]);
                         if self.is_async_drop(drop_data.data.local) {
                             self.scopes.coroutine_drops.add_entry_point(
                                 blocks[drop_idx].unwrap(),
                                 dropline_indices[drop_data.next],
                             );
                         }
-                        dropline_indices.push(coroutine_drop);
                     }
                 }
+                dropline_indices.push(coroutine_drop);
             }
         }
         blocks[ROOT_NODE].map(BasicBlock::unit)
@@ -1769,11 +1775,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         // prevent drop elaboration from creating drop flags that would have
         // to be captured by the coroutine. I'm not sure how important this
        // optimization is, but it is here.
-        for (drop_idx, drop_node) in drops.drops.iter_enumerated() {
+        for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated() {
             if let DropKind::Value = drop_node.data.kind
                 && let Some(bb) = blocks[drop_idx]
             {
-                debug_assert!(drop_node.next < drops.drops.next_index());
+                debug_assert!(drop_node.next < drops.drop_nodes.next_index());
                 drops.entry_points.push((drop_node.next, bb));
             }
         }