@@ -1396,12 +1396,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 }
 
 /// Builds drops for `pop_scope` and `leave_top_scope`.
+///
+/// # Parameters
+///
+/// * `unwind_drops`, the drop tree data structure storing what needs to be cleaned up if unwind occurs
+/// * `scope`, describes the drops that will occur on exiting the scope in regular execution
+/// * `block`, the block to branch to once drops are complete (assuming no unwind occurs)
+/// * `unwind_to`, describes the drops that would occur at this point in the code if a
+///   panic occurred (a subset of the drops in `scope`, since we sometimes elide `StorageDead` and other
+///   instructions on unwinding)
+/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
+/// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
 fn build_scope_drops<'tcx>(
     cfg: &mut CFG<'tcx>,
     unwind_drops: &mut DropTree,
     scope: &Scope,
-    mut block: BasicBlock,
-    mut unwind_to: DropIdx,
+    block: BasicBlock,
+    unwind_to: DropIdx,
     storage_dead_on_unwind: bool,
     arg_count: usize,
 ) -> BlockAnd<()> {
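The signature change above drops `mut` from `block` and `unwind_to`; a later hunk reintroduces the mutability as documented rebindings inside the body. A minimal sketch of that rebinding pattern, using a made-up function rather than the rustc one:

```rust
// Toy example (not rustc code): move `mut` out of the parameter list and into
// a rebinding inside the body. The signature no longer advertises mutation,
// and the rebinding gives a natural place to document how the local evolves.
fn count_down_through(items: &[&str], start: usize) -> usize {
    // `cursor` walks backwards as we visit `items` in reverse; rebinding it
    // here (rather than taking `mut start`) keeps the mutation local.
    let mut cursor = start;
    for item in items.iter().rev() {
        cursor = cursor.saturating_sub(1);
        println!("visiting {item} at index {cursor}");
    }
    cursor
}

fn main() {
    assert_eq!(count_down_through(&["a", "b", "c"], 3), 0);
}
```

The apparent motivation in the PR is the same: callers read plain `block: BasicBlock` and `unwind_to: DropIdx`, and the mutation becomes an explicit, commented step of the algorithm.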
@@ -1425,6 +1436,18 @@ fn build_scope_drops<'tcx>(
     // statement. For other functions we don't worry about StorageDead. The
     // drops for the unwind path should have already been generated by
     // `diverge_cleanup_gen`.
+
+    // `unwind_to` indicates what needs to be dropped should unwinding occur.
+    // This is a subset of what needs to be dropped when exiting the scope.
+    // As we unwind the scope, we will also move `unwind_to` backwards to match,
+    // so that we can use it should a destructor panic.
+    let mut unwind_to = unwind_to;
+
+    // The block that we should jump to after drops complete. We start by building the final drop (`drops[n]`
+    // in the diagram above) and then build the drops (e.g., `drops[1]`, `drops[0]`) that come before it.
+    // `block` begins as the successor of `drops[n]` and then becomes `drops[n]`, so that `drops[n-1]`
+    // will branch to `drops[n]`.
+    let mut block = block;
 
     for drop_data in scope.drops.iter().rev() {
         let source_info = drop_data.source_info;
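The comment just added describes `unwind_to` as a subset of the scope's drops: in the hunks below, value drops are always matched against the unwind path, while storage entries are matched only when `storage_dead_on_unwind` is set. A small self-contained sketch of that subset relationship, with toy types standing in for the real `Scope`/`DropTree` data (the lint-related case from a later hunk is omitted for brevity):

```rust
// Toy model (hypothetical names and types, not the rustc ones) of the
// "subset" relationship: every `Value` drop in the scope also appears on the
// unwind path, while `Storage` bookkeeping appears there only when
// `storage_dead_on_unwind` is set.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum DropKind {
    Value,
    Storage,
}

#[derive(Clone, Copy, Debug)]
struct DropData {
    local: u32,
    kind: DropKind,
}

/// Returns the drops that would also run on the unwind path, preserving order.
fn unwind_subset(scope_drops: &[DropData], storage_dead_on_unwind: bool) -> Vec<DropData> {
    scope_drops
        .iter()
        .copied()
        .filter(|d| d.kind == DropKind::Value || storage_dead_on_unwind)
        .collect()
}

fn main() {
    let scope = [
        DropData { local: 1, kind: DropKind::Storage },
        DropData { local: 1, kind: DropKind::Value },
        DropData { local: 2, kind: DropKind::Storage },
    ];
    // With storage-dead elided on unwind, only the value drop of `_1` remains.
    assert_eq!(unwind_subset(&scope, false).len(), 1);
    // With storage-dead also emitted on unwind, all three entries remain.
    assert_eq!(unwind_subset(&scope, true).len(), 3);
    println!("ok");
}
```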
@@ -1435,6 +1458,9 @@ fn build_scope_drops<'tcx>(
                 // `unwind_to` should drop the value that we're about to
                 // schedule. If dropping this value panics, then we continue
                 // with the *next* value on the unwind path.
+                //
+                // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
+                // because `drops[n]` should unwind to `drops[n-1]`.
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                 debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
                 unwind_to = unwind_drops.drops[unwind_to].next;
@@ -1466,6 +1492,11 @@ fn build_scope_drops<'tcx>(
                     continue;
                 }
 
+                // As in the `DropKind::Storage` case below:
+                // normally lint-related drops are not emitted for unwind,
+                // so we can just leave `unwind_to` unmodified, but in some
+                // cases we emit things ALSO on the unwind path, so we need to adjust
+                // `unwind_to` in that case.
                 if storage_dead_on_unwind {
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
@@ -1481,6 +1512,11 @@ fn build_scope_drops<'tcx>(
                 });
             }
             DropKind::Storage => {
+                // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't
+                // need to adjust `unwind_to` on this path. However, in some specific cases
+                // we *do* emit storage-dead nodes on the unwind path, and in that case now that
+                // the storage-dead has completed, we need to adjust the `unwind_to` pointer
+                // so that any future drops we emit will not register storage-dead.
                 if storage_dead_on_unwind {
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.local, drop_data.local);
                     debug_assert_eq!(unwind_drops.drops[unwind_to].data.kind, drop_data.kind);
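Taken together, the added comments describe a lockstep walk: the scope's drops are visited in reverse, and `unwind_to` advances through the unwind-drop list BEFORE each value drop is emitted, while storage entries advance it only when `storage_dead_on_unwind` holds. A self-contained toy sketch of that bookkeeping, with hypothetical types in place of the real `DropTree` and no actual CFG construction:

```rust
// Toy sketch (hypothetical types, not the rustc ones) of the lockstep walk
// described in the comments above.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum DropKind {
    Value,
    Storage,
}

#[derive(Clone, Copy, Debug)]
struct DropData {
    local: u32,
    kind: DropKind,
}

/// One node of a toy unwind-drop list: the drop itself plus the index of the
/// entry that handles the *next* value if this drop panics.
struct UnwindNode {
    data: DropData,
    next: usize,
}

fn emit_scope_drops(
    scope_drops: &[DropData],
    unwind_drops: &[UnwindNode],
    unwind_to: usize,
    storage_dead_on_unwind: bool,
) -> Vec<String> {
    let mut unwind_to = unwind_to;
    let mut emitted = Vec::new();
    for drop_data in scope_drops.iter().rev() {
        match drop_data.kind {
            DropKind::Value => {
                // Advance BEFORE emitting: if this drop panics, cleanup
                // resumes at the next unwind entry, not at this one.
                assert_eq!(unwind_drops[unwind_to].data.local, drop_data.local);
                assert_eq!(unwind_drops[unwind_to].data.kind, drop_data.kind);
                unwind_to = unwind_drops[unwind_to].next;
                emitted.push(format!("drop(_{}) unwinding to {unwind_to}", drop_data.local));
            }
            DropKind::Storage => {
                // Only present on the unwind path when storage-dead is also
                // emitted on unwind, so only then does `unwind_to` move.
                if storage_dead_on_unwind {
                    assert_eq!(unwind_drops[unwind_to].data.local, drop_data.local);
                    assert_eq!(unwind_drops[unwind_to].data.kind, drop_data.kind);
                    unwind_to = unwind_drops[unwind_to].next;
                }
                emitted.push(format!("StorageDead(_{})", drop_data.local));
            }
        }
    }
    emitted
}

fn main() {
    // Scope with `_1` and `_2`: storage scheduled first, then the value drops.
    let scope = [
        DropData { local: 1, kind: DropKind::Storage },
        DropData { local: 1, kind: DropKind::Value },
        DropData { local: 2, kind: DropKind::Storage },
        DropData { local: 2, kind: DropKind::Value },
    ];
    // Unwind list holds only the value drops; index 0 is a sentinel root.
    let unwind = [
        UnwindNode { data: DropData { local: 0, kind: DropKind::Value }, next: 0 },
        UnwindNode { data: DropData { local: 1, kind: DropKind::Value }, next: 0 },
        UnwindNode { data: DropData { local: 2, kind: DropKind::Value }, next: 1 },
    ];
    // Start at the entry for `_2`, the last value still needing cleanup.
    for line in emit_scope_drops(&scope, &unwind, 2, false) {
        println!("{line}");
    }
}
```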