@@ -1538,13 +1538,9 @@ impl<T, A: Allocator> Vec<T, A> {
 
         impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
             fn drop(&mut self) {
-                /* This code gets executed either at the end of `dedup_by` or
-                 * when `same_bucket` panics */
+                /* This code gets executed when `same_bucket` panics */
 
-                /* SAFETY (if finishing successfully): self.read == len, so
-                 * no data is copied and length is set correctly */
-
-                /* SAFETY (if panicing): invariant guarantees that `read - write`
+                /* SAFETY: invariant guarantees that `read - write`
                  * and `len - read` never overflow and that the copy is always
                  * in-bounds. */
                 unsafe {
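This hunk narrows the drop guard's job: after the change, `FillGapOnDrop::drop` runs only when `same_bucket` unwinds, so a single SAFETY comment covering the panic case suffices. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of such a panic guard; `Guard`, `checkpoint`, and `process` are illustrative names, and the cleanup (truncating to a checkpoint) is deliberately simpler than the gap-filling in this diff:

use std::mem;

struct Guard<'a> {
    data: &'a mut Vec<u8>,
    // State needed to restore the invariant if we unwind mid-operation.
    checkpoint: usize,
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        // Runs only on panic; the success path `mem::forget`s the guard.
        self.data.truncate(self.checkpoint);
    }
}

fn process(data: &mut Vec<u8>) {
    let checkpoint = data.len();
    let guard = Guard { data, checkpoint };
    // ... work that may panic goes here ...
    guard.data.push(42);
    // Success: defuse the guard so the cleanup never runs.
    mem::forget(guard);
}

fn main() {
    let mut v = vec![1, 2, 3];
    process(&mut v);
    assert_eq!(v, [1, 2, 3, 42]);
}

Defusing the guard with `mem::forget` on the success path is what justifies the new comment's claim that the `Drop` body executes only when `same_bucket` panics.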
@@ -1553,7 +1549,7 @@ impl<T, A: Allocator> Vec<T, A> {
 
                     /* How many items were left when `same_bucket` panicked.
                      * Basically vec[read..].len() */
-                    let items_left = len - self.read;
+                    let items_left = len.wrapping_sub(self.read);
 
                     /* Pointer to first item in vec[write..write+items_left] slice */
                     let dropped_ptr = ptr.add(self.write);
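Both `wrapping_sub` changes in this commit (here and for `dropped` below) rely on the documented invariant: `read >= write` and `len >= read`, so the subtractions can never underflow. Swapping `-` for `wrapping_sub` therefore changes no observable result; what it presumably buys, since this runs inside `Drop` during unwinding, is removing the debug-mode overflow check and its extra panic path (a panic during unwinding aborts the process). A small sketch of the distinction:

fn main() {
    let (len, read): (usize, usize) = (10, 7);

    // While the invariant `read <= len` holds, both forms agree:
    assert_eq!(len - read, len.wrapping_sub(read));

    // `wrapping_sub` is also defined on underflow, so the compiler can
    // emit a bare subtraction with no overflow check and no panic path:
    assert_eq!(5usize.wrapping_sub(7), usize::MAX - 1);
}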
@@ -1566,15 +1562,14 @@ impl<T, A: Allocator> Vec<T, A> {
 
                     /* How many items have been already dropped
                      * Basically vec[read..write].len() */
-                    let dropped = self.read - self.write;
+                    let dropped = self.read.wrapping_sub(self.write);
 
                     self.vec.set_len(len - dropped);
                 }
             }
         }
 
         let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
-
         let ptr = gap.vec.as_mut_ptr();
 
         /* Drop items while going through Vec, it should be more efficient than
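With the guard armed before the scan, a panic inside `same_bucket` leaves the vector structurally valid: the gap of already-dropped elements is closed and the length corrected. A hypothetical usage sketch of that guarantee (the panicking comparator is contrived, and the exact surviving contents asserted here follow this implementation, not a documented API contract):

use std::panic::{self, AssertUnwindSafe};

fn main() {
    let mut v = vec![1, 1, 2, 3, 3, 4];

    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        let mut calls = 0;
        v.dedup_by(|a, b| {
            calls += 1;
            // Panic on the third comparison, mid-scan.
            if calls == 3 {
                panic!("boom");
            }
            a == b
        });
    }));
    assert!(result.is_err());

    // `FillGapOnDrop` closed the gap: one duplicate was dropped, the
    // unexamined tail was shifted down, and the length was fixed up.
    assert_eq!(v, [1, 2, 3, 3, 4]);
}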
@@ -1593,8 +1588,9 @@ impl<T, A: Allocator> Vec<T, A> {
                 } else {
                     let write_ptr = ptr.add(gap.write);
 
-                    /* Looks like doing just `copy` can be faster than
-                     * conditional `copy_nonoverlapping` */
+                    /* Because `read_ptr` can be equal to `write_ptr`, we either
+                     * have to use `copy` or conditional `copy_nonoverlapping`.
+                     * Looks like the first option is faster. */
                     ptr::copy(read_ptr, write_ptr, 1);
 
                     /* We have filled that place, so go further */
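The new comment names the actual constraint: when no duplicate has been found yet, `gap.read == gap.write`, so `read_ptr` and `write_ptr` point at the same element. `copy_nonoverlapping` (memcpy semantics) is undefined behavior for overlapping ranges, so it would need an `if read_ptr != write_ptr` guard, while `ptr::copy` (memmove semantics) handles exact overlap unconditionally and, per the comment, benchmarked faster than the branchy variant. A minimal sketch of the aliasing case:

use std::ptr;

fn main() {
    let mut buf = [10, 20, 30];
    let p = buf.as_mut_ptr();

    unsafe {
        // Source and destination fully overlap, as when read == write:
        // defined for `ptr::copy`, UB for `ptr::copy_nonoverlapping`.
        ptr::copy(p.add(1), p.add(1), 1);

        // Disjoint one-element copy, as when write < read:
        ptr::copy(p.add(2), p.add(1), 1);
    }

    assert_eq!(buf, [10, 30, 30]);
}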