@@ -3,7 +3,7 @@
 use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::iter;
-use std::ops::{Deref, DerefMut, Range};
+use std::ops::{Deref, Range};
 use std::ptr;
 
 use rustc_ast::Mutability;
@@ -25,7 +25,7 @@ use crate::ty;
 /// module provides higher-level access.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
 #[derive(HashStable)]
-pub struct Allocation<Tag = (), Extra = ()> {
+pub struct Allocation<Tag = AllocId, Extra = ()> {
     /// The actual bytes of the allocation.
     /// Note that the bytes of a pointer represent the offset of the pointer.
     bytes: Vec<u8>,
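
Note the new default: `Tag = AllocId` replaces `Tag = ()`, so a bare `Allocation` is now one whose stored pointers carry `AllocId` provenance rather than unit tags. A minimal standalone sketch of the defaulted-type-parameter pattern this relies on (`BasicProv` and `Alloc` are made-up stand-ins for illustration, not rustc types):

```rust
// Toy stand-in for `AllocId`.
#[derive(Clone, Copy, Debug)]
struct BasicProv(u64);

#[derive(Debug)]
struct Alloc<Tag = BasicProv, Extra = ()> {
    bytes: Vec<u8>,
    // offset of a stored pointer -> its provenance tag
    relocations: Vec<(usize, Tag)>,
    extra: Extra,
}

fn main() {
    // Bare `Alloc` now means `Alloc<BasicProv, ()>`; an interpreter can still
    // substitute a richer Tag type explicitly.
    let a: Alloc = Alloc { bytes: vec![0; 8], relocations: vec![(0, BasicProv(1))], extra: () };
    println!("{:?}", a);
}
```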
@@ -154,26 +154,32 @@ impl<Tag> Allocation<Tag> {
     }
 }
 
-impl Allocation<()> {
-    /// Add Tag and Extra fields
-    pub fn with_tags_and_extra<T, E>(
+impl Allocation {
+    /// Convert Tag and add Extra fields
+    pub fn convert_tag_add_extra<Tag, Extra>(
         self,
-        mut tagger: impl FnMut(AllocId) -> T,
-        extra: E,
-    ) -> Allocation<T, E> {
+        cx: &impl HasDataLayout,
+        extra: Extra,
+        mut tagger: impl FnMut(Pointer<AllocId>) -> Pointer<Tag>,
+    ) -> Allocation<Tag, Extra> {
+        // Compute new pointer tags, which also adjusts the bytes.
+        let mut bytes = self.bytes;
+        let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
+        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
+        let endian = cx.data_layout().endian;
+        for &(offset, alloc_id) in self.relocations.iter() {
+            let idx = offset.bytes_usize();
+            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
+            let bits = read_target_uint(endian, ptr_bytes).unwrap();
+            let (ptr_tag, ptr_offset) =
+                tagger(Pointer::new(alloc_id, Size::from_bytes(bits))).into_parts();
+            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
+            new_relocations.push((offset, ptr_tag));
+        }
+        // Create allocation.
         Allocation {
-            bytes: self.bytes,
-            relocations: Relocations::from_presorted(
-                self.relocations
-                    .iter()
-                    // The allocations in the relocations (pointers stored *inside* this allocation)
-                    // all get the base pointer tag.
-                    .map(|&(offset, ((), alloc))| {
-                        let tag = tagger(alloc);
-                        (offset, (tag, alloc))
-                    })
-                    .collect(),
-            ),
+            bytes,
+            relocations: Relocations::from_presorted(new_relocations),
             init_mask: self.init_mask,
             align: self.align,
             mutability: self.mutability,
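
The key behavioral change here: the tagger now receives a full `Pointer<AllocId>` and may rewrite the pointer's offset as well as its tag, which is why the bytes are adjusted in place. A self-contained sketch of the same read-retag-writeback loop, under assumed 8-byte little-endian pointers and toy `AllocId`/`Tagged` types (not rustc's):

```rust
use std::convert::TryInto;

#[derive(Clone, Copy, Debug)]
struct AllocId(u64);

#[derive(Clone, Copy, Debug)]
struct Tagged {
    alloc: AllocId,
    epoch: u32,
}

fn convert_tags(
    mut bytes: Vec<u8>,
    relocations: Vec<(usize, AllocId)>,
    mut tagger: impl FnMut(AllocId, u64) -> (Tagged, u64),
) -> (Vec<u8>, Vec<(usize, Tagged)>) {
    let mut new_relocations = Vec::with_capacity(relocations.len());
    for (offset, alloc_id) in relocations {
        // Read the stored pointer's offset bits, let the tagger rewrite
        // provenance and offset, then write the offset bits back.
        let old = u64::from_le_bytes(bytes[offset..offset + 8].try_into().unwrap());
        let (tag, new) = tagger(alloc_id, old);
        bytes[offset..offset + 8].copy_from_slice(&new.to_le_bytes());
        new_relocations.push((offset, tag));
    }
    (bytes, new_relocations)
}

fn main() {
    let (bytes, relocs) = convert_tags(vec![0u8; 8], vec![(0, AllocId(7))], |alloc, offset| {
        (Tagged { alloc, epoch: 1 }, offset)
    });
    println!("{:02x?} {:?}", bytes, relocs);
}
```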
@@ -279,6 +285,9 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
     pub fn get_bytes_mut_ptr(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> *mut [u8] {
         self.mark_init(range, true);
+        // This also clears relocations that just overlap with the written range. So writing to some
+        // byte can de-initialize its neighbors! See
+        // <https://github.com/rust-lang/rust/issues/87184> for details.
         self.clear_relocations(cx, range);
 
         assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
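
The new comment deserves a toy demonstration: provenance is tracked per stored pointer but cleared per overlapping byte range, so a one-byte write can strip provenance from all the bytes of a neighboring pointer. A hedged standalone sketch, with a `BTreeMap` standing in for rustc's `SortedMap` and 8-byte pointers assumed:

```rust
use std::collections::BTreeMap;

// Toy model: provenance lives in a side table keyed by the byte offset of the
// stored pointer's first byte.
fn clear_relocations(relocations: &mut BTreeMap<usize, u64>, start: usize, end: usize) {
    const PTR_SIZE: usize = 8;
    // Any pointer whose bytes overlap [start, end) loses its provenance,
    // even if only one of its bytes is actually written.
    let first = start.saturating_sub(PTR_SIZE - 1);
    let doomed: Vec<usize> = relocations.range(first..end).map(|(&off, _)| off).collect();
    for off in doomed {
        relocations.remove(&off);
    }
}

fn main() {
    let mut relocs = BTreeMap::from([(0usize, 42u64)]); // pointer stored at bytes 0..8
    clear_relocations(&mut relocs, 3, 4); // write a single byte in the middle
    assert!(relocs.is_empty()); // the whole pointer lost its provenance
}
```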
@@ -321,7 +330,11 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         range: AllocRange,
     ) -> AllocResult<ScalarMaybeUninit<Tag>> {
-        // `get_bytes_unchecked` tests relocation edges.
+        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
+        // We deliberately error when loading data that partially has provenance, or partially
+        // initialized data (that's the check below), into a scalar. The LLVM semantics of this are
+        // unclear so we are conservative. See <https://github.com/rust-lang/rust/issues/69488> for
+        // further discussion.
         let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
         // Uninit check happens *after* we established that the alignment is correct.
         // We must not return `Ok()` for unaligned pointers!
@@ -339,9 +352,9 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
             self.check_relocations(cx, range)?;
         } else {
             // Maybe a pointer.
-            if let Some(&(tag, alloc_id)) = self.relocations.get(&range.start) {
-                let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
-                return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
+            if let Some(&prov) = self.relocations.get(&range.start) {
+                let ptr = Pointer::new(prov, Size::from_bytes(bits));
+                return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
             }
         }
         // We don't. Just return the bits.
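
Read side in miniature: the bits in memory are only the pointer's offset, and provenance is rejoined from the side table on load. A standalone sketch under the same toy assumptions as above (8-byte LE pointers, `u64` provenance):

```rust
use std::collections::BTreeMap;
use std::convert::TryInto;

#[derive(Debug)]
enum Scalar {
    Int(u128),
    Ptr { prov: u64, offset: u64 }, // toy provenance: a bare u64
}

// Toy read: a relocation must start exactly at the load offset for the
// bytes to be reassembled into a pointer; otherwise they are plain bits.
fn read_scalar(bytes: &[u8], relocations: &BTreeMap<usize, u64>, at: usize) -> Scalar {
    let bits = u64::from_le_bytes(bytes[at..at + 8].try_into().unwrap());
    match relocations.get(&at) {
        Some(&prov) => Scalar::Ptr { prov, offset: bits },
        None => Scalar::Int(bits.into()),
    }
}

fn main() {
    let relocs = BTreeMap::from([(0usize, 42u64)]);
    // Memory holds only the offset (7); provenance (42) is rejoined on load.
    println!("{:?}", read_scalar(&7u64.to_le_bytes(), &relocs, 0));
}
```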
@@ -371,18 +384,23 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
             }
         };
 
-        let bytes = match val.to_bits_or_ptr(range.size, cx) {
-            Err(val) => u128::from(val.offset.bytes()),
-            Ok(data) => data,
+        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
+        // as-is into memory.
+        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size) {
+            Err(val) => {
+                let (provenance, offset) = val.into_parts();
+                (u128::from(offset.bytes()), Some(provenance))
+            }
+            Ok(data) => (data, None),
         };
 
         let endian = cx.data_layout().endian;
         let dst = self.get_bytes_mut(cx, range);
         write_target_uint(endian, dst, bytes).unwrap();
 
         // See if we have to also write a relocation.
-        if let Scalar::Ptr(val) = val {
-            self.relocations.insert(range.start, (val.tag, val.alloc_id));
+        if let Some(provenance) = provenance {
+            self.relocations.0.insert(range.start, provenance);
         }
 
         Ok(())
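
The write path is the mirror image: split the scalar into raw bits plus optional provenance up front, store the bits, then record the provenance. A standalone sketch pairing with the `read_scalar` toy above (same assumed types, redeclared for self-containment):

```rust
use std::collections::BTreeMap;

enum Scalar {
    Int(u128),
    Ptr { prov: u64, offset: u64 },
}

// Toy write: decide (bits to store, provenance to remember) before touching
// memory, so the two side effects stay in sync.
fn write_scalar(bytes: &mut [u8], relocations: &mut BTreeMap<usize, u64>, at: usize, val: Scalar) {
    let (bits, provenance) = match val {
        Scalar::Ptr { prov, offset } => (u128::from(offset), Some(prov)),
        Scalar::Int(data) => (data, None),
    };
    bytes[at..at + 8].copy_from_slice(&(bits as u64).to_le_bytes());
    if let Some(prov) = provenance {
        relocations.insert(at, prov);
    }
}

fn main() {
    let (mut bytes, mut relocs) = (vec![0u8; 8], BTreeMap::new());
    write_scalar(&mut bytes, &mut relocs, 0, Scalar::Ptr { prov: 42, offset: 7 });
    assert_eq!(relocs.get(&0), Some(&42)); // provenance landed in the side table
}
```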
@@ -392,11 +410,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
 /// Relocations.
 impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     /// Returns all relocations overlapping with the given pointer-offset pair.
-    pub fn get_relocations(
-        &self,
-        cx: &impl HasDataLayout,
-        range: AllocRange,
-    ) -> &[(Size, (Tag, AllocId))] {
+    pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
@@ -446,7 +460,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         }
 
         // Forget all the relocations.
-        self.relocations.remove_range(first..last);
+        self.relocations.0.remove_range(first..last);
     }
 
     /// Errors if there are relocations overlapping with the edges of the
@@ -582,39 +596,33 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
     }
 }
 
-/// Relocations.
+/// "Relocations" stores the provenance information of pointers stored in memory.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
-pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);
+pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
 
-impl<Tag, Id> Relocations<Tag, Id> {
+impl<Tag> Relocations<Tag> {
     pub fn new() -> Self {
         Relocations(SortedMap::new())
     }
 
     // The caller must guarantee that the given relocations are already sorted
     // by address and contain no duplicates.
-    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
+    pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
         Relocations(SortedMap::from_presorted_elements(r))
     }
 }
 
 impl<Tag> Deref for Relocations<Tag> {
-    type Target = SortedMap<Size, (Tag, AllocId)>;
+    type Target = SortedMap<Size, Tag>;
 
     fn deref(&self) -> &Self::Target {
         &self.0
     }
 }
 
-impl<Tag> DerefMut for Relocations<Tag> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
-    }
-}
-
 /// A partial, owned list of relocations to transfer into another allocation.
 pub struct AllocationRelocations<Tag> {
-    relative_relocations: Vec<(Size, (Tag, AllocId))>,
+    relative_relocations: Vec<(Size, Tag)>,
 }
 
 impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
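
With `DerefMut` removed, reads still flow ergonomically through `Deref`, but every mutation now has to name the field explicitly (`self.relocations.0.insert(...)` and friends in the hunks above), which makes provenance-mutating sites easy to grep for. A minimal sketch of that read-only newtype pattern, with toy types in place of rustc's:

```rust
use std::collections::BTreeMap;
use std::ops::Deref;

// Newtype over the provenance map: Deref gives convenient reads, while
// mutation must go through the visible `.0` field.
struct Relocations(BTreeMap<usize, u64>);

impl Deref for Relocations {
    type Target = BTreeMap<usize, u64>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let mut relocs = Relocations(BTreeMap::new());
    relocs.0.insert(0, 42); // writes are explicit...
    assert_eq!(relocs.get(&0), Some(&42)); // ...reads go through Deref
}
```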
@@ -652,7 +660,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
     /// to be clear of relocations.
     pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
-        self.relocations.insert_presorted(relocations.relative_relocations);
+        self.relocations.0.insert_presorted(relocations.relative_relocations);
     }
 }