@@ -6,16 +6,13 @@ use rustc_index::bit_set::DenseBitSet;
 const COMPRESSION_FACTOR: usize = 4;
 
 /// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
-/// pages (not mixed with Miri's own memory). This is very useful for native-lib mode.
+/// pages (not mixed with Miri's own memory). This is used in native-lib mode.
 #[derive(Debug)]
 pub struct IsolatedAlloc {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
-    /// the global allocator.
+    /// the global allocator. These pointers are used for "small" allocations.
     page_ptrs: Vec<*mut u8>,
-    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
-    /// with their size stored as the second element of the vector.
-    huge_ptrs: Vec<(*mut u8, usize)>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
     /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
@@ -25,6 +22,9 @@ pub struct IsolatedAlloc {
     /// indexing into it should be done with a value one-nth of the corresponding
     /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`).
     page_infos: Vec<DenseBitSet<usize>>,
+    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
+    /// with their size stored as the second element of the vector.
+    huge_ptrs: Vec<(*mut u8, usize)>,
     /// The host (not emulated) page size.
     page_size: usize,
 }
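
// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: how the `COMPRESSION_FACTOR`
// bookkeeping above maps byte offsets to bit indices. Standalone Rust with a
// plain `Vec<bool>` standing in for `DenseBitSet`; the 4096-byte page size is
// an assumption for illustration.
fn main() {
    const COMPRESSION_FACTOR: usize = 4;
    let page_size = 4096;
    // One bit per 4-byte chunk, so the bitmap has 1024 entries, not 4096.
    let page_info = vec![false; page_size / COMPRESSION_FACTOR];
    assert_eq!(page_info.len(), 1024);
    // A small allocation at byte offset 64 with size 12 occupies chunk
    // indices 16..19 (i.e. 64/4 up to (64 + 12)/4) in the bitmap.
    let (offset, size) = (64, 12);
    assert_eq!(offset / COMPRESSION_FACTOR, 16);
    assert_eq!(size / COMPRESSION_FACTOR, 3);
}
// ---------------------------------------------------------------------------
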
@@ -42,31 +42,23 @@ impl IsolatedAlloc {
         }
     }
 
-    /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
-        let page_layout = Layout::from_size_align(self.page_size, self.page_size).unwrap();
-        // SAFETY: The system page size, which is the layout size, cannot be 0
-        let page_ptr = unsafe { alloc::alloc(page_layout) };
-        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
-        assert!(self.page_size % COMPRESSION_FACTOR == 0);
-        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
-        self.page_ptrs.push(page_ptr);
-        (page_ptr, self.page_infos.last_mut().unwrap())
-    }
-
     /// For simplicity, we serve small allocations in multiples of COMPRESSION_FACTOR
     /// bytes with at least that alignment.
     #[inline]
-    fn normalized_layout(layout: Layout) -> (usize, usize) {
+    fn normalized_layout(layout: Layout) -> Layout {
         let align =
             if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
         let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
-        (size, align)
+        Layout::from_size_align(size, align).unwrap()
+    }
+
+    /// Returns the layout used to allocate the pages that hold small allocations.
+    #[inline]
+    fn page_layout(&self) -> Layout {
+        Layout::from_size_align(self.page_size, self.page_size).unwrap()
     }
 
     /// If the allocation is greater than a page, then round to the nearest page #.
-    /// Since we pass this into the global allocator, it's more useful to return
-    /// a `Layout` instead of a pair of usizes.
     #[inline]
     fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
         // Allocate in page-sized chunks
@@ -76,11 +68,11 @@ impl IsolatedAlloc {
         Layout::from_size_align(size, align).unwrap()
     }
 
-    /// Determines whether a given (size, align) should be sent to `alloc_huge` /
-    /// `dealloc_huge`.
+    /// Determines whether a given normalized (size, align) should be sent to
+    /// `alloc_huge` / `dealloc_huge`.
     #[inline]
-    fn is_huge_alloc(size: usize, align: usize, page_size: usize) -> bool {
-        align >= page_size || size >= page_size
+    fn is_huge_alloc(&self, layout: &Layout) -> bool {
+        layout.align() > self.page_size / 2 || layout.size() >= self.page_size / 2
     }
 
     /// Allocates memory as described in `Layout`. This memory should be deallocated
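
// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: the normalization arithmetic and
// the new huge-allocation cutoff, restated standalone (`max` replaces the
// if/else above; 4096-byte pages are an assumption for illustration).
use std::alloc::Layout;

fn main() {
    const COMPRESSION_FACTOR: usize = 4;
    let page_size = 4096usize;
    // Normalization: a 5-byte, align-1 request becomes 8 bytes at align 4.
    let layout = Layout::from_size_align(5, 1).unwrap();
    let align = layout.align().max(COMPRESSION_FACTOR);
    let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
    assert_eq!((size, align), (8, 4));
    // Routing: sizes at or above half a page now go to `alloc_huge`; the old
    // cutoff was a full page.
    assert!(2048 >= page_size / 2); // routed to the huge path
    assert!(2044 < page_size / 2); // stays on the bitmap pages
}
// ---------------------------------------------------------------------------
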
@@ -106,8 +98,8 @@ impl IsolatedAlloc {
     /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
     /// corresponds to the host pagesize.
     unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
-        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+        let layout = IsolatedAlloc::normalized_layout(layout);
+        if self.is_huge_alloc(&layout) {
             // SAFETY: Validity of `layout` upheld by caller; we checked that
             // the size and alignment are appropriate for being a huge alloc
             unsafe { self.alloc_huge(layout, zeroed) }
@@ -116,7 +108,7 @@ impl IsolatedAlloc {
                 // SAFETY: The value in `self.page_size` is used to allocate
                 // `page`, with page alignment
                 if let Some(ptr) =
-                    unsafe { Self::alloc_from_page(self.page_size, layout, page, pinfo, zeroed) }
+                    unsafe { Self::alloc_small(self.page_size, layout, page, pinfo, zeroed) }
                 {
                     return ptr;
                 }
@@ -129,44 +121,42 @@ impl IsolatedAlloc {
             let (page, pinfo) = self.add_page();
 
             // SAFETY: See comment on `alloc_from_page` above
-            unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
+            unsafe { Self::alloc_small(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
     }
 
     /// Used internally by `allocate` to abstract over some logic.
     ///
     /// SAFETY: `page` must be a page-aligned pointer to an allocated page,
     /// where the allocation is (at least) `page_size` bytes.
-    unsafe fn alloc_from_page(
+    unsafe fn alloc_small(
         page_size: usize,
         layout: Layout,
         page: *mut u8,
         pinfo: &mut DenseBitSet<usize>,
         zeroed: bool,
     ) -> Option<*mut u8> {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
-
         // Check every alignment-sized block and see if there exists a `size`
         // chunk of empty space i.e. forall idx . !pinfo.contains(idx / n)
-        for idx in (0..page_size).step_by(align) {
-            let idx_pinfo = idx / COMPRESSION_FACTOR;
-            let size_pinfo = size / COMPRESSION_FACTOR;
+        for offset in (0..page_size).step_by(layout.align()) {
+            let offset_pinfo = offset / COMPRESSION_FACTOR;
+            let size_pinfo = layout.size() / COMPRESSION_FACTOR;
             // DenseBitSet::contains() panics if the index is out of bounds
-            if pinfo.domain_size() < idx_pinfo + size_pinfo {
+            if pinfo.domain_size() < offset_pinfo + size_pinfo {
                 break;
             }
             // FIXME: is there a more efficient way to check whether the entire range is unset
             // in the bitset?
-            let range_avail = !(idx_pinfo..idx_pinfo + size_pinfo).any(|idx| pinfo.contains(idx));
+            let range_avail = !(offset_pinfo..offset_pinfo + size_pinfo).any(|i| pinfo.contains(i));
             if range_avail {
-                pinfo.insert_range(idx_pinfo..idx_pinfo + size_pinfo);
+                pinfo.insert_range(offset_pinfo..offset_pinfo + size_pinfo);
                 // SAFETY: We checked the available bytes after `idx` in the call
                 // to `domain_size` above and asserted there are at least `idx +
                 // layout.size()` bytes available and unallocated after it.
                 // `page` must point to the start of the page, so adding `idx`
                 // is safe per the above.
                 unsafe {
-                    let ptr = page.add(idx);
+                    let ptr = page.add(offset);
                     if zeroed {
                         // Only write the bytes we were specifically asked to
                         // zero out, even if we allocated more
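
// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: the first-fit scan in `alloc_small`
// restated over a `Vec<bool>` bitmap. Each probe lands on an alignment
// boundary; the allocation succeeds at the first run of clear bits.
fn first_fit(bitmap: &mut [bool], page_size: usize, size: usize, align: usize) -> Option<usize> {
    const COMPRESSION_FACTOR: usize = 4;
    for offset in (0..page_size).step_by(align) {
        let start = offset / COMPRESSION_FACTOR;
        let len = size / COMPRESSION_FACTOR;
        if bitmap.len() < start + len {
            break; // the run would extend past the end of the page
        }
        if !bitmap[start..start + len].iter().any(|&b| b) {
            bitmap[start..start + len].iter_mut().for_each(|b| *b = true);
            return Some(offset); // byte offset into the page
        }
    }
    None // caller falls through to `add_page`
}

fn main() {
    let mut bitmap = vec![false; 1024]; // one 4096-byte page, 4-byte chunks
    assert_eq!(first_fit(&mut bitmap, 4096, 8, 4), Some(0));
    assert_eq!(first_fit(&mut bitmap, 4096, 8, 4), Some(8));
}
// ---------------------------------------------------------------------------
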
@@ -179,6 +169,17 @@ impl IsolatedAlloc {
         None
     }
 
+    /// Expands the available memory pool by adding one page.
+    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
+        // SAFETY: The system page size, which is the layout size, cannot be 0
+        let page_ptr = unsafe { alloc::alloc(self.page_layout()) };
+        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
+        assert!(self.page_size % COMPRESSION_FACTOR == 0);
+        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
+        self.page_ptrs.push(page_ptr);
+        (page_ptr, self.page_infos.last_mut().unwrap())
+    }
+
     /// Allocates in multiples of one page on the host system.
     ///
     /// SAFETY: Same as `alloc()`.
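
// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: huge allocations round up to whole
// pages per the doc comment on `huge_normalized_layout`; its body is not
// shown in this diff, so treat the arithmetic as an illustration (4096-byte
// pages assumed).
use std::alloc::Layout;

fn main() {
    let page_size = 4096usize;
    let layout = Layout::from_size_align(5000, 8).unwrap();
    let size = layout.size().next_multiple_of(page_size);
    let align = layout.align().max(page_size);
    let huge = Layout::from_size_align(size, align).unwrap();
    assert_eq!((huge.size(), huge.align()), (8192, 4096));
}
// ---------------------------------------------------------------------------
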
@@ -197,54 +198,60 @@ impl IsolatedAlloc {
     /// `alloc_zeroed()`) with the same layout as the one passed on this same
     /// `IsolatedAlloc`.
     pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
+        let layout = IsolatedAlloc::normalized_layout(layout);
 
-        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+        if self.is_huge_alloc(&layout) {
             // SAFETY: Partly upheld by caller, and we checked that the size
             // and align, meaning this must have been allocated via `alloc_huge`
             unsafe {
                 self.dealloc_huge(ptr, layout);
             }
         } else {
-            // Offset of the pointer in the current page
-            let ptr_idx = ptr.addr() % self.page_size;
-            // And then the page's base address
-            let page_addr = ptr.addr() - ptr_idx;
-
-            // Find the page this allocation belongs to.
-            // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
-            let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
-                .enumerate()
-                .find(|(_, (page, _))| page.addr() == page_addr);
-            let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
-                panic!(
-                    "Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}",
-                    self.page_ptrs
-                )
-            };
-            // Mark this range as available in the page.
-            let ptr_idx_pinfo = ptr_idx / COMPRESSION_FACTOR;
-            let size_pinfo = size / COMPRESSION_FACTOR;
-            for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
-                pinfo.remove(idx);
-            }
+            // SAFETY: It's not a huge allocation, therefore it is a small one.
+            let idx = unsafe { self.dealloc_small(ptr, layout) };
 
             // This may have been the last allocation on this page. If so, free the entire page.
             // FIXME: this can lead to threshold effects, we should probably add some form
             // of hysteresis.
-            if pinfo.is_empty() {
-                let page_layout = Layout::from_size_align(self.page_size, self.page_size).unwrap();
-                self.page_infos.remove(idx_of_pinfo);
+            if self.page_infos[idx].is_empty() {
+                self.page_infos.remove(idx);
+                let page_ptr = self.page_ptrs.remove(idx);
                 // SAFETY: We checked that there are no outstanding allocations
                 // from us pointing to this page, and we know it was allocated
                 // with this layout
                 unsafe {
-                    alloc::dealloc(self.page_ptrs.remove(idx_of_pinfo), page_layout);
+                    alloc::dealloc(page_ptr, self.page_layout());
                 }
             }
         }
     }
 
+    /// Returns the index of the page that this was deallocated from.
+    ///
+    /// SAFETY: the pointer must have been allocated with `alloc_small`.
+    unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize {
+        // Offset of the pointer in the current page
+        let offset = ptr.addr() % self.page_size;
+        // And then the page's base address
+        let page_addr = ptr.addr() - offset;
+
+        // Find the page this allocation belongs to.
+        // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
+        let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
+            .enumerate()
+            .find(|(_, (page, _))| page.addr() == page_addr);
+        let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
+            panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
+        };
+        // Mark this range as available in the page.
+        let ptr_idx_pinfo = offset / COMPRESSION_FACTOR;
+        let size_pinfo = layout.size() / COMPRESSION_FACTOR;
+        for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
+            pinfo.remove(idx);
+        }
+        idx_of_pinfo
+    }
+
 
     /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
     /// must ask for a size larger than the host pagesize.
     unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
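
// ---------------------------------------------------------------------------
// Editor's sketch, not part of the patch: a plausible round-trip through the
// public `alloc`/`dealloc` pair described in the doc comments above (the
// exact public signatures are not shown in this diff).
//
//     let mut isolated = IsolatedAlloc::new();
//     let layout = Layout::from_size_align(16, 8).unwrap();
//     // SAFETY: `layout` has a nonzero size and a valid alignment.
//     let ptr = unsafe { isolated.alloc(layout) };
//     // ... hand `ptr` to interpreted or native code ...
//     // SAFETY: `ptr` came from `alloc` on this allocator with this layout.
//     unsafe { isolated.dealloc(ptr, layout) };
//
// Freeing the last small allocation on a page hands the whole page back to
// the global allocator via `dealloc_small` + `page_layout`.
// ---------------------------------------------------------------------------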