 use std::alloc::{self, Layout};
+use std::collections::HashMap;
+use std::hash::{BuildHasherDefault, DefaultHasher};
 use std::sync;
 
 static ALLOCATOR: sync::Mutex<IsolatedAlloc> = sync::Mutex::new(IsolatedAlloc::empty());
 
+pub struct IsolatedAlloc {
+    #[allow(rustc::default_hash_types)]
+    allocators: HashMap<u64, IsolatedAllocInner, BuildHasherDefault<DefaultHasher>>,
+    /// The host (not emulated) page size, or 0 if it has not yet been set.
+    page_size: usize,
+}
+
 /// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
 /// pages (not mixed with Miri's own memory). This is very useful for native-lib mode.
 #[derive(Debug)]
-pub struct IsolatedAlloc {
+pub struct IsolatedAllocInner {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
     /// the global allocator.
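Note: this commit turns the old singleton into a two-level structure: `IsolatedAlloc` is now a registry mapping a per-machine `u64` id to an `IsolatedAllocInner` that owns that machine's pages. `BuildHasherDefault<DefaultHasher>` is presumably chosen over the `HashMap` default `RandomState` to avoid per-process hash randomness, and `#[allow(rustc::default_hash_types)]` silences the internal rustc lint that would otherwise flag these types. A standalone sketch of that determinism property (illustrative only, not part of the commit):

    use std::hash::{BuildHasher, BuildHasherDefault, DefaultHasher};

    fn main() {
        // Two independently created builders hash identically; two
        // `RandomState`s (the HashMap default) almost surely would not.
        let a = BuildHasherDefault::<DefaultHasher>::default();
        let b = BuildHasherDefault::<DefaultHasher>::default();
        assert_eq!(a.hash_one(42u64), b.hash_one(42u64));
    }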
@@ -23,8 +32,6 @@ pub struct IsolatedAlloc
     /// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
     /// 255 (fully allocated).
     page_infos: Vec<Box<[u8]>>,
-    /// The host (not emulated) page size.
-    page_size: usize,
 }
 
 // SAFETY: We only point to heap-allocated data
@@ -35,19 +42,80 @@ impl IsolatedAlloc
     /// allow this function to be `const`; it is updated to its real value on
     /// the first call to `alloc()` or `alloc_zeroed()`.
     const fn empty() -> Self {
-        Self { page_ptrs: Vec::new(), huge_ptrs: Vec::new(), page_infos: Vec::new(), page_size: 0 }
+        #[allow(rustc::default_hash_types)]
+        Self { allocators: HashMap::with_hasher(BuildHasherDefault::new()), page_size: 0 }
+    }
+
+    /// Allocates memory as described in `Layout`, from the pool marked by
+    /// `id`. Note that the same `id` must be used upon calling `dealloc`.
+    ///
+    /// SAFETY: See `alloc::alloc()`.
+    pub unsafe fn alloc(layout: Layout, id: u64) -> *mut u8 {
+        unsafe { Self::alloc_inner(layout, id, false) }
+    }
+
+    /// Same as `alloc()`, but zeroes out data before allocating.
+    ///
+    /// SAFETY: See `alloc::alloc_zeroed()`.
+    pub unsafe fn alloc_zeroed(layout: Layout, id: u64) -> *mut u8 {
+        unsafe { Self::alloc_inner(layout, id, true) }
+    }
+
+    /// Abstracts over `alloc` and `alloc_zeroed`.
+    ///
+    /// SAFETY: See `alloc::alloc()`/`alloc::alloc_zeroed()`.
+    unsafe fn alloc_inner(layout: Layout, id: u64, zeroed: bool) -> *mut u8 {
+        let mut alloc = ALLOCATOR.lock().unwrap();
+        if alloc.page_size == 0 {
+            unsafe {
+                alloc.page_size = libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap();
+            }
+        }
+        // Store this AFTER setting the page size
+        let page_size = alloc.page_size;
+
+        match alloc.allocators.get_mut(&id) {
+            Some(alloc_inner) => unsafe { alloc_inner.allocate(layout, page_size, zeroed) },
+            None => {
+                let mut new_inner = IsolatedAllocInner::new();
+                let ret = unsafe { new_inner.allocate(layout, page_size, zeroed) };
+                alloc.allocators.insert(id, new_inner);
+                ret
+            }
+        }
+    }
+
+    /// Deallocates a pointer from the memory pool associated with a given `id`.
+    ///
+    /// SAFETY: See `alloc::dealloc()`, with the extra caveat that `id` must
+    /// correspond to the `id` used upon first allocating the memory.
+    pub unsafe fn dealloc(ptr: *mut u8, layout: Layout, id: u64) {
+        let mut alloc = ALLOCATOR.lock().unwrap();
+        let page_size = alloc.page_size;
+        let alloc_inner = alloc.allocators.get_mut(&id).unwrap();
+        unsafe { alloc_inner.deallocate(ptr, layout, page_size) };
+        // Remove if the machine with that id no longer has any memory
+        if alloc_inner.huge_ptrs.is_empty() && alloc_inner.page_ptrs.is_empty() {
+            alloc.allocators.remove(&id);
+        }
+    }
+}
+
+impl IsolatedAllocInner {
+    /// Creates an empty allocator.
+    const fn new() -> Self {
+        Self { page_ptrs: Vec::new(), huge_ptrs: Vec::new(), page_infos: Vec::new() }
     }
 
     /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut Box<[u8]>) {
-        assert_ne!(self.page_size, 0);
+    fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut Box<[u8]>) {
+        assert_ne!(page_size, 0);
 
-        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+        let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         // We don't overwrite the bytes we hand out so make sure they're zeroed by default!
         let page_ptr = unsafe { alloc::alloc(page_layout) };
         // `page_infos` has to be one-eighth of the pagesize per the field docs
-        self.page_infos.push(vec![0u8; self.page_size / 8].into_boxed_slice());
+        self.page_infos.push(vec![0u8; page_size / 8].into_boxed_slice());
         self.page_ptrs.push(page_ptr);
         (page_ptr, self.page_infos.last_mut().unwrap())
    }
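Note: a minimal usage sketch of the new id-keyed interface (hypothetical caller, not part of the commit). Per the doc comments above, `dealloc` must receive the same `id` and `Layout` that were used at allocation time:

    use std::alloc::Layout;

    fn roundtrip() {
        let layout = Layout::from_size_align(64, 8).unwrap();
        let machine_id = 7; // stand-in for a Miri machine's identifier
        // SAFETY: the layout is valid and non-zero-sized, and the pointer
        // is deallocated below with the same layout and id.
        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, machine_id) };
        assert!(!ptr.is_null());
        // SAFETY: `ptr` came from `alloc_zeroed` with this layout and id.
        unsafe { IsolatedAlloc::dealloc(ptr, layout, machine_id) };
    }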
@@ -61,38 +129,15 @@ impl IsolatedAlloc
         (size, align)
     }
 
-    /// Allocates memory as described in `Layout`.
-    ///
-    /// SAFETY: `See alloc::alloc()`
-    #[inline]
-    pub unsafe fn alloc(layout: Layout) -> *mut u8 {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe { alloc.alloc_inner(layout, false) }
-    }
-
-    /// Same as `alloc()`, but zeroes out data before allocating.
-    ///
-    /// SAFETY: See `alloc::alloc_zeroed()`
-    pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe { alloc.alloc_inner(layout, true) }
-    }
-
     /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by
     /// the `zeroed` argument.
     ///
-    /// SAFETY: See `alloc::alloc()`
-    unsafe fn alloc_inner(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        if self.page_size == 0 {
-            unsafe {
-                self.page_size = libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap();
-            }
-        }
-
-        if layout.align() > self.page_size || layout.size() > self.page_size {
+    /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
+    /// corresponds to the host pagesize.
+    unsafe fn allocate(&mut self, layout: Layout, page_size: usize, zeroed: bool) -> *mut u8 {
+        if layout.align() > page_size || layout.size() > page_size {
             unsafe { self.alloc_multi_page(layout, zeroed) }
         } else {
-            let page_size = self.page_size;
             for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                 if let Some(ptr) =
                     unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed) }
@@ -102,12 +147,12 @@ impl IsolatedAlloc
             }
 
             // We get here only if there's no space in our existing pages
-            let (page, pinfo) = self.add_page();
+            let (page, pinfo) = self.add_page(page_size);
             unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
     }
 
-    /// Used internally by `alloc_inner` to abstract over some logic.
+    /// Used internally by `allocate` to abstract over some logic.
     ///
     /// SAFETY: `page` must be a page-aligned pointer to an allocated page,
     /// where the allocation is (at least) `page_size` bytes.
@@ -118,7 +163,7 @@ impl IsolatedAlloc
         pinfo: &mut Box<[u8]>,
         zeroed: bool,
     ) -> Option<*mut u8> {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
+        let (size, align) = IsolatedAllocInner::normalized_layout(layout);
 
         for idx in (0..page_size).step_by(align) {
             let idx_pinfo = idx / 8;
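Note: the `idx / 8` above reflects the `page_infos` encoding: one info byte tracks one 8-byte chunk of the page, which is also why each page's info slice is `page_size / 8` bytes long. A worked example with assumed values (4 KiB pages; the offsets are illustrative, not from the commit):

    fn main() {
        let page_size = 4096usize;
        assert_eq!(page_size / 8, 512); // info bytes per page

        // A 32-byte allocation at page offset 24 spans bytes 24..56 of the
        // page, i.e. info bytes 3 through 6; each is 255 while the
        // allocation is live and reset to 0 when it is freed.
        let (offset, size) = (24usize, 32usize);
        assert_eq!((offset / 8, (offset + size - 1) / 8), (3, 6));
    }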
@@ -151,31 +196,19 @@ impl IsolatedAlloc
         ret
     }
 
-    /// Deallocates a pointer from the isolated allocator.
-    ///
-    /// SAFETY: This pointer must have been allocated with `IsolatedAlloc::alloc()`
-    /// (or `alloc_zeroed()`) with the same layout as the one passed.
-    pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe {
-            alloc.dealloc_inner(ptr, layout);
-        }
-    }
-
-    /// Same as `dealloc`, but from a specific allocator. Useful for tests.
+    /// Deallocates a pointer from this allocator.
     ///
-    /// SAFETY: See `dealloc` above.
-    unsafe fn dealloc_inner(&mut self, ptr: *mut u8, layout: Layout) {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
-
-        if size == 0 || ptr.is_null() {
-            return;
-        }
-
-        let ptr_idx = ptr.addr() % self.page_size;
+    /// SAFETY: This pointer must have been allocated by calling `alloc()` (or
+    /// `alloc_zeroed()`) with the same layout as the one passed on this same
+    /// `IsolatedAllocInner`, and `page_size` must correspond to the host
+    /// pagesize.
+    unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout, page_size: usize) {
+        let (size, align) = IsolatedAllocInner::normalized_layout(layout);
+
+        let ptr_idx = ptr.addr() % page_size;
         let page_addr = ptr.addr() - ptr_idx;
 
-        if align > self.page_size || size > self.page_size {
+        if align > page_size || size > page_size {
             unsafe {
                 self.dealloc_multi_page(ptr, layout);
             }
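Note: the address arithmetic in `deallocate` rounds the pointer down to its page base: `ptr_idx` is the offset within the page, so subtracting it yields a page-aligned address that can be matched against `page_ptrs`. A standalone check with made-up values:

    fn main() {
        let page_size = 4096usize;
        let addr = 0x7f00_1234usize; // some address inside a page
        let ptr_idx = addr % page_size; // 0x234, offset within the page
        let page_addr = addr - ptr_idx; // 0x7f00_1000, the page base
        assert_eq!(page_addr % page_size, 0);
        assert_eq!(page_addr, 0x7f00_1000);
    }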
@@ -195,8 +228,7 @@ impl IsolatedAlloc
         }
 
         let mut free = vec![];
-        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+        let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         for (idx, pinfo) in self.page_infos.iter().enumerate() {
             if pinfo.iter().all(|p| *p == 0) {
                 free.push(idx);
@@ -240,54 +272,44 @@ mod tests
 
     #[test]
     fn small_zeroes() {
-        // We can't use the global one because it'll be shared between tests
-        let mut allocator = IsolatedAlloc::empty();
-
         let layout = Layout::from_size_align(256, 32).unwrap();
         // allocate_zeroed
-        let ptr = unsafe { allocator.alloc_inner(layout, true) };
+        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 0) };
         assert_zeroes(ptr, layout);
         unsafe {
-            allocator.dealloc_inner(ptr, layout);
+            IsolatedAlloc::dealloc(ptr, layout, 0);
         }
     }
 
     #[test]
     fn big_zeroes() {
-        let mut allocator = IsolatedAlloc::empty();
-
         let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
-        let ptr = unsafe { allocator.alloc_inner(layout, true) };
+        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 1) };
         assert_zeroes(ptr, layout);
         unsafe {
-            allocator.dealloc_inner(ptr, layout);
+            IsolatedAlloc::dealloc(ptr, layout, 1);
         }
     }
 
     #[test]
     fn repeated_allocs() {
-        let mut allocator = IsolatedAlloc::empty();
-
         for sz in (1..=(16 * 1024)).step_by(128) {
             let layout = Layout::from_size_align(sz, 1).unwrap();
-            let ptr = unsafe { allocator.alloc_inner(layout, true) };
+            let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 2) };
             assert_zeroes(ptr, layout);
-            eprintln!("Success at size {sz}");
             unsafe {
                 ptr.write_bytes(255, sz);
-                allocator.dealloc_inner(ptr, layout);
+                IsolatedAlloc::dealloc(ptr, layout, 2);
            }
         }
     }
 
     #[test]
     fn no_overlaps() {
-        let mut allocator = IsolatedAlloc::empty();
-        no_overlaps_inner(&mut allocator);
+        no_overlaps_inner(3);
     }
 
-    // So we can reuse this in `check_leaks`
-    fn no_overlaps_inner(allocator: &mut IsolatedAlloc) {
+    fn no_overlaps_inner(id: u64) {
         // Some random sizes and aligns
         let mut sizes = vec![32; 10];
         sizes.append(&mut vec![15; 4]);
@@ -304,29 +326,28 @@ mod tests
         let layouts: Vec<_> = std::iter::zip(sizes, aligns)
             .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
             .collect();
-        let ptrs: Vec<_> =
-            layouts.iter().map(|layout| unsafe { allocator.alloc_inner(*layout, true) }).collect();
+        let ptrs: Vec<_> = layouts
+            .iter()
+            .map(|layout| unsafe { IsolatedAlloc::alloc_zeroed(*layout, id) })
+            .collect();
 
         for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
             // Make sure we don't allocate overlapping ranges
             unsafe {
                 assert_zeroes(ptr, layout);
                 ptr.write_bytes(255, layout.size());
-                allocator.dealloc_inner(ptr, layout);
+                IsolatedAlloc::dealloc(ptr, layout, id);
             }
         }
     }
 
     #[test]
     fn check_leaks() {
         // Generate some noise first
-        let mut allocator = IsolatedAlloc::empty();
-        no_overlaps_inner(&mut allocator);
+        no_overlaps_inner(4);
+        let alloc = ALLOCATOR.lock().unwrap();
 
-        for pinfo in &allocator.page_infos {
-            for eight_bytes in 0..pinfo.len() {
-                assert_eq!(eight_bytes, 0);
-            }
-        }
+        // Should get auto-deleted if the allocations are empty
+        assert!(!alloc.allocators.contains_key(&4));
     }
 }