 use std::alloc::{self, Layout};
+use std::collections::HashMap;
+use std::hash::{BuildHasherDefault, DefaultHasher};
 use std::sync;
 
 static ALLOCATOR: sync::Mutex<IsolatedAlloc> = sync::Mutex::new(IsolatedAlloc::empty());
 
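+/// Global registry of per-machine isolated sub-allocators, keyed by machine id
+/// and guarded by the `ALLOCATOR` mutex above.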
+pub struct IsolatedAlloc {
+    #[allow(rustc::default_hash_types)]
+    allocators: HashMap<u64, IsolatedAllocInner, BuildHasherDefault<DefaultHasher>>,
+    /// The host (not emulated) page size, or 0 if it has not yet been set.
+    page_size: usize,
+}
+
 /// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
 /// pages (not mixed with Miri's own memory). This is very useful for native-lib mode.
 #[derive(Debug)]
-pub struct IsolatedAlloc {
+pub struct IsolatedAllocInner {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
     /// the global allocator.
@@ -23,8 +32,6 @@ pub struct IsolatedAlloc {
     /// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
     /// 255 (fully allocated).
     page_infos: Vec<Box<[u8]>>,
-    /// The host (not emulated) page size.
-    page_size: usize,
 }
 
 // SAFETY: We only point to heap-allocated data
@@ -35,19 +42,80 @@ impl IsolatedAlloc {
     /// allow this function to be `const`; it is updated to its real value on
     /// the first call to `alloc()` or `alloc_zeroed()`.
     const fn empty() -> Self {
-        Self { page_ptrs: Vec::new(), huge_ptrs: Vec::new(), page_infos: Vec::new(), page_size: 0 }
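+        // `BuildHasherDefault` is used (rather than the default `RandomState`)
+        // so that this constructor can remain a `const fn`.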
+        #[allow(rustc::default_hash_types)]
+        Self { allocators: HashMap::with_hasher(BuildHasherDefault::new()), page_size: 0 }
+    }
+
+    /// Allocates memory as described in `Layout`, from the pool marked by
+    /// `id`. Note that the same `id` must be used upon calling `dealloc`.
+    ///
+    /// SAFETY: See `alloc::alloc()`.
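+    ///
+    /// A minimal usage sketch (illustrative only, not a doctest; `machine_id`
+    /// stands in for any caller-chosen `u64`):
+    ///
+    /// ```ignore
+    /// let layout = Layout::from_size_align(64, 8).unwrap();
+    /// let ptr = unsafe { IsolatedAlloc::alloc(layout, machine_id) };
+    /// // ... use the memory ...
+    /// unsafe { IsolatedAlloc::dealloc(ptr, layout, machine_id) };
+    /// ```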
+    pub unsafe fn alloc(layout: Layout, id: u64) -> *mut u8 {
+        unsafe { Self::alloc_inner(layout, id, false) }
+    }
+
+    /// Same as `alloc()`, but returns zeroed memory.
+    ///
+    /// SAFETY: See `alloc::alloc_zeroed()`.
+    pub unsafe fn alloc_zeroed(layout: Layout, id: u64) -> *mut u8 {
+        unsafe { Self::alloc_inner(layout, id, true) }
+    }
+
+    /// Abstracts over `alloc` and `alloc_zeroed`.
+    ///
+    /// SAFETY: See `alloc::alloc()`/`alloc::alloc_zeroed()`.
+    unsafe fn alloc_inner(layout: Layout, id: u64, zeroed: bool) -> *mut u8 {
+        let mut alloc = ALLOCATOR.lock().unwrap();
+        if alloc.page_size == 0 {
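+            // First use: query the host page size once via `sysconf(3)`.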
+            unsafe {
+                alloc.page_size = libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap();
+            }
+        }
+        // Store this AFTER setting the page size
+        let page_size = alloc.page_size;
+
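+        // Look up this machine's sub-allocator, creating it lazily on first use.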
+        match alloc.allocators.get_mut(&id) {
+            Some(alloc_inner) => unsafe { alloc_inner.allocate(layout, page_size, zeroed) },
+            None => {
+                let mut new_inner = IsolatedAllocInner::new();
+                let ret = unsafe { new_inner.allocate(layout, page_size, zeroed) };
+                alloc.allocators.insert(id, new_inner);
+                ret
+            }
+        }
+    }
+
+    /// Deallocates a pointer from the memory pool associated with a given `id`.
+    ///
+    /// SAFETY: See `alloc::dealloc()`, with the extra caveat that `id` must
+    /// correspond to the `id` used upon first allocating the memory.
+    pub unsafe fn dealloc(ptr: *mut u8, layout: Layout, id: u64) {
+        let mut alloc = ALLOCATOR.lock().unwrap();
+        let page_size = alloc.page_size;
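+        // Deallocating with an `id` that has no live allocations is a caller bug,
+        // so the unwrap is intentional.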
+        let alloc_inner = alloc.allocators.get_mut(&id).unwrap();
+        unsafe { alloc_inner.deallocate(ptr, layout, page_size) };
+        // Remove if the machine with that id no longer has any memory
+        if alloc_inner.huge_ptrs.is_empty() && alloc_inner.page_ptrs.is_empty() {
+            alloc.allocators.remove(&id);
+        }
+    }
+}
+
+impl IsolatedAllocInner {
+    /// Creates an empty allocator.
+    const fn new() -> Self {
+        Self { page_ptrs: Vec::new(), huge_ptrs: Vec::new(), page_infos: Vec::new() }
     }
 
     /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut Box<[u8]>) {
-        assert_ne!(self.page_size, 0);
+    fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut Box<[u8]>) {
+        assert_ne!(page_size, 0);
 
-        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+        let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         // We don't overwrite the bytes we hand out, so make sure they're zeroed by default!
         let page_ptr = unsafe { alloc::alloc_zeroed(page_layout) };
         // `page_infos` has to be one-eighth of the pagesize per the field docs
-        self.page_infos.push(vec![0u8; self.page_size / 8].into_boxed_slice());
+        self.page_infos.push(vec![0u8; page_size / 8].into_boxed_slice());
         self.page_ptrs.push(page_ptr);
         (page_ptr, self.page_infos.last_mut().unwrap())
     }
@@ -61,38 +129,15 @@ impl IsolatedAlloc {
         (size, align)
     }
 
-    /// Allocates memory as described in `Layout`.
-    ///
-    /// SAFETY: `See alloc::alloc()`
-    #[inline]
-    pub unsafe fn alloc(layout: Layout) -> *mut u8 {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe { alloc.alloc_inner(layout, false) }
-    }
-
-    /// Same as `alloc()`, but zeroes out data before allocating.
-    ///
-    /// SAFETY: See `alloc::alloc_zeroed()`
-    pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe { alloc.alloc_inner(layout, true) }
-    }
-
     /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by
     /// the `zeroed` argument.
     ///
-    /// SAFETY: See `alloc::alloc()`
-    unsafe fn alloc_inner(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        if self.page_size == 0 {
-            unsafe {
-                self.page_size = libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap();
-            }
-        }
-
-        if layout.align() > self.page_size || layout.size() > self.page_size {
+    /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
+    /// corresponds to the host pagesize.
+    unsafe fn allocate(&mut self, layout: Layout, page_size: usize, zeroed: bool) -> *mut u8 {
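+        // Requests larger (or more strictly aligned) than a page get dedicated pages.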
+        if layout.align() > page_size || layout.size() > page_size {
             unsafe { self.alloc_multi_page(layout, zeroed) }
         } else {
-            let page_size = self.page_size;
             for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                 if let Some(ptr) =
                     unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed) }
@@ -102,12 +147,12 @@ impl IsolatedAlloc {
             }
 
             // We get here only if there's no space in our existing pages
-            let (page, pinfo) = self.add_page();
+            let (page, pinfo) = self.add_page(page_size);
             unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
     }
 
-    /// Used internally by `alloc_inner` to abstract over some logic.
+    /// Used internally by `allocate` to abstract over some logic.
     ///
     /// SAFETY: `page` must be a page-aligned pointer to an allocated page,
     /// where the allocation is (at least) `page_size` bytes.
@@ -118,7 +163,7 @@ impl IsolatedAlloc {
         pinfo: &mut Box<[u8]>,
         zeroed: bool,
     ) -> Option<*mut u8> {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
+        let (size, align) = IsolatedAllocInner::normalized_layout(layout);
 
         for idx in (0..page_size).step_by(align) {
             let idx_pinfo = idx / 8;
@@ -151,31 +196,19 @@ impl IsolatedAlloc {
         ret
     }
 
-    /// Deallocates a pointer from the isolated allocator.
-    ///
-    /// SAFETY: This pointer must have been allocated with `IsolatedAlloc::alloc()`
-    /// (or `alloc_zeroed()`) with the same layout as the one passed.
-    pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
-        let mut alloc = ALLOCATOR.lock().unwrap();
-        unsafe {
-            alloc.dealloc_inner(ptr, layout);
-        }
-    }
-
-    /// Same as `dealloc`, but from a specific allocator. Useful for tests.
+    /// Deallocates a pointer from this allocator.
     ///
-    /// SAFETY: See `dealloc` above.
-    unsafe fn dealloc_inner(&mut self, ptr: *mut u8, layout: Layout) {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
-
-        if size == 0 || ptr.is_null() {
-            return;
-        }
-
-        let ptr_idx = ptr.addr() % self.page_size;
+    /// SAFETY: This pointer must have been allocated by calling `alloc()` (or
+    /// `alloc_zeroed()`) with the same layout as the one passed on this same
+    /// `IsolatedAllocInner`, and `page_size` must correspond to the host
+    /// pagesize.
+    unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout, page_size: usize) {
+        let (size, align) = IsolatedAllocInner::normalized_layout(layout);
+
+        let ptr_idx = ptr.addr() % page_size;
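+        // `ptr_idx` is the byte offset of `ptr` within its page; subtracting it
+        // recovers the page's base address.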
         let page_addr = ptr.addr() - ptr_idx;
 
-        if align > self.page_size || size > self.page_size {
+        if align > page_size || size > page_size {
             unsafe {
                 self.dealloc_multi_page(ptr, layout);
             }
@@ -195,8 +228,7 @@ impl IsolatedAlloc {
         }
 
         let mut free = vec![];
-        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+        let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         for (idx, pinfo) in self.page_infos.iter().enumerate() {
             if pinfo.iter().all(|p| *p == 0) {
                 free.push(idx);
@@ -240,54 +272,44 @@ mod tests {
 
     #[test]
     fn small_zeroes() {
-        // We can't use the global one because it'll be shared between tests
-        let mut allocator = IsolatedAlloc::empty();
-
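+        // The global allocator is shared between tests, so each test uses its own id.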
         let layout = Layout::from_size_align(256, 32).unwrap();
         // allocate_zeroed
-        let ptr = unsafe { allocator.alloc_inner(layout, true) };
+        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 0) };
         assert_zeroes(ptr, layout);
         unsafe {
-            allocator.dealloc_inner(ptr, layout);
+            IsolatedAlloc::dealloc(ptr, layout, 0);
         }
     }
 
     #[test]
     fn big_zeroes() {
-        let mut allocator = IsolatedAlloc::empty();
-
         let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
-        let ptr = unsafe { allocator.alloc_inner(layout, true) };
+        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 1) };
         assert_zeroes(ptr, layout);
         unsafe {
-            allocator.dealloc_inner(ptr, layout);
+            IsolatedAlloc::dealloc(ptr, layout, 1);
         }
     }
 
     #[test]
     fn repeated_allocs() {
-        let mut allocator = IsolatedAlloc::empty();
-
         for sz in (1..=(16 * 1024)).step_by(128) {
             let layout = Layout::from_size_align(sz, 1).unwrap();
-            let ptr = unsafe { allocator.alloc_inner(layout, true) };
+            let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 2) };
             assert_zeroes(ptr, layout);
-            eprintln!("Success at size {sz}");
             unsafe {
                 ptr.write_bytes(255, sz);
-                allocator.dealloc_inner(ptr, layout);
+                IsolatedAlloc::dealloc(ptr, layout, 2);
             }
         }
     }
 
     #[test]
     fn no_overlaps() {
-        let mut allocator = IsolatedAlloc::empty();
-        no_overlaps_inner(&mut allocator);
+        no_overlaps_inner(3);
     }
 
-    // So we can reuse this in `check_leaks`
-    fn no_overlaps_inner(allocator: &mut IsolatedAlloc) {
+    fn no_overlaps_inner(id: u64) {
         // Some random sizes and aligns
         let mut sizes = vec![32; 10];
         sizes.append(&mut vec![15; 4]);
@@ -304,29 +326,28 @@ mod tests {
         let layouts: Vec<_> = std::iter::zip(sizes, aligns)
             .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
             .collect();
-        let ptrs: Vec<_> =
-            layouts.iter().map(|layout| unsafe { allocator.alloc_inner(*layout, true) }).collect();
+        let ptrs: Vec<_> = layouts
+            .iter()
+            .map(|layout| unsafe { IsolatedAlloc::alloc_zeroed(*layout, id) })
+            .collect();
 
         for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
             // Make sure we don't allocate overlapping ranges
             unsafe {
                 assert_zeroes(ptr, layout);
                 ptr.write_bytes(255, layout.size());
-                allocator.dealloc_inner(ptr, layout);
+                IsolatedAlloc::dealloc(ptr, layout, id);
             }
         }
     }
 
     #[test]
     fn check_leaks() {
         // Generate some noise first
-        let mut allocator = IsolatedAlloc::empty();
-        no_overlaps_inner(&mut allocator);
+        no_overlaps_inner(4);
+        let alloc = ALLOCATOR.lock().unwrap();
 
-        for pinfo in &allocator.page_infos {
-            for eight_bytes in 0..pinfo.len() {
-                assert_eq!(eight_bytes, 0);
-            }
-        }
+        // The entry should have been auto-removed once all its allocations were freed
+        assert!(!alloc.allocators.contains_key(&4));
     }
 }