11// This code is adapted from https://github.com/wenyuzhao/bump-allocator-rs
2+ //
3+ // We've altered it to work entirely with usize values internally and only cast
4+ // back to an exposed-provenance pointer when returning from alloc. This gives
5+ // us a richer checked-arithmetic API we can use to trap overflows internally,
6+ // and also avoids some potential UB issues with pointer provenance. Since the
7+ // provenance of __heap_base is a 1-byte value anyway, and all the rest of the
8+ // wasm heap is considered to have exposed provenance, we think this is the best
9+ // we can do. Writing allocators is tricky!
10+ //
11+ // NB: technically these alterations only handle corner cases that cannot be hit
12+ // using safe client code. Safe clients pass `Layout` structs that always meet
13+ // additional size and alignment constraints. But hardening the code to tolerate
14+ // even _unsafe_ inputs -- malformed `Layout` inputs one can only create by
15+ // calling unsafe methods -- is not only easy to do, it makes the code simpler
16+ // and more readable, so we went ahead and did it.
217
18+ use crate :: unwrap:: UnwrapOptimized ;
319use core:: alloc:: { GlobalAlloc , Layout } ;
420
// Process-global bump-allocator state. Wasm (MVP) is single-threaded, so a
// bare `static mut` is workable here; all access is funneled through
// `GlobalAlloc::alloc` below. NOTE(review): newer toolchains lint against
// taking `&mut` to a `static mut` (`static_mut_refs`) — confirm the access
// pattern against the project's MSRV/edition.
static mut LOCAL_ALLOCATOR: BumpPointerLocal = BumpPointerLocal::new();
622
// Bump-pointer allocator state, held as plain `usize` addresses rather than
// raw pointers so all internal arithmetic can use the checked-integer API
// (see the header comment about provenance and overflow trapping).
struct BumpPointerLocal {
    // Address of the next free byte. 0 until lazily initialized on first use.
    cursor: usize,
    // Address one past the end of the memory we have claimed so far.
    // `limit == 0` doubles as the "not yet initialized" sentinel.
    limit: usize,
}
1127
1228impl BumpPointerLocal {
@@ -16,24 +32,20 @@ impl BumpPointerLocal {
1632
    /// Create an allocator in the uninitialized state. Both fields start at
    /// 0; `limit == 0` is the sentinel that triggers lazy initialization
    /// (claiming the current end of linear memory) on the first allocation.
    /// `const` so it can initialize the `static` at compile time.
    pub const fn new() -> Self {
        Self {
            cursor: 0,
            limit: 0,
        }
    }
2339
24- #[ inline( always) ]
25- fn align_allocation ( cursor : * mut u8 , align : usize ) -> * mut u8 {
26- let mask = align - 1 ;
27- ( ( ( cursor as usize ) + mask) & !mask) as _
28- }
29-
3040 #[ inline( always) ]
3141 fn maybe_init_inline ( & mut self ) {
32- if self . limit as usize == 0 {
42+ if self . limit == 0 {
3343 // This is a slight over-estimate and ideally we would use __heap_base
3444 // but that seems not to be easy to access and in any case it is just a
3545 // convention whereas this is more guaranteed by the wasm spec to work.
36- self . cursor = ( core:: arch:: wasm32:: memory_size ( Self :: MEM ) * Self :: PAGE_SIZE ) as _ ;
46+ self . cursor = core:: arch:: wasm32:: memory_size ( Self :: MEM )
47+ . checked_mul ( Self :: PAGE_SIZE )
48+ . unwrap_optimized ( ) ;
3749 self . limit = self . cursor ;
3850 }
3951 }
@@ -43,11 +55,15 @@ impl BumpPointerLocal {
4355 self . maybe_init_inline ( )
4456 }
4557
    // Allocate `bytes` bytes with `align` alignment.
    //
    // Returns the allocation's address as a plain `usize`; the caller
    // (`GlobalAlloc::alloc`) converts it back to a pointer. All arithmetic is
    // checked so malformed (unsafe-constructed) Layouts trap instead of
    // wrapping.
    #[inline(always)]
    fn alloc(&mut self, bytes: usize, align: usize) -> usize {
        // Lazily claim the heap start on first use.
        self.maybe_init();
        // Round the cursor up to the requested alignment. Traps (via
        // unwrap_optimized) on overflow or `align == 0`.
        let start = self
            .cursor
            .checked_next_multiple_of(align)
            .unwrap_optimized();
        // Traps if the allocation would run past usize::MAX.
        let new_cursor = start.checked_add(bytes).unwrap_optimized();
        if new_cursor <= self.limit {
            // Fast path: the request fits in already-claimed memory.
            self.cursor = new_cursor;
            start
@@ -57,17 +73,18 @@ impl BumpPointerLocal {
5773 }
5874
5975 #[ inline( always) ]
60- fn alloc_slow_inline ( & mut self , bytes : usize , align : usize ) -> * mut u8 {
61- let pages = ( bytes + Self :: PAGE_SIZE - 1 ) / Self :: PAGE_SIZE ;
76+ fn alloc_slow_inline ( & mut self , bytes : usize , align : usize ) -> usize {
77+ let pages = bytes. div_ceil ( Self :: PAGE_SIZE ) ;
6278 if core:: arch:: wasm32:: memory_grow ( Self :: MEM , pages) == usize:: MAX {
63- panic ! ( )
79+ core :: arch :: wasm32 :: unreachable ( ) ;
6480 }
65- self . limit = unsafe { self . limit . add ( pages * Self :: PAGE_SIZE ) } ;
81+ let bytes_grown = pages. checked_mul ( Self :: PAGE_SIZE ) . unwrap_optimized ( ) ;
82+ self . limit = self . limit . checked_add ( bytes_grown) . unwrap_optimized ( ) ;
6683 self . alloc ( bytes, align)
6784 }
6885
    // Out-of-line entry to the slow path. `#[inline(never)]` keeps the
    // grow-and-retry machinery out of the inlined fast path of `alloc`,
    // while `alloc_slow_inline` is `#[inline(always)]` inside this one
    // non-inlined frame.
    #[inline(never)]
    fn alloc_slow(&mut self, bytes: usize, align: usize) -> usize {
        self.alloc_slow_inline(bytes, align)
    }
7390}
@@ -80,7 +97,7 @@ unsafe impl GlobalAlloc for BumpPointer {
8097 unsafe fn alloc ( & self , layout : Layout ) -> * mut u8 {
8198 let ( bytes, align) = ( layout. size ( ) , layout. align ( ) ) ;
8299 let ptr = LOCAL_ALLOCATOR . alloc ( bytes, align) ;
83- ptr
100+ core :: ptr:: with_exposed_provenance_mut ( ptr )
84101 }
85102
86103 #[ inline( always) ]
0 commit comments