@@ -56,6 +56,10 @@ pub struct PoolAllocator<'alloc> {
5656 pub ( crate ) free_cache : Cell < usize > ,
5757 // per size class cached index of the last pool used by alloc_slot
5858 pub ( crate ) alloc_cache : [ Cell < usize > ; 12 ] ,
59+ // empty slot pools kept alive to avoid OS reallocation on the next cycle
60+ pub ( crate ) recycled_pools : Vec < SlotPool > ,
61+ // maximum number of idle pages held across all size classes
62+ pub ( crate ) max_recycled : usize ,
5963 _marker : core:: marker:: PhantomData < & ' alloc ( ) > ,
6064}
6165
@@ -82,6 +86,9 @@ impl<'alloc> Default for PoolAllocator<'alloc> {
8286 Cell :: new ( usize:: MAX ) ,
8387 Cell :: new ( usize:: MAX ) ,
8488 ] ,
89+ recycled_pools : Vec :: new ( ) ,
90+ // one idle page per size class keeps memory pressure manageable
91+ max_recycled : SIZE_CLASSES . len ( ) ,
8592 _marker : core:: marker:: PhantomData ,
8693 }
8794 }
@@ -155,6 +162,29 @@ impl<'alloc> PoolAllocator<'alloc> {
155162 }
156163
157164 // need a new pool for this size class
165+ // try the recycle list first
166+ // to avoid a round trip through the OS allocator
167+ if let Some ( pos) = self
168+ . recycled_pools
169+ . iter ( )
170+ . rposition ( |p| p. slot_size == slot_size)
171+ {
172+ let pool = self . recycled_pools . swap_remove ( pos) ;
173+ // pool.reset() was already called in drop_empty_pools when it was parked
174+ let slot_ptr = pool. alloc_slot ( ) . ok_or ( PoolAllocError :: OutOfMemory ) ?;
175+ let insert_idx = self . slot_pools . len ( ) ;
176+ self . slot_pools . push ( pool) ;
177+ self . alloc_cache [ sc_idx] . set ( insert_idx) ;
178+
179+ // SAFETY: slot_ptr was successfully allocated for this size class
180+ return unsafe {
181+ let dst = slot_ptr. as_ptr ( ) as * mut PoolItem < T > ;
182+ dst. write ( PoolItem ( value) ) ;
183+ Ok ( PoolPointer :: from_raw ( NonNull :: new_unchecked ( dst) ) )
184+ } ;
185+ }
186+
187+ // Recycle list had no match, allocate a fresh page from the OS.
158188 let total = self . page_size . max ( slot_size * 4 ) ;
159189 let new_pool = SlotPool :: try_init ( slot_size, total, 16 ) ?;
160190 self . current_heap_size += new_pool. layout . size ( ) ;
@@ -267,16 +297,22 @@ impl<'alloc> PoolAllocator<'alloc> {
267297 false
268298 }
269299
270- /// drop empty slot pools and bump pages
300+ /// Reclaim slot pool pages that became empty after a GC sweep.
301+ ///
302+ /// Empty pages are parked in a recycle list (up to `max_recycled`)
303+ /// to avoid global allocator round trips on the next allocation.
271304 pub fn drop_empty_pools ( & mut self ) {
272- self . slot_pools . retain ( |p| {
273- if p. run_drop_check ( ) {
274- self . current_heap_size = self . current_heap_size . saturating_sub ( p. layout . size ( ) ) ;
275- false
305+ // Drain fully empty slot pools into the recycle list.
306+ for pool in self . slot_pools . extract_if ( .., |p| p. run_drop_check ( ) ) {
307+ if self . recycled_pools . len ( ) < self . max_recycled {
308+ pool. reset ( ) ;
309+ self . recycled_pools . push ( pool) ;
276310 } else {
277- true
311+ self . current_heap_size = self . current_heap_size . saturating_sub ( pool . layout . size ( ) ) ;
278312 }
279- } ) ;
313+ }
314+
315+ // Bump pages have no size class affinity so we always free them.
280316 self . bump_pages . retain ( |p| {
281317 if p. run_drop_check ( ) {
282318 self . current_heap_size = self . current_heap_size . saturating_sub ( p. layout . size ( ) ) ;