@@ -93,8 +93,12 @@ slab_t *create_slab(bucket_t *bucket) {
     memset(slab->chunks, 0, sizeof(bool) * slab->num_chunks);
 
     slab->slab_size = bucket_slab_alloc_size(bucket);
-    umf_result_t res = umfMemoryProviderAlloc(
-        bucket_get_mem_handle(bucket), slab->slab_size, 0, &slab->mem_ptr);
+
+    // NOTE: originally slab memory was allocated without alignment;
+    // with alignment, registering a slab is simpler and doesn't require a multimap
+    umf_result_t res =
+        umfMemoryProviderAlloc(bucket_get_mem_handle(bucket), slab->slab_size,
+                               bucket_slab_min_size(bucket), &slab->mem_ptr);
 
     if (res == UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY) {
         destroy_slab(slab);
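
The alignment argument passed to umfMemoryProviderAlloc is what the NOTE above refers to: because every slab now starts on a bucket_slab_min_size boundary, any pointer inside a slab can be aligned down to recover the single key the slab was registered under, so a flat address-to-slab map is enough and no multimap is needed. A minimal standalone sketch of that lookup, using a toy array-based registry and a local ALIGN_DOWN macro rather than the pool's real data structures (slab_min_size is assumed to be a power of two):

#include <stddef.h>
#include <stdint.h>

// Illustrative sketch only - a toy registry, not the pool's real map.
#define MAX_SLABS 64
typedef struct slab_t slab_t;
static struct { uintptr_t key; slab_t *slab; } registry[MAX_SLABS];

#define ALIGN_DOWN(addr, size) ((addr) & ~((size) - 1))

// Because each slab is allocated with slab_min_size alignment, aligning any
// in-slab pointer down yields exactly one registered key per slab boundary.
static slab_t *find_slab_for_ptr(void *ptr, size_t slab_min_size) {
    uintptr_t key = ALIGN_DOWN((uintptr_t)ptr, slab_min_size);
    for (int i = 0; i < MAX_SLABS; i++) {
        if (registry[i].key == key) {
            return registry[i].slab;
        }
    }
    return NULL;
}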
@@ -115,6 +119,8 @@ void destroy_slab(slab_t *slab) {
     umf_result_t res = umfMemoryProviderFree(
         bucket_get_mem_handle(slab->bucket), slab->mem_ptr, slab->slab_size);
     assert(res == UMF_RESULT_SUCCESS);
+    (void)res;
+
     umf_ba_global_free(slab->chunks);
     umf_ba_global_free(slab->iter);
     umf_ba_global_free(slab);
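
The new (void)res line is the usual companion to an assert on a result that is otherwise unused: assert() expands to nothing when NDEBUG is defined, so without the cast a release build would warn about the unused variable (and fail under -Werror). A standalone illustration of the pattern (release_resource() is made up for the example):

#include <assert.h>

static int release_resource(void) { return 0; }

void free_something(void) {
    int rc = release_resource();
    assert(rc == 0); // checked in debug builds only
    (void)rc;        // keeps NDEBUG (release) builds free of unused-variable warnings
}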
@@ -211,19 +217,29 @@ bool slab_has_avail(const slab_t *slab) {
 
 void slab_reg(slab_t *slab) {
     bucket_t *bucket = slab_get_bucket(slab);
+    // NOTE: changed vs. the original - the slab is already aligned to
+    // bucket_slab_min_size; end_addr is also decremented by 1
     void *start_addr = (void *)ALIGN_DOWN((size_t)slab_get(slab),
                                           bucket_slab_min_size(bucket));
-    void *end_addr = (uint8_t *)(start_addr) + bucket_slab_min_size(bucket);
+    void *end_addr = (uint8_t *)(start_addr) + bucket_slab_min_size(bucket) - 1;
+
+    fprintf(stderr, "[DP slab_reg] slab: %p, start: %p, end %p\n", (void *)slab,
+            start_addr, end_addr);
 
     slab_reg_by_addr(start_addr, slab);
     slab_reg_by_addr(end_addr, slab);
 }
 
 void slab_unreg(slab_t *slab) {
     bucket_t *bucket = slab_get_bucket(slab);
+    // NOTE: changed vs. the original - the slab is already aligned to
+    // bucket_slab_min_size; end_addr is also decremented by 1
     void *start_addr = (void *)ALIGN_DOWN((size_t)slab_get(slab),
                                           bucket_slab_min_size(bucket));
-    void *end_addr = (uint8_t *)(start_addr) + bucket_slab_min_size(bucket);
+    void *end_addr = (uint8_t *)(start_addr) + bucket_slab_min_size(bucket) - 1;
+
+    fprintf(stderr, "[DP slab_unreg] slab: %p, start: %p, end %p\n",
+            (void *)slab, start_addr, end_addr);
 
     slab_unreg_by_addr(start_addr, slab);
     slab_unreg_by_addr(end_addr, slab);
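
The one-byte decrement in both hunks above follows from the new alignment: start_addr + bucket_slab_min_size is the first byte of the next aligned slab, so registering it as this slab's end key would collide with that neighbour's start key in a plain map; start + size - 1 is the last address that still belongs to the current slab. A small worked example with made-up numbers (a 64 KiB minimum slab size and an arbitrary aligned base address):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uintptr_t slab_min_size = 64 * 1024;              // assumed example value
    uintptr_t slab_a_start = 0x100000;                      // aligned slab A
    uintptr_t slab_b_start = slab_a_start + slab_min_size;  // adjacent aligned slab B

    uintptr_t old_end_a = slab_a_start + slab_min_size;     // == slab_b_start: key collision
    uintptr_t new_end_a = old_end_a - 1;                    // last byte still inside A

    printf("A: [%#lx, %#lx], B starts at %#lx, old A end key was %#lx\n",
           (unsigned long)slab_a_start, (unsigned long)new_end_a,
           (unsigned long)slab_b_start, (unsigned long)old_end_a);
    return 0;
}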
@@ -485,6 +501,7 @@ void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
 
 bool bucket_can_pool(bucket_t *bucket, bool *to_pool) {
     size_t NewFreeSlabsInBucket;
+
     // Check if this bucket is used in chunked form or as full slabs.
     bool chunkedBucket =
         bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket);
@@ -497,6 +514,7 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool) {
         DL_FOREACH(bucket->AvailableSlabs, it) { avail_num++; }
         NewFreeSlabsInBucket = avail_num + 1;
     }
+
     if (bucket_capacity(bucket) >= NewFreeSlabsInBucket) {
         size_t pool_size = bucket->shared_limits->total_size;
         while (true) {