@@ -18,7 +18,7 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static void bucket_decrement_pool(bucket_t *bucket, bool *from_pool);
+static void bucket_decrement_pool(bucket_t *bucket);
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool);
 
@@ -52,7 +52,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket, bool full_size) {
+static slab_t *create_slab(bucket_t *bucket) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -76,19 +76,16 @@ static slab_t *create_slab(bucket_t *bucket, bool full_size) {
     slab->iter->val = slab;
     slab->iter->prev = slab->iter->next = NULL;
 
-    if (full_size) {
-        slab->num_chunks_total = 0;
-        slab->chunks = NULL;
-    } else {
-        slab->num_chunks_total = bucket_slab_min_size(bucket) / bucket->size;
-        slab->chunks =
-            umf_ba_global_alloc(sizeof(*slab->chunks) * slab->num_chunks_total);
-        if (slab->chunks == NULL) {
-            LOG_ERR("allocation of slab chunks failed!");
-            goto free_slab_iter;
-        }
-        memset(slab->chunks, 0, sizeof(*slab->chunks) * slab->num_chunks_total);
+    slab->num_chunks_total =
+        utils_max(bucket_slab_min_size(bucket) / bucket->size, 1);
+    slab->chunks =
+        umf_ba_global_alloc(sizeof(*slab->chunks) * slab->num_chunks_total);
+    if (slab->chunks == NULL) {
+        LOG_ERR("allocation of slab chunks failed!");
+        goto free_slab_iter;
     }
+    memset(slab->chunks, 0, sizeof(*slab->chunks) * slab->num_chunks_total);
+
     // if slab_min_size is not a multiple of bucket size, we would have some
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
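[Note: with the full_size flag gone, every slab is chunked, and a bucket whose size exceeds bucket_slab_min_size simply gets a single-chunk slab. A minimal standalone sketch of the new sizing rule; the helper num_chunks_for is hypothetical, while utils_max and the size values mirror the hunk above.]

#include <stddef.h>

// Illustrative rendering of the chunk-count rule added above. When
// bucket_size > slab_min_size, the integer division yields 0 and the
// result is clamped to 1, which subsumes the old "full size" slab mode.
static size_t num_chunks_for(size_t bucket_size, size_t slab_min_size) {
    size_t n = slab_min_size / bucket_size;
    return n > 0 ? n : 1; // utils_max(n, 1)
}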
@@ -157,9 +154,6 @@ static size_t slab_find_first_available_chunk_idx(const slab_t *slab) {
 }
 
 static void *slab_get_chunk(slab_t *slab) {
-    // slab has to be allocated in chunk mode
-    assert(slab->chunks && slab->num_chunks_total > 0);
-
     // free chunk must exist, otherwise we would have allocated another slab
     const size_t chunk_idx = slab_find_first_available_chunk_idx(slab);
     assert(chunk_idx != SIZE_MAX);
@@ -356,8 +350,8 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
     return bucket_slab_min_size(bucket) / 2;
 }
 
-static slab_t *bucket_create_slab(bucket_t *bucket, bool full_size) {
-    slab_t *slab = create_slab(bucket, full_size);
+static slab_t *bucket_create_slab(bucket_t *bucket) {
+    slab_t *slab = create_slab(bucket);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -377,69 +371,20 @@ static slab_t *bucket_create_slab(bucket_t *bucket, bool full_size) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket,
-                                                    bool *from_pool) {
-    // return a slab that will be used for a single allocation
-    if (bucket->available_slabs == NULL) {
-        bucket_create_slab(bucket, true /* full size */);
-        *from_pool = false;
-    } else {
-        bucket_decrement_pool(bucket, from_pool);
-    }
-
-    return bucket->available_slabs;
-}
-
-// NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_slab(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_full_slab(bucket, from_pool);
-    if (slab_it == NULL) {
-        return NULL;
-    }
-
-    slab_t *slab = slab_it->val;
-    void *ptr = slab_get(slab);
-
-    DL_DELETE(bucket->available_slabs, slab_it);
-    bucket->available_slabs_num--;
-    slab_it->prev = NULL;
-    DL_PREPEND(bucket->unavailable_slabs, slab_it);
-
-    return ptr;
-}
-
-// NOTE: this function must be called under bucket->bucket_lock
-static void bucket_free_slab(bucket_t *bucket, slab_t *slab, bool *to_pool) {
-    slab_list_item_t *slab_it = slab->iter;
-    assert(slab_it->val != NULL);
-    *to_pool = bucket_can_pool(bucket);
-    if (*to_pool) {
-        DL_DELETE(bucket->unavailable_slabs, slab_it);
-        slab_it->prev = NULL;
-        DL_PREPEND(bucket->available_slabs, slab_it);
-        bucket->available_slabs_num++;
-    } else {
-        slab_unreg(slab_it->val);
-        DL_DELETE(bucket->unavailable_slabs, slab_it);
-        destroy_slab(slab_it->val);
-    }
-}
-
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool) {
     if (bucket->available_slabs == NULL) {
-        bucket_create_slab(bucket, false /* chunked */);
+        bucket_create_slab(bucket);
         *from_pool = false;
     } else {
         slab_t *slab = bucket->available_slabs->val;
+        // Allocation from existing slab is treated as from pool for statistics.
+        *from_pool = true;
         if (slab->num_chunks_allocated == 0) {
             // If this was an empty slab, it was in the pool.
             // Now it is no longer in the pool, so update count.
             --bucket->chunked_slabs_in_pool;
-            bucket_decrement_pool(bucket, from_pool);
-        } else {
-            // Allocation from existing slab is treated as from pool for statistics.
-            *from_pool = true;
+            bucket_decrement_pool(bucket);
         }
     }
 
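[Note: the rewrite above also simplifies the from_pool accounting — the flag is now set up front for any reuse of an existing slab instead of being threaded through bucket_decrement_pool as an out-parameter. A self-contained toy model of that accounting; the toy_* types are invented here and reduce bucket_t/slab_t to only the fields this diff touches.]

#include <stdbool.h>
#include <stddef.h>

typedef struct {
    size_t num_chunks_allocated;
} toy_slab_t;

typedef struct {
    toy_slab_t *available_slab;  // NULL when a new slab must be created
    size_t chunked_slabs_in_pool;
} toy_bucket_t;

static void toy_get_avail_slab(toy_bucket_t *bucket, bool *from_pool) {
    if (bucket->available_slab == NULL) {
        *from_pool = false; // freshly created slab: not from the pool
    } else {
        *from_pool = true;  // any reuse counts as pooled for statistics
        if (bucket->available_slab->num_chunks_allocated == 0) {
            --bucket->chunked_slabs_in_pool; // empty slab leaves the pool
        }
    }
}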
@@ -475,10 +420,7 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
         in_pool * bucket_slab_alloc_size(bucket);
 }
 
-static void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
-    // If a slab was available in the pool then note that the current pooled
-    // size has reduced by the size of a slab in this bucket.
-    *from_pool = true;
+static void bucket_decrement_pool(bucket_t *bucket) {
     bucket_update_stats(bucket, 1, -1);
     utils_fetch_and_add64(&bucket->shared_limits->total_size,
                           -(long long)bucket_slab_alloc_size(bucket));
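[Note: bucket_decrement_pool is now pure bookkeeping — it moves one slab's worth of memory from "pooled" to "in use" and shrinks the shared total by atomically adding a negative delta. A sketch of that pattern using C11 atomics as a stand-in for the UMF-internal utils_fetch_and_add64 wrapper; pool_total_sub is a hypothetical name.]

#include <stdatomic.h>

// Shrink a shared running total by adding a negative value, mirroring the
// utils_fetch_and_add64 call in the hunk above.
static void pool_total_sub(_Atomic long long *total, long long slab_size) {
    atomic_fetch_add_explicit(total, -slab_size, memory_order_relaxed);
}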
@@ -487,13 +429,7 @@ static void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
 static bool bucket_can_pool(bucket_t *bucket) {
     size_t new_free_slabs_in_bucket;
 
-    // check if this bucket is used in chunked form or as full slabs
-    bool chunked_bucket = bucket->size <= bucket_chunk_cut_off(bucket);
-    if (chunked_bucket) {
-        new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;
-    } else {
-        new_free_slabs_in_bucket = bucket->available_slabs_num + 1;
-    }
+    new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;
 
     // we keep at most params.capacity slabs in the pool
     if (bucket_capacity(bucket) >= new_free_slabs_in_bucket) {
@@ -509,9 +445,7 @@ static bool bucket_can_pool(bucket_t *bucket) {
 
     if (utils_compare_exchange(&bucket->shared_limits->total_size,
                                &pool_size, &new_pool_size)) {
-        if (chunked_bucket) {
-            ++bucket->chunked_slabs_in_pool;
-        }
+        ++bucket->chunked_slabs_in_pool;
 
         bucket_update_stats(bucket, -1, 1);
         return true;
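[Note: bucket_can_pool claims room under the shared pool-size limit with a compare-exchange on total_size; a slab is pooled only if both the per-bucket capacity and the shared limit allow it. A sketch of that reservation pattern, with C11 atomics standing in for utils_compare_exchange; try_pool_reserve and max_size are illustrative names, and the limit check shown here corresponds to logic elided between the two hunks above.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

// Claim room for one slab under a shared limit, as bucket_can_pool does.
static bool try_pool_reserve(_Atomic size_t *total, size_t max_size,
                             size_t slab_size) {
    size_t cur = atomic_load(total);
    for (;;) {
        size_t next = cur + slab_size;
        if (next > max_size) {
            return false; // pooling would exceed the shared limit
        }
        // On failure, cur is refreshed with the current value and we retry.
        if (atomic_compare_exchange_weak(total, &cur, next)) {
            return true;
        }
    }
}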
@@ -614,11 +548,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    if (size > bucket_chunk_cut_off(bucket)) {
-        ptr = bucket_get_free_slab(bucket, &from_pool);
-    } else {
-        ptr = bucket_get_free_chunk(bucket, &from_pool);
-    }
+    ptr = bucket_get_free_chunk(bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -799,11 +729,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    if (aligned_size > bucket_chunk_cut_off(bucket)) {
-        ptr = bucket_get_free_slab(bucket, &from_pool);
-    } else {
-        ptr = bucket_get_free_chunk(bucket, &from_pool);
-    }
+    ptr = bucket_get_free_chunk(bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -889,11 +815,7 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     utils_annotate_memory_inaccessible(ptr, bucket->size);
-    if (bucket->size <= bucket_chunk_cut_off(bucket)) {
-        bucket_free_chunk(bucket, ptr, slab, &to_pool);
-    } else {
-        bucket_free_slab(bucket, slab, &to_pool);
-    }
+    bucket_free_chunk(bucket, ptr, slab, &to_pool);
 
     if (disjoint_pool->params.pool_trace > 1) {
         bucket->free_count++;