Commit 75dea95

lplewa authored and bratpiorka committed

remove distinction between "chunked slab" and "full slab"
Instead of having two modes, we can have only chunked slabs: a full slab is simply a chunked slab with a single chunk. This removes extra complexity from the code. It should not have a performance impact, as a few extra steps were added for big allocations but an extra branch was removed.

Signed-off-by: Łukasz Plewa <[email protected]>
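
As a sketch of the unified model (standalone C; the 64 KiB minimum slab size is an assumed value, and `slab_min_size`/`utils_max` below are illustrative stand-ins for the pool's helpers, not its real API): a bucket larger than the minimum slab size ends up with exactly one chunk, so the former "full slab" is just the degenerate chunked slab.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins: the real bucket_slab_min_size() derives from
 * pool parameters; 64 KiB is an assumed value for this sketch. */
static size_t utils_max(size_t a, size_t b) { return a > b ? a : b; }
static size_t slab_min_size(void) { return 64 * 1024; }

/* Every slab is now chunked; a "full slab" is the degenerate case of
 * exactly one chunk. */
static size_t num_chunks_for(size_t bucket_size) {
    return utils_max(slab_min_size() / bucket_size, 1);
}

int main(void) {
    printf("%zu\n", num_chunks_for(4 * 1024));   /* small bucket: 16 chunks */
    printf("%zu\n", num_chunks_for(256 * 1024)); /* big bucket: 1 chunk */
    return 0;
}
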
1 parent 9ec909e commit 75dea95

File tree

1 file changed: +23 −101 lines


src/pool/pool_disjoint.c

Lines changed: 23 additions & 101 deletions
@@ -18,7 +18,7 @@
 // Forward declarations
 static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
 static bool bucket_can_pool(bucket_t *bucket);
-static void bucket_decrement_pool(bucket_t *bucket, bool *from_pool);
+static void bucket_decrement_pool(bucket_t *bucket);
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool);

@@ -52,7 +52,7 @@ static size_t bucket_slab_alloc_size(bucket_t *bucket) {
     return utils_max(bucket->size, bucket_slab_min_size(bucket));
 }
 
-static slab_t *create_slab(bucket_t *bucket, bool full_size) {
+static slab_t *create_slab(bucket_t *bucket) {
     assert(bucket);
 
     umf_result_t res = UMF_RESULT_SUCCESS;
@@ -76,19 +76,16 @@ static slab_t *create_slab(bucket_t *bucket, bool full_size) {
     slab->iter->val = slab;
     slab->iter->prev = slab->iter->next = NULL;
 
-    if (full_size) {
-        slab->num_chunks_total = 0;
-        slab->chunks = NULL;
-    } else {
-        slab->num_chunks_total = bucket_slab_min_size(bucket) / bucket->size;
-        slab->chunks =
-            umf_ba_global_alloc(sizeof(*slab->chunks) * slab->num_chunks_total);
-        if (slab->chunks == NULL) {
-            LOG_ERR("allocation of slab chunks failed!");
-            goto free_slab_iter;
-        }
-        memset(slab->chunks, 0, sizeof(*slab->chunks) * slab->num_chunks_total);
+    slab->num_chunks_total =
+        utils_max(bucket_slab_min_size(bucket) / bucket->size, 1);
+    slab->chunks =
+        umf_ba_global_alloc(sizeof(*slab->chunks) * slab->num_chunks_total);
+    if (slab->chunks == NULL) {
+        LOG_ERR("allocation of slab chunks failed!");
+        goto free_slab_iter;
     }
+    memset(slab->chunks, 0, sizeof(*slab->chunks) * slab->num_chunks_total);
 
     // if slab_min_size is not a multiple of bucket size, we would have some
     // padding at the end of the slab
     slab->slab_size = bucket_slab_alloc_size(bucket);
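
The padding comment above can be made concrete with a worked example (standalone C with assumed values): with a 64 KiB minimum slab size and a 3 KiB bucket, the slab holds 21 chunks and the last 1 KiB is padding.

#include <stdio.h>
#include <stddef.h>

int main(void) {
    /* assumed minimum slab size for illustration: 64 KiB */
    size_t slab_min = 64 * 1024;
    /* a bucket size that does not divide it evenly: 3 KiB */
    size_t bucket_size = 3 * 1024;

    size_t num_chunks = slab_min / bucket_size; /* 21 chunks */
    size_t used = num_chunks * bucket_size;     /* 64512 bytes */
    size_t padding = slab_min - used;           /* 1024 bytes of padding */

    printf("chunks=%zu used=%zu padding=%zu\n", num_chunks, used, padding);
    return 0;
}
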
@@ -157,9 +154,6 @@ static size_t slab_find_first_available_chunk_idx(const slab_t *slab) {
 }
 
 static void *slab_get_chunk(slab_t *slab) {
-    // slab has to be allocated in chunk mode
-    assert(slab->chunks && slab->num_chunks_total > 0);
-
     // free chunk must exist, otherwise we would have allocated another slab
     const size_t chunk_idx = slab_find_first_available_chunk_idx(slab);
     assert(chunk_idx != SIZE_MAX);
@@ -356,8 +350,8 @@ static size_t bucket_chunk_cut_off(bucket_t *bucket) {
     return bucket_slab_min_size(bucket) / 2;
 }
 
-static slab_t *bucket_create_slab(bucket_t *bucket, bool full_size) {
-    slab_t *slab = create_slab(bucket, full_size);
+static slab_t *bucket_create_slab(bucket_t *bucket) {
+    slab_t *slab = create_slab(bucket);
     if (slab == NULL) {
         LOG_ERR("create_slab failed!")
         return NULL;
@@ -377,69 +371,20 @@ static slab_t *bucket_create_slab(bucket_t *bucket, bool full_size) {
     return slab;
 }
 
-static slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket,
-                                                    bool *from_pool) {
-    // return a slab that will be used for a single allocation
-    if (bucket->available_slabs == NULL) {
-        bucket_create_slab(bucket, true /* full size */);
-        *from_pool = false;
-    } else {
-        bucket_decrement_pool(bucket, from_pool);
-    }
-
-    return bucket->available_slabs;
-}
-
-// NOTE: this function must be called under bucket->bucket_lock
-static void *bucket_get_free_slab(bucket_t *bucket, bool *from_pool) {
-    slab_list_item_t *slab_it = bucket_get_avail_full_slab(bucket, from_pool);
-    if (slab_it == NULL) {
-        return NULL;
-    }
-
-    slab_t *slab = slab_it->val;
-    void *ptr = slab_get(slab);
-
-    DL_DELETE(bucket->available_slabs, slab_it);
-    bucket->available_slabs_num--;
-    slab_it->prev = NULL;
-    DL_PREPEND(bucket->unavailable_slabs, slab_it);
-
-    return ptr;
-}
-
-// NOTE: this function must be called under bucket->bucket_lock
-static void bucket_free_slab(bucket_t *bucket, slab_t *slab, bool *to_pool) {
-    slab_list_item_t *slab_it = slab->iter;
-    assert(slab_it->val != NULL);
-    *to_pool = bucket_can_pool(bucket);
-    if (*to_pool) {
-        DL_DELETE(bucket->unavailable_slabs, slab_it);
-        slab_it->prev = NULL;
-        DL_PREPEND(bucket->available_slabs, slab_it);
-        bucket->available_slabs_num++;
-    } else {
-        slab_unreg(slab_it->val);
-        DL_DELETE(bucket->unavailable_slabs, slab_it);
-        destroy_slab(slab_it->val);
-    }
-}
-
 static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
                                                bool *from_pool) {
     if (bucket->available_slabs == NULL) {
-        bucket_create_slab(bucket, false /* chunked */);
+        bucket_create_slab(bucket);
         *from_pool = false;
     } else {
         slab_t *slab = bucket->available_slabs->val;
+        // Allocation from existing slab is treated as from pool for statistics.
+        *from_pool = true;
         if (slab->num_chunks_allocated == 0) {
             // If this was an empty slab, it was in the pool.
             // Now it is no longer in the pool, so update count.
             --bucket->chunked_slabs_in_pool;
-            bucket_decrement_pool(bucket, from_pool);
-        } else {
-            // Allocation from existing slab is treated as from pool for statistics.
-            *from_pool = true;
+            bucket_decrement_pool(bucket);
         }
     }
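
A reduced model of the bookkeeping in this hunk (hypothetical `mini_bucket_t`, not the real `bucket_t`, which carries slab lists, locks, and stats): any reuse of an existing slab is reported as a pool hit, and taking an empty slab additionally decrements `chunked_slabs_in_pool`, preserving the invariant that the counter tracks empty slabs on the available list.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, reduced model of the bucket state this hunk touches. */
typedef struct {
    size_t empty_slabs_available; /* empty slabs on the available list */
    size_t chunked_slabs_in_pool; /* pooled-slab counter from the diff */
} mini_bucket_t;

/* Mirrors the new bookkeeping: reusing any existing slab counts as
 * "from pool" for statistics; taking an empty slab also removes it
 * from the pooled-slab count. */
static bool take_from_available(mini_bucket_t *b, bool slab_is_empty) {
    bool from_pool = true;
    if (slab_is_empty) {
        b->chunked_slabs_in_pool--;
        b->empty_slabs_available--;
    }
    return from_pool;
}

int main(void) {
    mini_bucket_t b = {.empty_slabs_available = 1, .chunked_slabs_in_pool = 1};
    assert(take_from_available(&b, true));
    /* invariant: the pooled count tracks the empty slabs on the list */
    assert(b.chunked_slabs_in_pool == b.empty_slabs_available);
    return 0;
}
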

@@ -475,10 +420,7 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
                        in_pool * bucket_slab_alloc_size(bucket);
 }
 
-static void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
-    // If a slab was available in the pool then note that the current pooled
-    // size has reduced by the size of a slab in this bucket.
-    *from_pool = true;
+static void bucket_decrement_pool(bucket_t *bucket) {
     bucket_update_stats(bucket, 1, -1);
     utils_fetch_and_add64(&bucket->shared_limits->total_size,
                           -(long long)bucket_slab_alloc_size(bucket));
@@ -487,13 +429,7 @@ static void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
 static bool bucket_can_pool(bucket_t *bucket) {
     size_t new_free_slabs_in_bucket;
 
-    // check if this bucket is used in chunked form or as full slabs
-    bool chunked_bucket = bucket->size <= bucket_chunk_cut_off(bucket);
-    if (chunked_bucket) {
-        new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;
-    } else {
-        new_free_slabs_in_bucket = bucket->available_slabs_num + 1;
-    }
+    new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;
 
     // we keep at most params.capacity slabs in the pool
     if (bucket_capacity(bucket) >= new_free_slabs_in_bucket) {
@@ -509,9 +445,7 @@ static bool bucket_can_pool(bucket_t *bucket) {
 
     if (utils_compare_exchange(&bucket->shared_limits->total_size,
                                &pool_size, &new_pool_size)) {
-        if (chunked_bucket) {
-            ++bucket->chunked_slabs_in_pool;
-        }
+        ++bucket->chunked_slabs_in_pool;
 
         bucket_update_stats(bucket, -1, 1);
         return true;
@@ -614,11 +548,7 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     bool from_pool = false;
-    if (size > bucket_chunk_cut_off(bucket)) {
-        ptr = bucket_get_free_slab(bucket, &from_pool);
-    } else {
-        ptr = bucket_get_free_chunk(bucket, &from_pool);
-    }
+    ptr = bucket_get_free_chunk(bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -799,11 +729,7 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
 
     utils_mutex_lock(&bucket->bucket_lock);
 
-    if (aligned_size > bucket_chunk_cut_off(bucket)) {
-        ptr = bucket_get_free_slab(bucket, &from_pool);
-    } else {
-        ptr = bucket_get_free_chunk(bucket, &from_pool);
-    }
+    ptr = bucket_get_free_chunk(bucket, &from_pool);
 
     if (ptr == NULL) {
         TLS_last_allocation_error = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
@@ -889,11 +815,7 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
     utils_mutex_lock(&bucket->bucket_lock);
 
     utils_annotate_memory_inaccessible(ptr, bucket->size);
-    if (bucket->size <= bucket_chunk_cut_off(bucket)) {
-        bucket_free_chunk(bucket, ptr, slab, &to_pool);
-    } else {
-        bucket_free_slab(bucket, slab, &to_pool);
-    }
+    bucket_free_chunk(bucket, ptr, slab, &to_pool);
 
     if (disjoint_pool->params.pool_trace > 1) {
         bucket->free_count++;
