Skip to content

Commit ddb901b

Browse files
committed
prevent deadlock in DP bucket_can_pool()
1 parent 6932964 commit ddb901b

File tree

1 file changed

+34
-31
lines changed

1 file changed

+34
-31
lines changed

src/pool/pool_disjoint.c

Lines changed: 34 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
#include "provider/provider_tracking.h"
2121
#include "uthash/utlist.h"
2222
#include "utils_common.h"
23+
#include "utils_concurrency.h"
2324
#include "utils_log.h"
2425
#include "utils_math.h"
2526

@@ -34,7 +35,6 @@
3435
// Forward declarations
3536
static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);
3637
static bool bucket_can_pool(bucket_t *bucket);
37-
static void bucket_decrement_pool(bucket_t *bucket);
3838
static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
3939
bool *from_pool);
4040

@@ -316,6 +316,7 @@ static void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab,
316316
assert(slab_it->val != NULL);
317317
pool_unregister_slab(bucket->pool, slab_it->val);
318318
DL_DELETE(bucket->available_slabs, slab_it);
319+
assert(bucket->available_slabs_num > 0);
319320
bucket->available_slabs_num--;
320321
destroy_slab(slab_it->val);
321322
}
@@ -381,10 +382,20 @@ static slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket,
381382
// Allocation from existing slab is treated as from pool for statistics.
382383
*from_pool = true;
383384
if (slab->num_chunks_allocated == 0) {
385+
assert(bucket->chunked_slabs_in_pool > 0);
386+
#ifndef NDEBUG
387+
uint64_t total_size_check;
388+
utils_atomic_load_acquire_u64(&bucket->shared_limits->total_size,
389+
&total_size_check);
390+
assert(total_size_check >= bucket_slab_alloc_size(bucket));
391+
#endif
384392
// If this was an empty slab, it was in the pool.
385393
// Now it is no longer in the pool, so update count.
386394
--bucket->chunked_slabs_in_pool;
387-
bucket_decrement_pool(bucket);
395+
ALIGNED_8 uint64_t size_to_add = bucket_slab_alloc_size(bucket);
396+
utils_fetch_and_sub_u64(&bucket->shared_limits->total_size,
397+
size_to_add);
398+
bucket_update_stats(bucket, 1, -1);
388399
}
389400
}
390401

@@ -420,36 +431,26 @@ static void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
420431
in_pool * bucket_slab_alloc_size(bucket);
421432
}
422433

423-
static void bucket_decrement_pool(bucket_t *bucket) {
424-
bucket_update_stats(bucket, 1, -1);
425-
utils_fetch_and_add64(&bucket->shared_limits->total_size,
426-
-(long long)bucket_slab_alloc_size(bucket));
427-
}
428-
429434
static bool bucket_can_pool(bucket_t *bucket) {
430435
size_t new_free_slabs_in_bucket;
431436

432437
new_free_slabs_in_bucket = bucket->chunked_slabs_in_pool + 1;
433438

434439
// we keep at most params.capacity slabs in the pool
435440
if (bucket_max_pooled_slabs(bucket) >= new_free_slabs_in_bucket) {
436-
size_t pool_size = 0;
437-
utils_atomic_load_acquire(&bucket->shared_limits->total_size,
438-
&pool_size);
439-
while (true) {
440-
size_t new_pool_size = pool_size + bucket_slab_alloc_size(bucket);
441-
442-
if (bucket->shared_limits->max_size < new_pool_size) {
443-
break;
444-
}
445-
446-
if (utils_compare_exchange(&bucket->shared_limits->total_size,
447-
&pool_size, &new_pool_size)) {
448-
++bucket->chunked_slabs_in_pool;
449-
450-
bucket_update_stats(bucket, -1, 1);
451-
return true;
452-
}
441+
442+
ALIGNED_8 uint64_t size_to_add = bucket_slab_alloc_size(bucket);
443+
size_t previous_size = utils_fetch_and_add_u64(
444+
&bucket->shared_limits->total_size, size_to_add);
445+
446+
if (previous_size + size_to_add <= bucket->shared_limits->max_size) {
447+
448+
++bucket->chunked_slabs_in_pool;
449+
bucket_update_stats(bucket, -1, 1);
450+
return true;
451+
} else {
452+
utils_fetch_and_sub_u64(&bucket->shared_limits->total_size,
453+
size_to_add);
453454
}
454455
}
455456

@@ -523,8 +524,8 @@ static void disjoint_pool_print_stats(disjoint_pool_t *pool) {
523524
utils_mutex_unlock(&bucket->bucket_lock);
524525
}
525526

526-
LOG_DEBUG("current pool size: %zu",
527-
disjoint_pool_get_limits(pool)->total_size);
527+
LOG_DEBUG("current pool size: %llu",
528+
(unsigned long long)disjoint_pool_get_limits(pool)->total_size);
528529
LOG_DEBUG("suggested setting=;%c%s:%zu,%zu,64K", (char)tolower(name[0]),
529530
(name + 1), high_bucket_size, high_peak_slabs_in_use);
530531
}
@@ -839,11 +840,12 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
839840

840841
if (disjoint_pool->params.pool_trace > 2) {
841842
const char *name = disjoint_pool->params.name;
842-
LOG_DEBUG("freed %s %p to %s, current total pool size: %zu, current "
843+
LOG_DEBUG("freed %s %p to %s, current total pool size: %llu, current "
843844
"pool size for %s: %zu",
844845
name, ptr, (to_pool ? "pool" : "provider"),
845-
disjoint_pool_get_limits(disjoint_pool)->total_size, name,
846-
disjoint_pool->params.cur_pool_size);
846+
(unsigned long long)disjoint_pool_get_limits(disjoint_pool)
847+
->total_size,
848+
name, disjoint_pool->params.cur_pool_size);
847849
}
848850

849851
return UMF_RESULT_SUCCESS;
@@ -895,7 +897,8 @@ umf_memory_pool_ops_t *umfDisjointPoolOps(void) {
895897

896898
umf_disjoint_pool_shared_limits_t *
897899
umfDisjointPoolSharedLimitsCreate(size_t max_size) {
898-
umf_disjoint_pool_shared_limits_t *ptr = umf_ba_global_alloc(sizeof(*ptr));
900+
umf_disjoint_pool_shared_limits_t *ptr =
901+
umf_ba_global_aligned_alloc(sizeof(*ptr), 8);
899902
if (ptr == NULL) {
900903
LOG_ERR("cannot allocate memory for disjoint pool shared limits");
901904
return NULL;

0 commit comments

Comments
 (0)