Skip to content

Commit 4730d57

Browse files
committed
make shared limits C structure
1 parent dbacd44 commit 4730d57

File tree

4 files changed

+118
-74
lines changed

4 files changed

+118
-74
lines changed

src/pool/pool_disjoint.c

Lines changed: 72 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,23 @@ extern "C" {
5151
#endif
5252
}
5353

54+
// Limits shared between one or more disjoint-pool instances: caps the
// total size that all buckets referencing this object may keep pooled.
typedef struct umf_disjoint_pool_shared_limits_t {
    // Upper bound on the combined pooled size (presumably bytes, the unit
    // of bucket_slab_alloc_size — TODO confirm) across all sharing buckets.
    size_t max_size;
    // Currently pooled size; atomic because buckets on different threads
    // update it concurrently (fetch-add / compare-exchange in
    // bucket_decrement_pool / bucket_can_pool).
    _Atomic(size_t) total_size;
} umf_disjoint_pool_shared_limits_t;
58+
59+
// Allocate and initialize a shared-limits object that caps the total
// pooled size across every bucket that references it.
//
// max_size - maximum combined pooled size allowed (use SIZE_MAX for an
//            effectively unlimited pool).
// Returns the new object, or NULL if the base allocator is out of memory.
umf_disjoint_pool_shared_limits_t *shared_limits_create(size_t max_size) {
    umf_disjoint_pool_shared_limits_t *ptr =
        umf_ba_global_alloc(sizeof(umf_disjoint_pool_shared_limits_t));
    // The original code dereferenced the allocation unconditionally;
    // propagate allocation failure to the caller instead of crashing.
    if (ptr == NULL) {
        return NULL;
    }
    ptr->max_size = max_size;
    ptr->total_size = 0;
    return ptr;
}
66+
67+
// Release a shared-limits object previously obtained from
// shared_limits_create(). The caller must ensure no bucket still
// references it when this is called.
void shared_limits_destroy(umf_disjoint_pool_shared_limits_t *shared_limits) {
    umf_ba_global_free(shared_limits);
}
70+
5471
size_t bucket_get_size(bucket_t *bucket);
5572

5673
void slab_reg(slab_t *slab);
@@ -212,7 +229,8 @@ void slab_unreg(slab_t *slab) {
212229
slab_unreg_by_addr(end_addr, slab);
213230
}
214231

215-
bucket_t *create_bucket(size_t Sz, void *AllocCtx) {
232+
bucket_t *create_bucket(size_t Sz, void *AllocCtx,
233+
umf_disjoint_pool_shared_limits_t *shared_limits) {
216234
bucket_t *bucket = (bucket_t *)umf_ba_global_alloc(sizeof(bucket_t));
217235

218236
bucket->Size = Sz;
@@ -228,6 +246,9 @@ bucket_t *create_bucket(size_t Sz, void *AllocCtx) {
228246
bucket->allocCount = 0;
229247
bucket->maxSlabsInUse = 0;
230248

249+
bucket->shared_limits = shared_limits;
250+
assert(shared_limits);
251+
231252
utils_mutex_init(&bucket->bucket_lock);
232253

233254
return bucket;
@@ -453,6 +474,56 @@ void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
453474
in_pool * bucket_slab_alloc_size(bucket);
454475
}
455476

477+
// If a slab was available in the pool then note that the current pooled
// size has reduced by the size of a slab in this bucket.
//
// bucket    - bucket the slab was taken from.
// from_pool - out-parameter; always set to true here, signalling to the
//             caller that the slab came from the pool rather than from a
//             fresh provider allocation.
void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
    *from_pool = true;
    // One more slab in use, one fewer held in the pool.
    bucket_update_stats(bucket, 1, -1);
    // Atomically subtract the slab size from the shared pooled-size
    // counter. Negating the (presumably unsigned) slab size relies on
    // wraparound so that the fetch-and-add acts as a subtraction —
    // TODO confirm bucket_slab_alloc_size's return type.
    utils_fetch_and_add64(&bucket->shared_limits->total_size,
                          -bucket_slab_alloc_size(bucket));
}
485+
486+
// Decide whether a freed slab of this bucket may be retained in the pool
// rather than returned to the provider. Returns true (and sets *to_pool)
// when both the bucket's own capacity and the shared total-size limit
// allow it; updates the bucket statistics either way.
bool bucket_can_pool(bucket_t *bucket, bool *to_pool) {
    size_t NewFreeSlabsInBucket;
    // Check if this bucket is used in chunked form or as full slabs.
    bool chunkedBucket =
        bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket);
    if (chunkedBucket) {
        NewFreeSlabsInBucket = bucket->chunkedSlabsInPool + 1;
    } else {
        // TODO optimize
        // Count the available slabs by walking the list (O(n)).
        size_t avail_num = 0;
        slab_list_item_t *it = NULL;
        DL_FOREACH(bucket->AvailableSlabs, it) { avail_num++; }
        NewFreeSlabsInBucket = avail_num + 1;
    }
    if (bucket_capacity(bucket) >= NewFreeSlabsInBucket) {
        // CAS retry loop: reserve the slab's size in the shared counter
        // without a lock. On CAS failure, utils_compare_exchange refreshes
        // pool_size with the current counter value and we retry.
        size_t pool_size = bucket->shared_limits->total_size;
        while (true) {
            size_t new_pool_size = pool_size + bucket_slab_alloc_size(bucket);

            // Pooling this slab would exceed the shared limit — give up.
            if (bucket->shared_limits->max_size < new_pool_size) {
                break;
            }

            if (utils_compare_exchange(&bucket->shared_limits->total_size,
                                       &pool_size, &new_pool_size)) {
                if (chunkedBucket) {
                    ++bucket->chunkedSlabsInPool;
                }

                // One fewer slab in use, one more held in the pool.
                bucket_update_stats(bucket, -1, 1);
                *to_pool = true;
                return true;
            }
        }
    }

    // Not pooled: the slab leaves both the in-use and pooled counts.
    bucket_update_stats(bucket, -1, 0);
    *to_pool = false;
    return false;
}
526+
456527
#ifdef __cplusplus
457528
}
458529
#endif

src/pool/pool_disjoint.cpp

Lines changed: 19 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@
3535
#include "utils_math.h"
3636
#include "utils_sanitizers.h"
3737

38+
#include "utils_concurrency.h"
39+
3840
// TODO remove
3941
#ifdef __cplusplus
4042
extern "C" {
@@ -49,11 +51,6 @@ struct slab_t;
4951
#endif
5052
// end TODO remove
5153

52-
typedef struct umf_disjoint_pool_shared_limits_t {
53-
size_t MaxSize;
54-
std::atomic<size_t> TotalSize;
55-
} umf_disjoint_pool_shared_limits_t;
56-
5754
class DisjointPool {
5855
public:
5956
class AllocImpl;
@@ -78,12 +75,12 @@ class DisjointPool {
7875

7976
umf_disjoint_pool_shared_limits_t *
8077
umfDisjointPoolSharedLimitsCreate(size_t MaxSize) {
81-
return new umf_disjoint_pool_shared_limits_t{MaxSize, 0};
78+
return shared_limits_create(MaxSize);
8279
}
8380

8481
void umfDisjointPoolSharedLimitsDestroy(
8582
umf_disjoint_pool_shared_limits_t *limits) {
86-
delete limits;
83+
shared_limits_destroy(limits);
8784
}
8885

8986
// Allocations are a minimum of 4KB/64KB/2MB even when a smaller size is
@@ -125,8 +122,7 @@ class DisjointPool::AllocImpl {
125122
// Configuration for this instance
126123
umf_disjoint_pool_params_t params;
127124

128-
umf_disjoint_pool_shared_limits_t DefaultSharedLimits = {
129-
(std::numeric_limits<size_t>::max)(), 0};
125+
umf_disjoint_pool_shared_limits_t *DefaultSharedLimits;
130126

131127
// Used in algorithm for finding buckets
132128
std::size_t MinBucketSizeExp;
@@ -150,13 +146,14 @@ class DisjointPool::AllocImpl {
150146
Size1 = std::max(Size1, UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE);
151147
// Calculate the exponent for MinBucketSize used for finding buckets.
152148
MinBucketSizeExp = (size_t)log2Utils(Size1);
149+
DefaultSharedLimits = shared_limits_create(SIZE_MAX);
153150
auto Size2 = Size1 + Size1 / 2;
154151
for (; Size2 < CutOff; Size1 *= 2, Size2 *= 2) {
155152
// TODO copy allocimpl
156-
Buckets.push_back(create_bucket(Size1, this));
157-
Buckets.push_back(create_bucket(Size2, this));
153+
Buckets.push_back(create_bucket(Size1, this, this->getLimits()));
154+
Buckets.push_back(create_bucket(Size2, this, this->getLimits()));
158155
}
159-
Buckets.push_back(create_bucket(CutOff, this));
156+
Buckets.push_back(create_bucket(CutOff, this, this->getLimits()));
160157

161158
auto ret = umfMemoryProviderGetMinPageSize(hProvider, nullptr,
162159
&ProviderMinPageSize);
@@ -166,6 +163,8 @@ class DisjointPool::AllocImpl {
166163
}
167164

168165
~AllocImpl() {
166+
// TODO
167+
// destroy DefaultSharedLimits
169168

170169
for (auto it = Buckets.begin(); it != Buckets.end(); it++) {
171170
destroy_bucket(*it);
@@ -196,7 +195,7 @@ class DisjointPool::AllocImpl {
196195
if (params.SharedLimits) {
197196
return params.SharedLimits;
198197
} else {
199-
return &DefaultSharedLimits;
198+
return DefaultSharedLimits;
200199
}
201200
};
202201

@@ -254,14 +253,6 @@ std::ostream &operator<<(std::ostream &Os, slab_t &Slab) {
254253
}
255254
*/
256255

257-
// If a slab was available in the pool then note that the current pooled
258-
// size has reduced by the size of a slab in this bucket.
259-
void bucket_decrement_pool(bucket_t *bucket, bool *FromPool) {
260-
*FromPool = true;
261-
bucket_update_stats(bucket, 1, -1);
262-
bucket_get_limits(bucket)->TotalSize -= bucket_slab_alloc_size(bucket);
263-
}
264-
265256
/*
266257
void Bucket::printStats(bool &TitlePrinted, const std::string &Label) {
267258
if (allocCount) {
@@ -553,7 +544,7 @@ size_t DisjointPool::malloc_usable_size(void *) {
553544
umf_result_t DisjointPool::free(void *ptr) {
554545
bool ToPool;
555546
umf_result_t ret = impl->deallocate(ptr, ToPool);
556-
547+
/*
557548
if (ret == UMF_RESULT_SUCCESS) {
558549
559550
if (impl->getParams().PoolTrace > 2) {
@@ -565,7 +556,7 @@ umf_result_t DisjointPool::free(void *ptr) {
565556
<< ", Current pool size for " << MT << " "
566557
<< impl->getParams().CurPoolSize << "\n";
567558
}
568-
}
559+
}*/
569560
return ret;
570561
}
571562

@@ -577,10 +568,11 @@ DisjointPool::DisjointPool() {}
577568

578569
// Define destructor for use with unique_ptr
579570
DisjointPool::~DisjointPool() {
580-
bool TitlePrinted = false;
581-
size_t HighBucketSize;
582-
size_t HighPeakSlabsInUse;
571+
/*
583572
if (impl->getParams().PoolTrace > 1) {
573+
bool TitlePrinted = false;
574+
size_t HighBucketSize;
575+
size_t HighPeakSlabsInUse;
584576
auto name = impl->getParams().Name;
585577
//try { // cannot throw in destructor
586578
impl->printStats(TitlePrinted, HighBucketSize, HighPeakSlabsInUse,
@@ -596,6 +588,7 @@ DisjointPool::~DisjointPool() {
596588
//} catch (...) { // ignore exceptions
597589
// }
598590
}
591+
*/
599592
}
600593

601594
static umf_memory_pool_ops_t UMF_DISJOINT_POOL_OPS =
@@ -666,47 +659,6 @@ void slab_unreg_by_addr(void *addr, slab_t *slab) {
666659
assert(false && "Slab is not found");
667660
}
668661

669-
bool bucket_can_pool(bucket_t *bucket, bool *ToPool) {
670-
size_t NewFreeSlabsInBucket;
671-
// Check if this bucket is used in chunked form or as full slabs.
672-
bool chunkedBucket =
673-
bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket);
674-
if (chunkedBucket) {
675-
NewFreeSlabsInBucket = bucket->chunkedSlabsInPool + 1;
676-
} else {
677-
// TODO optimize
678-
size_t avail_num = 0;
679-
slab_list_item_t *it = NULL;
680-
DL_FOREACH(bucket->AvailableSlabs, it) { avail_num++; }
681-
NewFreeSlabsInBucket = avail_num + 1;
682-
}
683-
if (bucket_capacity(bucket) >= NewFreeSlabsInBucket) {
684-
size_t PoolSize = bucket_get_limits(bucket)->TotalSize;
685-
while (true) {
686-
size_t NewPoolSize = PoolSize + bucket_slab_alloc_size(bucket);
687-
688-
if (bucket_get_limits(bucket)->MaxSize < NewPoolSize) {
689-
break;
690-
}
691-
692-
if (bucket_get_limits(bucket)->TotalSize.compare_exchange_strong(
693-
PoolSize, NewPoolSize)) {
694-
if (chunkedBucket) {
695-
++bucket->chunkedSlabsInPool;
696-
}
697-
698-
bucket_update_stats(bucket, -1, 1);
699-
*ToPool = true;
700-
return true;
701-
}
702-
}
703-
}
704-
705-
bucket_update_stats(bucket, -1, 0);
706-
*ToPool = false;
707-
return false;
708-
}
709-
710662
#ifdef __cplusplus
711663
}
712664
#endif

src/pool/pool_disjoint_temp.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66
void annotate_memory_inaccessible(void *ptr, size_t size);
77
void annotate_memory_undefined(void *ptr, size_t size);
88

9+
umf_disjoint_pool_shared_limits_t *shared_limits_create(size_t max_size);
10+
void shared_limits_destroy(umf_disjoint_pool_shared_limits_t *shared_limits);
11+
912
typedef struct slab_list_item_t slab_list_item_t;
1013

1114
typedef struct bucket_t {
@@ -24,6 +27,8 @@ typedef struct bucket_t {
2427
// routines, slab map and etc.
2528
void *OwnAllocCtx;
2629

30+
umf_disjoint_pool_shared_limits_t *shared_limits;
31+
2732
// For buckets used in chunked mode, a counter of slabs in the pool.
2833
// For allocations that use an entire slab each, the entries in the Available
2934
// list are entries in the pool.Each slab is available for a new
@@ -109,7 +114,8 @@ void slab_reg_by_addr(void *addr, slab_t *slab);
109114
void slab_unreg(slab_t *slab);
110115
void slab_unreg_by_addr(void *addr, slab_t *slab);
111116

112-
bucket_t *create_bucket(size_t sz, void *alloc_ctx);
117+
bucket_t *create_bucket(size_t sz, void *alloc_ctx,
118+
umf_disjoint_pool_shared_limits_t *shared_limits);
113119
void destroy_bucket(bucket_t *bucket);
114120

115121
void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool);

src/utils/utils_concurrency.h

Lines changed: 20 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -61,11 +61,13 @@ int utils_mutex_unlock(utils_mutex_t *mutex);
6161
void utils_init_once(UTIL_ONCE_FLAG *flag, void (*onceCb)(void));
6262

6363
#if defined(_WIN32)
64+
6465
static __inline unsigned char utils_lssb_index(long long value) {
6566
unsigned long ret;
6667
_BitScanForward64(&ret, value);
6768
return (unsigned char)ret;
6869
}
70+
6971
static __inline unsigned char utils_mssb_index(long long value) {
7072
unsigned long ret;
7173
_BitScanReverse64(&ret, value);
@@ -81,15 +83,21 @@ static __inline unsigned char utils_mssb_index(long long value) {
8183

8284
#define utils_atomic_store_release(object, desired) \
8385
InterlockedExchange64((LONG64 volatile *)object, (LONG64)desired)
86+
8487
#define utils_atomic_increment(object) \
8588
InterlockedIncrement64((LONG64 volatile *)object)
89+
8690
#define utils_atomic_decrement(object) \
8791
InterlockedDecrement64((LONG64 volatile *)object)
92+
8893
#define utils_fetch_and_add64(ptr, value) \
8994
InterlockedExchangeAdd64((LONG64 *)(ptr), value)
90-
#else
95+
96+
#else // !defined(_WIN32)
97+
9198
#define utils_lssb_index(x) ((unsigned char)__builtin_ctzll(x))
9299
#define utils_mssb_index(x) ((unsigned char)(63 - __builtin_clzll(x)))
100+
93101
#define utils_atomic_load_acquire(object, dest) \
94102
do { \
95103
utils_annotate_acquire((void *)object); \
@@ -103,12 +111,19 @@ static __inline unsigned char utils_mssb_index(long long value) {
103111
} while (0)
104112

105113
#define utils_atomic_increment(object) \
106-
__atomic_add_fetch(object, 1, __ATOMIC_ACQ_REL)
114+
__atomic_add_fetch(object, 1, memory_order_acq_rel)
115+
107116
#define utils_atomic_decrement(object) \
108-
__atomic_sub_fetch(object, 1, __ATOMIC_ACQ_REL)
109-
#define utils_fetch_and_add64 __sync_fetch_and_add
117+
__atomic_sub_fetch(object, 1, memory_order_acq_rel)
110118

111-
#endif
119+
#define utils_fetch_and_add64(object, value) \
120+
__atomic_fetch_add(object, value, memory_order_acq_rel)
121+
122+
#define utils_compare_exchange(object, expected, desired) \
123+
__atomic_compare_exchange(object, expected, desired, 0 /* strong */, \
124+
memory_order_acq_rel, memory_order_relaxed)
125+
126+
#endif // !defined(_WIN32)
112127

113128
#ifdef __cplusplus
114129
}

0 commit comments

Comments
 (0)