Skip to content

Commit 97df9e5

Browse files
committed
use list implementation from utlist
1 parent 7f7dbb5 commit 97df9e5

File tree

3 files changed

+82
-87
lines changed

3 files changed

+82
-87
lines changed

src/pool/pool_disjoint.c

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ umf_memory_provider_handle_t bucket_get_provider(const bucket_t bucket);
5858
void slab_reg(slab_t *slab);
5959
void slab_unreg(slab_t *slab);
6060

61-
slab_t *create_slab(bucket_t bucket, size_t iter_size) {
61+
slab_t *create_slab(bucket_t bucket) {
6262
// In case bucket size is not a multiple of SlabMinSize, we would have
6363
// some padding at the end of the slab.
6464
slab_t *slab = umf_ba_global_alloc(sizeof(slab_t));
@@ -67,9 +67,11 @@ slab_t *create_slab(bucket_t bucket, size_t iter_size) {
6767
slab->num_allocated = 0;
6868
slab->first_free_chunk_idx = 0;
6969
slab->bucket = bucket;
70-
slab->slab_list_iter = umf_ba_global_alloc(iter_size);
71-
slab->slab_list_iter_size = iter_size;
72-
memset(slab->slab_list_iter, 0, iter_size);
70+
71+
slab->iter =
72+
(slab_list_item_t *)umf_ba_global_alloc(sizeof(slab_list_item_t));
73+
slab->iter->val = slab;
74+
slab->iter->prev = slab->iter->next = NULL;
7375

7476
slab->num_chunks =
7577
bucket_get_slab_min_size(bucket) / bucket_get_size(bucket);
@@ -100,7 +102,7 @@ void destroy_slab(slab_t *slab) {
100102
slab->mem_ptr, slab->slab_size);
101103
assert(res == UMF_RESULT_SUCCESS);
102104
umf_ba_global_free(slab->chunks);
103-
umf_ba_global_free(slab->slab_list_iter);
105+
umf_ba_global_free(slab->iter);
104106
umf_ba_global_free(slab);
105107
}
106108

@@ -171,7 +173,8 @@ void slab_free_chunk(slab_t *slab, void *ptr) {
171173

172174
// Even if the pointer p was previously aligned, it's still inside the
173175
// corresponding chunk, so we get the correct index here.
174-
size_t chunk_idx = (ptr - slab->mem_ptr) / slab_get_chunk_size(slab);
176+
size_t chunk_idx =
177+
((uint8_t *)ptr - (uint8_t *)slab->mem_ptr) / slab_get_chunk_size(slab);
175178

176179
// Make sure that the chunk was allocated
177180
assert(slab->chunks[chunk_idx] && "double free detected");
@@ -192,11 +195,6 @@ bool slab_has_avail(const slab_t *slab) {
192195
return slab->num_allocated != slab->num_chunks;
193196
}
194197

195-
void *slab_get_iterator(const slab_t *slab) { return slab->slab_list_iter; }
196-
void slab_set_iterator(slab_t *slab, void *it) {
197-
memcpy(slab->slab_list_iter, it, slab->slab_list_iter_size);
198-
}
199-
200198
#ifdef __cplusplus
201199
}
202200
#endif

src/pool/pool_disjoint.cpp

Lines changed: 65 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@
2424

2525
#include "provider/provider_tracking.h"
2626

27+
#include "uthash/utlist.h"
28+
2729
#include "../cpp_helpers.hpp"
2830
#include "pool_disjoint.h"
2931
#include "umf.h"
@@ -112,10 +114,10 @@ class Bucket {
112114
const size_t Size;
113115

114116
// List of slabs which have at least 1 available chunk.
115-
std::list<slab_t *> AvailableSlabs;
117+
slab_list_item_t *AvailableSlabs;
116118

117119
// List of slabs with 0 available chunk.
118-
std::list<slab_t *> UnavailableSlabs;
120+
slab_list_item_t *UnavailableSlabs;
119121

120122
// Protects the bucket and all the corresponding slabs
121123
std::mutex BucketLock;
@@ -156,21 +158,25 @@ class Bucket {
156158
size_t maxSlabsInUse;
157159

158160
Bucket(size_t Sz, DisjointPool::AllocImpl &AllocCtx)
159-
: Size{Sz}, OwnAllocCtx{AllocCtx}, chunkedSlabsInPool(0),
160-
allocPoolCount(0), freeCount(0), currSlabsInUse(0),
161-
currSlabsInPool(0), maxSlabsInPool(0), allocCount(0),
162-
maxSlabsInUse(0) {}
161+
: Size{Sz}, OwnAllocCtx{AllocCtx} {
162+
AvailableSlabs = NULL;
163+
UnavailableSlabs = NULL;
164+
chunkedSlabsInPool = 0;
165+
allocPoolCount = 0;
166+
freeCount = 0;
167+
currSlabsInUse = 0;
168+
currSlabsInPool = 0;
169+
maxSlabsInPool = 0;
170+
allocCount = 0;
171+
maxSlabsInUse = 0;
172+
}
163173

164174
~Bucket() {
165-
for (auto it = AvailableSlabs.begin(); it != AvailableSlabs.end();
166-
it++) {
167-
destroy_slab(*it);
168-
}
169-
170-
for (auto it = UnavailableSlabs.begin(); it != UnavailableSlabs.end();
171-
it++) {
172-
destroy_slab(*it);
173-
}
175+
slab_list_item_t *it = NULL, *tmp = NULL;
176+
// TODO: review the English wording of the comment below
177+
// use an extra tmp pointer to store the next iterator before the slab is destroyed
178+
LL_FOREACH_SAFE(AvailableSlabs, it, tmp) { destroy_slab(it->val); }
179+
LL_FOREACH_SAFE(UnavailableSlabs, it, tmp) { destroy_slab(it->val); }
174180
}
175181

176182
// Get pointer to allocation that is one piece of an available slab in this
@@ -231,10 +237,10 @@ class Bucket {
231237
void decrementPool(bool &FromPool);
232238

233239
// Get a slab to be used for chunked allocations.
234-
decltype(AvailableSlabs.begin()) getAvailSlab(bool &FromPool);
240+
slab_list_item_t *getAvailSlab(bool &FromPool);
235241

236242
// Get a slab that will be used as a whole for a single allocation.
237-
decltype(AvailableSlabs.begin()) getAvailFullSlab(bool &FromPool);
243+
slab_list_item_t *getAvailFullSlab(bool &FromPool);
238244
};
239245

240246
class DisjointPool::AllocImpl {
@@ -374,75 +380,65 @@ void Bucket::decrementPool(bool &FromPool) {
374380
OwnAllocCtx.getLimits()->TotalSize -= SlabAllocSize();
375381
}
376382

377-
std::list<slab_t *>::iterator Bucket::getAvailFullSlab(bool &FromPool) {
383+
slab_list_item_t *Bucket::getAvailFullSlab(bool &FromPool) {
378384
// Return a slab that will be used for a single allocation.
379-
if (AvailableSlabs.size() == 0) {
380-
slab_t *slab = create_slab((bucket_t *)this,
381-
sizeof(std::list<slab_t *>::iterator));
385+
if (AvailableSlabs == NULL) {
386+
slab_t *slab = create_slab((bucket_t *)this);
382387
if (slab == NULL) {
383388
throw MemoryProviderError{UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY};
384389
}
385390

386391
slab_reg(slab);
387-
auto It = AvailableSlabs.insert(AvailableSlabs.begin(), slab);
388-
slab_set_iterator(slab, &It);
389-
392+
DL_PREPEND(AvailableSlabs, slab->iter);
390393
FromPool = false;
391394
updateStats(1, 0);
392395
} else {
393396
decrementPool(FromPool);
394397
}
395398

396-
return AvailableSlabs.begin();
399+
return AvailableSlabs;
397400
}
398401

399402
void *Bucket::getSlab(bool &FromPool) {
400403
std::lock_guard<std::mutex> Lg(BucketLock);
401404

402-
auto SlabIt = getAvailFullSlab(FromPool);
403-
slab_t *slab = *SlabIt;
404-
405+
slab_list_item_t *slab_it = getAvailFullSlab(FromPool);
406+
slab_t *slab = slab_it->val;
405407
void *ptr = slab_get(slab);
406-
auto It = UnavailableSlabs.insert(UnavailableSlabs.begin(), slab);
407-
AvailableSlabs.erase(SlabIt);
408-
slab_set_iterator(slab, &It);
408+
409+
DL_DELETE(AvailableSlabs, slab_it);
410+
DL_PREPEND(UnavailableSlabs, slab_it);
409411
return ptr;
410412
}
411413

412414
void Bucket::freeSlab(slab_t *slab, bool &ToPool) {
413415
std::lock_guard<std::mutex> Lg(BucketLock);
414416

415-
auto SlabIter = *(std::list<slab_t *>::iterator *)slab_get_iterator(slab);
416-
assert(SlabIter != UnavailableSlabs.end());
417+
slab_list_item_t *slab_it = slab->iter;
418+
assert(slab_it->val != NULL);
417419
if (CanPool(ToPool)) {
418-
auto It =
419-
AvailableSlabs.insert(AvailableSlabs.begin(), std::move(*SlabIter));
420-
UnavailableSlabs.erase(SlabIter);
421-
slab_set_iterator(*It, &It);
420+
DL_DELETE(UnavailableSlabs, slab_it);
421+
DL_PREPEND(AvailableSlabs, slab_it);
422422
} else {
423-
slab_unreg(*SlabIter);
424-
destroy_slab(*SlabIter);
425-
UnavailableSlabs.erase(SlabIter);
423+
slab_unreg(slab_it->val);
424+
DL_DELETE(UnavailableSlabs, slab_it);
425+
destroy_slab(slab_it->val);
426426
}
427427
}
428428

429-
auto Bucket::getAvailSlab(bool &FromPool) -> decltype(AvailableSlabs.begin()) {
430-
431-
if (AvailableSlabs.size() == 0) {
432-
slab_t *slab = create_slab((bucket_t *)this,
433-
sizeof(std::list<slab_t *>::iterator));
429+
slab_list_item_t *Bucket::getAvailSlab(bool &FromPool) {
430+
if (AvailableSlabs == NULL) {
431+
slab_t *slab = create_slab((bucket_t *)this);
434432
if (slab == NULL) {
435433
throw MemoryProviderError{UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY};
436434
}
437435

438436
slab_reg(slab);
439-
auto It = AvailableSlabs.insert(AvailableSlabs.begin(), slab);
440-
slab_set_iterator(slab, &It);
441-
437+
DL_PREPEND(AvailableSlabs, slab->iter);
442438
updateStats(1, 0);
443439
FromPool = false;
444440
} else {
445-
if (slab_get_num_allocated(*(AvailableSlabs.begin())) == 0) {
441+
if (slab_get_num_allocated(AvailableSlabs->val) == 0) {
446442
// If this was an empty slab, it was in the pool.
447443
// Now it is no longer in the pool, so update count.
448444
--chunkedSlabsInPool;
@@ -453,20 +449,19 @@ auto Bucket::getAvailSlab(bool &FromPool) -> decltype(AvailableSlabs.begin()) {
453449
}
454450
}
455451

456-
return AvailableSlabs.begin();
452+
return AvailableSlabs;
457453
}
458454

459455
void *Bucket::getChunk(bool &FromPool) {
460456
std::lock_guard<std::mutex> Lg(BucketLock);
461457

462-
auto SlabIt = getAvailSlab(FromPool);
463-
auto *FreeChunk = slab_get_chunk((*SlabIt));
458+
slab_list_item_t *slab_it = getAvailSlab(FromPool);
459+
auto *FreeChunk = slab_get_chunk(slab_it->val);
464460

465461
// If the slab is full, move it to unavailable slabs and update its iterator
466-
if (!(slab_has_avail(*SlabIt))) {
467-
auto It = UnavailableSlabs.insert(UnavailableSlabs.begin(), *SlabIt);
468-
AvailableSlabs.erase(SlabIt);
469-
slab_set_iterator(*It, &It);
462+
if (!(slab_has_avail(slab_it->val))) {
463+
DL_DELETE(AvailableSlabs, slab_it);
464+
DL_PREPEND(UnavailableSlabs, slab_it);
470465
}
471466

472467
return FreeChunk;
@@ -487,14 +482,10 @@ void Bucket::onFreeChunk(slab_t *slab, bool &ToPool) {
487482
// In case if the slab was previously full and now has 1 available
488483
// chunk, it should be moved to the list of available slabs
489484
if (slab_get_num_allocated(slab) == (slab_get_num_chunks(slab) - 1)) {
490-
auto SlabIter =
491-
*(std::list<slab_t *>::iterator *)slab_get_iterator(slab);
492-
assert(SlabIter != UnavailableSlabs.end());
493-
494-
slab_t *slab = *SlabIter;
495-
auto It = AvailableSlabs.insert(AvailableSlabs.begin(), slab);
496-
UnavailableSlabs.erase(SlabIter);
497-
slab_set_iterator(slab, &It);
485+
slab_list_item_t *slab_it = slab->iter;
486+
assert(slab_it->val != NULL);
487+
DL_DELETE(UnavailableSlabs, slab_it);
488+
DL_PREPEND(AvailableSlabs, slab_it);
498489
}
499490

500491
// Check if slab is empty, and pool it if we can.
@@ -506,11 +497,11 @@ void Bucket::onFreeChunk(slab_t *slab, bool &ToPool) {
506497
if (!CanPool(ToPool)) {
507498
// Note: since the slab is stored as unique_ptr, just remove it from
508499
// the list to destroy the object.
509-
auto It = *(std::list<slab_t *>::iterator *)slab_get_iterator(slab);
510-
assert(It != AvailableSlabs.end());
511-
slab_unreg(*It);
512-
destroy_slab(*It);
513-
AvailableSlabs.erase(It);
500+
slab_list_item_t *slab_it = slab->iter;
501+
assert(slab_it->val != NULL);
502+
slab_unreg(slab_it->val);
503+
DL_DELETE(AvailableSlabs, slab_it);
504+
destroy_slab(slab_it->val);
514505
}
515506
}
516507
}
@@ -522,7 +513,11 @@ bool Bucket::CanPool(bool &ToPool) {
522513
if (chunkedBucket) {
523514
NewFreeSlabsInBucket = chunkedSlabsInPool + 1;
524515
} else {
525-
NewFreeSlabsInBucket = AvailableSlabs.size() + 1;
516+
// TODO: optimize — counting AvailableSlabs here is O(n) per call; consider tracking the size instead
517+
size_t avail_num = 0;
518+
slab_list_item_t *it = NULL;
519+
DL_FOREACH(AvailableSlabs, it) { avail_num++; }
520+
NewFreeSlabsInBucket = avail_num + 1;
526521
}
527522
if (Capacity() >= NewFreeSlabsInBucket) {
528523
size_t PoolSize = OwnAllocCtx.getLimits()->TotalSize;

src/pool/pool_disjoint_temp.h

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ void annotate_memory_inaccessible(void *ptr, size_t size);
77
void annotate_memory_undefined(void *ptr, size_t size);
88

99
typedef void *bucket_t;
10+
typedef struct slab_list_item_t slab_list_item_t;
1011

1112
// Represents the allocated memory block of size 'slab_min_size'
1213
// Internally, it splits the memory block into chunks. The number of
@@ -34,11 +35,15 @@ typedef struct slab_t {
3435

3536
// Store iterator to the corresponding node in avail/unavail list
3637
// to achieve O(1) removal
37-
void *slab_list_iter;
38-
size_t slab_list_iter_size;
38+
slab_list_item_t *iter;
3939
} slab_t;
4040

41-
slab_t *create_slab(bucket_t bucket, size_t iter_size);
41+
typedef struct slab_list_item_t {
42+
slab_t *val;
43+
struct slab_list_item_t *prev, *next;
44+
} slab_list_item_t;
45+
46+
slab_t *create_slab(bucket_t bucket);
4247
void destroy_slab(slab_t *slab);
4348

4449
void *slab_get(const slab_t *slab);
@@ -49,9 +54,6 @@ size_t slab_get_num_chunks(const slab_t *slab);
4954
size_t slab_get_chunk_size(const slab_t *slab);
5055
size_t slab_get_num_allocated(const slab_t *slab);
5156

52-
void *slab_get_iterator(const slab_t *slab);
53-
void slab_set_iterator(slab_t *slab, void *it);
54-
5557
bool slab_has_avail(const slab_t *slab);
5658
void slab_free_chunk(slab_t *slab, void *ptr);
5759

0 commit comments

Comments (0)