2424
2525#include " provider/provider_tracking.h"
2626
27+ #include " uthash/utlist.h"
28+
2729#include " ../cpp_helpers.hpp"
2830#include " pool_disjoint.h"
2931#include " umf.h"
@@ -112,10 +114,10 @@ class Bucket {
112114 const size_t Size;
113115
114116 // List of slabs which have at least 1 available chunk.
// NOTE(review): this hunk migrates the slab lists from std::list<slab_t *>
// to intrusive utlist-style nodes; with the utlist macros a NULL head
// pointer is the empty list, so there is no container object to construct.
115- std::list< slab_t *> AvailableSlabs;
117+ slab_list_item_t * AvailableSlabs;
116118
117119 // List of slabs with 0 available chunk.
118- std::list< slab_t *> UnavailableSlabs;
120+ slab_list_item_t * UnavailableSlabs;
119121
120122 // Protects the bucket and all the corresponding slabs
121123 std::mutex BucketLock;
@@ -156,21 +158,25 @@ class Bucket {
156158 size_t maxSlabsInUse;
157159
158160 Bucket (size_t Sz, DisjointPool::AllocImpl &AllocCtx)
159- : Size{Sz}, OwnAllocCtx{AllocCtx}, chunkedSlabsInPool(0 ),
160- allocPoolCount (0 ), freeCount(0 ), currSlabsInUse(0 ),
161- currSlabsInPool (0 ), maxSlabsInPool(0 ), allocCount(0 ),
162- maxSlabsInUse (0 ) {}
161+ : Size{Sz}, OwnAllocCtx{AllocCtx} {
162+ AvailableSlabs = NULL ;
163+ UnavailableSlabs = NULL ;
164+ chunkedSlabsInPool = 0 ;
165+ allocPoolCount = 0 ;
166+ freeCount = 0 ;
167+ currSlabsInUse = 0 ;
168+ currSlabsInPool = 0 ;
169+ maxSlabsInPool = 0 ;
170+ allocCount = 0 ;
171+ maxSlabsInUse = 0 ;
172+ }
163173
164174 ~Bucket () {
165- for (auto it = AvailableSlabs.begin (); it != AvailableSlabs.end ();
166- it++) {
167- destroy_slab (*it);
168- }
169-
170- for (auto it = UnavailableSlabs.begin (); it != UnavailableSlabs.end ();
171- it++) {
172- destroy_slab (*it);
173- }
175+ slab_list_item_t *it = NULL , *tmp = NULL ;
176+ // TODO check eng
177+ // use extra tmp to store next iterator before the slab is destroyed
178+ LL_FOREACH_SAFE (AvailableSlabs, it, tmp) { destroy_slab (it->val ); }
179+ LL_FOREACH_SAFE (UnavailableSlabs, it, tmp) { destroy_slab (it->val ); }
174180 }
175181
176182 // Get pointer to allocation that is one piece of an available slab in this
@@ -231,10 +237,10 @@ class Bucket {
231237 void decrementPool (bool &FromPool);
232238
233239 // Get a slab to be used for chunked allocations.
// NOTE(review): both getters now return the list node (slab_list_item_t *)
// instead of a std::list iterator; callers reach the slab through ->val.
234- decltype (AvailableSlabs.begin()) getAvailSlab(bool &FromPool);
240+ slab_list_item_t * getAvailSlab (bool &FromPool);
235241
236242 // Get a slab that will be used as a whole for a single allocation.
237- decltype (AvailableSlabs.begin()) getAvailFullSlab(bool &FromPool);
243+ slab_list_item_t * getAvailFullSlab (bool &FromPool);
238244};
239245
240246class DisjointPool ::AllocImpl {
@@ -374,75 +380,65 @@ void Bucket::decrementPool(bool &FromPool) {
374380 OwnAllocCtx.getLimits ()->TotalSize -= SlabAllocSize ();
375381}
376382
377- std::list< slab_t *>::iterator Bucket::getAvailFullSlab (bool &FromPool) {
383+ slab_list_item_t * Bucket::getAvailFullSlab (bool &FromPool) {
378384 // Return a slab that will be used for a single allocation.
379- if (AvailableSlabs.size () == 0 ) {
380- slab_t *slab = create_slab ((bucket_t *)this ,
381- sizeof (std::list<slab_t *>::iterator));
385+ if (AvailableSlabs == NULL ) {
386+ slab_t *slab = create_slab ((bucket_t *)this );
382387 if (slab == NULL ) {
383388 throw MemoryProviderError{UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY};
384389 }
385390
386391 slab_reg (slab);
387- auto It = AvailableSlabs.insert (AvailableSlabs.begin (), slab);
388- slab_set_iterator (slab, &It);
389-
392+ DL_PREPEND (AvailableSlabs, slab->iter );
390393 FromPool = false ;
391394 updateStats (1 , 0 );
392395 } else {
393396 decrementPool (FromPool);
394397 }
395398
396- return AvailableSlabs. begin () ;
399+ return AvailableSlabs;
397400}
398401
399402void *Bucket::getSlab (bool &FromPool) {
400403 std::lock_guard<std::mutex> Lg (BucketLock);
401404
402- auto SlabIt = getAvailFullSlab (FromPool);
403- slab_t *slab = *SlabIt;
404-
405+ slab_list_item_t *slab_it = getAvailFullSlab (FromPool);
406+ slab_t *slab = slab_it->val ;
405407 void *ptr = slab_get (slab);
406- auto It = UnavailableSlabs. insert (UnavailableSlabs. begin (), slab);
407- AvailableSlabs. erase (SlabIt );
408- slab_set_iterator (slab, &It );
408+
409+ DL_DELETE (AvailableSlabs, slab_it );
410+ DL_PREPEND (UnavailableSlabs, slab_it );
409411 return ptr;
410412}
411413
412414void Bucket::freeSlab (slab_t *slab, bool &ToPool) {
413415 std::lock_guard<std::mutex> Lg (BucketLock);
414416
415- auto SlabIter = *(std::list< slab_t *>::iterator *) slab_get_iterator ( slab) ;
416- assert (SlabIter != UnavailableSlabs. end () );
417+ slab_list_item_t *slab_it = slab-> iter ;
418+ assert (slab_it-> val != NULL );
417419 if (CanPool (ToPool)) {
418- auto It =
419- AvailableSlabs.insert (AvailableSlabs.begin (), std::move (*SlabIter));
420- UnavailableSlabs.erase (SlabIter);
421- slab_set_iterator (*It, &It);
420+ DL_DELETE (UnavailableSlabs, slab_it);
421+ DL_PREPEND (AvailableSlabs, slab_it);
422422 } else {
423- slab_unreg (*SlabIter );
424- destroy_slab (*SlabIter );
425- UnavailableSlabs. erase (SlabIter );
423+ slab_unreg (slab_it-> val );
424+ DL_DELETE (UnavailableSlabs, slab_it );
425+ destroy_slab (slab_it-> val );
426426 }
427427}
428428
// NOTE(review): the @@ boundary below hides a few lines of the else branch
// (old lines 449-452); review the complete file before relying on this hunk.
429- auto Bucket::getAvailSlab (bool &FromPool) -> decltype(AvailableSlabs.begin()) {
430-
431- if (AvailableSlabs.size () == 0 ) {
432- slab_t *slab = create_slab ((bucket_t *)this ,
433- sizeof (std::list<slab_t *>::iterator));
429+ slab_list_item_t *Bucket::getAvailSlab (bool &FromPool) {
430+ if (AvailableSlabs == NULL ) {
431+ slab_t *slab = create_slab ((bucket_t *)this );
434432 if (slab == NULL ) {
435433 throw MemoryProviderError{UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY};
436434 }
437435
438436 slab_reg (slab);
// NOTE(review): the slab carries its own list node (slab->iter), so the
// old insert + slab_set_iterator pair collapses into one DL_PREPEND.
439- auto It = AvailableSlabs.insert (AvailableSlabs.begin (), slab);
440- slab_set_iterator (slab, &It);
441-
437+ DL_PREPEND (AvailableSlabs, slab->iter );
442438 updateStats (1 , 0 );
443439 FromPool = false ;
444440 } else {
445- if (slab_get_num_allocated (*( AvailableSlabs. begin ()) ) == 0 ) {
441+ if (slab_get_num_allocated (AvailableSlabs-> val ) == 0 ) {
446442 // If this was an empty slab, it was in the pool.
447443 // Now it is no longer in the pool, so update count.
448444 --chunkedSlabsInPool;
@@ -453,20 +449,19 @@ auto Bucket::getAvailSlab(bool &FromPool) -> decltype(AvailableSlabs.begin()) {
453449 }
454450 }
455451
456- return AvailableSlabs. begin () ;
452+ return AvailableSlabs;
457453}
458454
// NOTE(review): the function's closing brace (old line 473) falls outside
// this hunk; the visible excerpt ends at the return statement.
459455 void *Bucket::getChunk (bool &FromPool) {
460456 std::lock_guard<std::mutex> Lg (BucketLock);
461457
462- auto SlabIt = getAvailSlab (FromPool);
463- auto *FreeChunk = slab_get_chunk ((*SlabIt) );
458+ slab_list_item_t *slab_it = getAvailSlab (FromPool);
459+ auto *FreeChunk = slab_get_chunk (slab_it-> val );
464460
// NOTE(review): "update its iterator" below is stale wording — with the
// intrusive list the node itself is relinked and no iterator is updated.
465461 // If the slab is full, move it to unavailable slabs and update its iterator
466- if (!(slab_has_avail (*SlabIt))) {
467- auto It = UnavailableSlabs.insert (UnavailableSlabs.begin (), *SlabIt);
468- AvailableSlabs.erase (SlabIt);
469- slab_set_iterator (*It, &It);
462+ if (!(slab_has_avail (slab_it->val ))) {
463+ DL_DELETE (AvailableSlabs, slab_it);
464+ DL_PREPEND (UnavailableSlabs, slab_it);
470465 }
471466
472467 return FreeChunk;
@@ -487,14 +482,10 @@ void Bucket::onFreeChunk(slab_t *slab, bool &ToPool) {
// NOTE(review): this function starts before the hunk and old lines 499-505
// are hidden between the two hunks — this is a partial view.
487482 // In case if the slab was previously full and now has 1 available
488483 // chunk, it should be moved to the list of available slabs
489484 if (slab_get_num_allocated (slab) == (slab_get_num_chunks (slab) - 1 )) {
490- auto SlabIter =
491- *(std::list<slab_t *>::iterator *)slab_get_iterator (slab);
492- assert (SlabIter != UnavailableSlabs.end ());
493-
494- slab_t *slab = *SlabIter;
495- auto It = AvailableSlabs.insert (AvailableSlabs.begin (), slab);
496- UnavailableSlabs.erase (SlabIter);
497- slab_set_iterator (slab, &It);
485+ slab_list_item_t *slab_it = slab->iter ;
486+ assert (slab_it->val != NULL );
487+ DL_DELETE (UnavailableSlabs, slab_it);
488+ DL_PREPEND (AvailableSlabs, slab_it);
498489 }
499490
500491 // Check if slab is empty, and pool it if we can.
@@ -506,11 +497,11 @@ void Bucket::onFreeChunk(slab_t *slab, bool &ToPool) {
506497 if (!CanPool (ToPool)) {
// NOTE(review): the comment below is stale — slabs are no longer held by
// unique_ptr; destroy_slab() is now called explicitly after unlinking.
507498 // Note: since the slab is stored as unique_ptr, just remove it from
508499 // the list to destroy the object.
509- auto It = *(std::list< slab_t *>::iterator *) slab_get_iterator ( slab) ;
510- assert (It != AvailableSlabs. end () );
511- slab_unreg (*It );
512- destroy_slab (*It );
513- AvailableSlabs. erase (It );
500+ slab_list_item_t *slab_it = slab-> iter ;
501+ assert (slab_it-> val != NULL );
502+ slab_unreg (slab_it-> val );
503+ DL_DELETE (AvailableSlabs, slab_it );
504+ destroy_slab (slab_it-> val );
514505 }
515506 }
516507}
@@ -522,7 +513,11 @@ bool Bucket::CanPool(bool &ToPool) {
// NOTE(review): this function is cut at both ends by the hunk boundaries.
522513 if (chunkedBucket) {
523514 NewFreeSlabsInBucket = chunkedSlabsInPool + 1 ;
524515 } else {
516+ // TODO optimize
// NOTE(review): this linear walk makes CanPool O(n) in the number of
// cached slabs, replacing the O(1) std::list::size(). utlist's DL_COUNT,
// or a maintained counter (as chunkedSlabsInPool already is for the
// chunked case), would restore constant-time behaviour — TODO confirm.
517+ size_t avail_num = 0 ;
518+ slab_list_item_t *it = NULL ;
519+ DL_FOREACH (AvailableSlabs, it) { avail_num++; }
520+ NewFreeSlabsInBucket = avail_num + 1 ;
525521 }
526522 if (Capacity () >= NewFreeSlabsInBucket) {
527523 size_t PoolSize = OwnAllocCtx.getLimits ()->TotalSize ;
0 commit comments