@@ -44,8 +44,6 @@ extern "C" {
4444
4545#include " pool_disjoint_temp.h"
4646
47- struct slab_t ;
48-
4947#ifdef __cplusplus
5048}
5149#endif
@@ -111,7 +109,9 @@ class DisjointPool::AllocImpl {
    // It's important for the map to be destroyed last, after the buckets and
    // their slabs, because a slab's destructor removes its entry from the map.
113111 std::unordered_multimap<void *, slab_t *> KnownSlabs;
114- std::shared_timed_mutex KnownSlabsMapLock;
112+
    // NOTE(review): was std::shared_timed_mutex (reader/writer); readers now
    // serialize on this plain mutex — confirm the contention cost is acceptable.
114+ utils_mutex_t known_slabs_map_lock;
115115
116116 // Handle to the memory provider
117117 umf_memory_provider_handle_t MemHandle;
@@ -125,7 +125,7 @@ class DisjointPool::AllocImpl {
125125 umf_disjoint_pool_shared_limits_t *DefaultSharedLimits;
126126
127127 // Used in algorithm for finding buckets
128- std:: size_t MinBucketSizeExp;
128+ size_t MinBucketSizeExp;
129129
130130 // Coarse-grain allocation min alignment
131131 size_t ProviderMinPageSize;
@@ -137,6 +137,8 @@ class DisjointPool::AllocImpl {
137137
138138 VALGRIND_DO_CREATE_MEMPOOL (this , 0 , 0 );
139139
140+ utils_mutex_init (&known_slabs_map_lock);
141+
140142 // Generate buckets sized such as: 64, 96, 128, 192, ..., CutOff.
141143 // Powers of 2 and the value halfway between the powers of 2.
142144 auto Size1 = this ->params .MinBucketSize ;
@@ -147,6 +149,7 @@ class DisjointPool::AllocImpl {
147149 // Calculate the exponent for MinBucketSize used for finding buckets.
148150 MinBucketSizeExp = (size_t )log2Utils (Size1);
149151 DefaultSharedLimits = shared_limits_create (SIZE_MAX);
152+
150153 auto Size2 = Size1 + Size1 / 2 ;
151154 for (; Size2 < CutOff; Size1 *= 2 , Size2 *= 2 ) {
152155 // TODO copy allocimpl
@@ -171,6 +174,8 @@ class DisjointPool::AllocImpl {
171174 }
172175
173176 VALGRIND_DO_DESTROY_MEMPOOL (this );
177+
178+ utils_mutex_destroy_not_free (&known_slabs_map_lock);
174179 }
175180
176181 void *allocate (size_t Size, size_t Alignment, bool &FromPool);
@@ -179,9 +184,7 @@ class DisjointPool::AllocImpl {
179184
180185 umf_memory_provider_handle_t getMemHandle () { return MemHandle; }
181186
182- std::shared_timed_mutex &getKnownSlabsMapLock () {
183- return KnownSlabsMapLock;
184- }
187+ utils_mutex_t *getKnownSlabsMapLock () { return &known_slabs_map_lock; }
185188
186189 std::unordered_multimap<void *, slab_t *> &getKnownSlabs () {
187190 return KnownSlabs;
@@ -204,7 +207,7 @@ class DisjointPool::AllocImpl {
204207
205208 private:
206209 bucket_t *findBucket (size_t Size);
207- std:: size_t sizeToIdx (size_t Size);
210+ size_t sizeToIdx (size_t Size);
208211};
209212
210213static void *memoryProviderAlloc (umf_memory_provider_handle_t hProvider,
@@ -398,9 +401,12 @@ bucket_t *DisjointPool::AllocImpl::findBucket(size_t Size) {
398401 auto calculatedIdx = sizeToIdx (Size);
399402 bucket_t *bucket = Buckets[calculatedIdx];
400403 assert (bucket_get_size (bucket) >= Size);
404+ (void )bucket;
405+
401406 if (calculatedIdx > 0 ) {
402407 bucket_t *bucket_prev = Buckets[calculatedIdx - 1 ];
403408 assert (bucket_get_size (bucket_prev) < Size);
409+ (void )bucket_prev;
404410 }
405411
406412 return Buckets[calculatedIdx];
@@ -414,12 +420,12 @@ umf_result_t DisjointPool::AllocImpl::deallocate(void *Ptr, bool &ToPool) {
414420 auto *SlabPtr = (void *)ALIGN_DOWN ((size_t )Ptr, SlabMinSize ());
415421
416422 // Lock the map on read
417- std::shared_lock<std::shared_timed_mutex> Lk (getKnownSlabsMapLock ());
423+ utils_mutex_lock (getKnownSlabsMapLock ());
418424
419425 ToPool = false ;
420426 auto Slabs = getKnownSlabs ().equal_range (SlabPtr);
421427 if (Slabs.first == Slabs.second ) {
422- Lk. unlock ( );
428+ utils_mutex_unlock ( getKnownSlabsMapLock () );
423429 umf_result_t ret = memoryProviderFree (getMemHandle (), Ptr);
424430 return ret;
425431 }
@@ -431,7 +437,7 @@ umf_result_t DisjointPool::AllocImpl::deallocate(void *Ptr, bool &ToPool) {
431437 if (Ptr >= slab_get (Slab) && Ptr < slab_get_end (Slab)) {
432438 // Unlock the map before freeing the chunk, it may be locked on write
433439 // there
434- Lk. unlock ( );
440+ utils_mutex_unlock ( getKnownSlabsMapLock () );
435441 bucket_t *bucket = slab_get_bucket (Slab);
436442
437443 if (getParams ().PoolTrace > 1 ) {
@@ -450,7 +456,7 @@ umf_result_t DisjointPool::AllocImpl::deallocate(void *Ptr, bool &ToPool) {
450456 }
451457 }
452458
453- Lk. unlock ( );
459+ utils_mutex_unlock ( getKnownSlabsMapLock () );
454460 // There is a rare case when we have a pointer from system allocation next
455461 // to some slab with an entry in the map. So we find a slab
456462 // but the range checks fail.
@@ -608,11 +614,6 @@ umf_disjoint_pool_params_t *bucket_get_params(bucket_t *bucket) {
608614 return &t->getParams ();
609615}
610616
611- umf_disjoint_pool_shared_limits_t *bucket_get_limits (bucket_t *bucket) {
612- auto t = (DisjointPool::AllocImpl *)bucket->OwnAllocCtx ;
613- return t->getLimits ();
614- }
615-
616617umf_memory_provider_handle_t bucket_get_mem_handle (bucket_t *bucket) {
617618 auto t = (DisjointPool::AllocImpl *)bucket->OwnAllocCtx ;
618619 return t->getMemHandle ();
@@ -624,26 +625,27 @@ bucket_get_known_slabs(bucket_t *bucket) {
624625 return &t->getKnownSlabs ();
625626}
626627
627- std::shared_timed_mutex *bucket_get_known_slabs_map_lock (bucket_t *bucket) {
628+ utils_mutex_t *bucket_get_known_slabs_map_lock (bucket_t *bucket) {
628629 auto t = (DisjointPool::AllocImpl *)bucket->OwnAllocCtx ;
629- return & t->getKnownSlabsMapLock ();
630+ return t->getKnownSlabsMapLock ();
630631}
631632
632633void slab_reg_by_addr (void *addr, slab_t *slab) {
633634 bucket_t *bucket = slab_get_bucket (slab);
634635 auto Lock = bucket_get_known_slabs_map_lock (bucket);
635636 auto Map = bucket_get_known_slabs (bucket);
636637
637- std::lock_guard<std::shared_timed_mutex> Lg (* Lock);
638+ utils_mutex_lock ( Lock);
638639 Map->insert ({addr, slab});
640+ utils_mutex_unlock (Lock);
639641}
640642
641643void slab_unreg_by_addr (void *addr, slab_t *slab) {
642644 bucket_t *bucket = slab_get_bucket (slab);
643645 auto Lock = bucket_get_known_slabs_map_lock (bucket);
644646 auto Map = bucket_get_known_slabs (bucket);
645647
646- std::lock_guard<std::shared_timed_mutex> Lg (* Lock);
648+ utils_mutex_lock ( Lock);
647649
648650 auto Slabs = Map->equal_range (addr);
    // The slab being unregistered must at least be present in the map.
@@ -652,11 +654,13 @@ void slab_unreg_by_addr(void *addr, slab_t *slab) {
652654 for (auto It = Slabs.first ; It != Slabs.second ; ++It) {
653655 if (It->second == slab) {
654656 Map->erase (It);
657+ utils_mutex_unlock (Lock);
655658 return ;
656659 }
657660 }
658661
659662 assert (false && " Slab is not found" );
663+ utils_mutex_unlock (Lock);
660664}
661665
662666#ifdef __cplusplus
0 commit comments