Skip to content

Commit 3331b22

Browse files
committed
AllocImpl
1 parent a95fe58 commit 3331b22

File tree

4 files changed

+242
-216
lines changed

4 files changed

+242
-216
lines changed

src/pool/pool_disjoint.c

Lines changed: 116 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
#include "utils_common.h"
2323
#include "utils_concurrency.h"
2424
#include "utils_log.h"
25+
#include "utils_math.h"
2526
#include "utils_sanitizers.h"
2627

2728
#include "pool_disjoint_temp.h"
@@ -30,6 +31,8 @@
3031
extern "C" {
3132
#endif
3233

34+
static size_t CutOff = (size_t)1 << 31; // 2GB
35+
3336
// Temporary solution for disabling memory poisoning. This is needed because
3437
// AddressSanitizer does not support memory poisoning for GPU allocations.
3538
// More info: https://github.com/oneapi-src/unified-memory-framework/issues/634
@@ -526,8 +529,12 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool) {
526529
break;
527530
}
528531

529-
if (utils_compare_exchange(&bucket->shared_limits->total_size,
530-
&pool_size, &new_pool_size)) {
532+
// TODO!!!
533+
//if (utils_compare_exchange(&bucket->shared_limits->total_size,
534+
// pool_size, new_pool_size)) {
535+
if (bucket->shared_limits->total_size != new_pool_size) {
536+
bucket->shared_limits->total_size = new_pool_size;
537+
531538
if (chunkedBucket) {
532539
++bucket->chunkedSlabsInPool;
533540
}
@@ -544,6 +551,26 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool) {
544551
return false;
545552
}
546553

554+
// Accessor: the disjoint-pool parameters of the allocator that owns `bucket`.
// OwnAllocCtx is stored as an opaque pointer; it is always an AllocImpl here.
umf_disjoint_pool_params_t *bucket_get_params(bucket_t *bucket) {
    return AllocImpl_getParams((AllocImpl *)bucket->OwnAllocCtx);
}
558+
559+
// Accessor: the memory-provider handle of the allocator that owns `bucket`.
umf_memory_provider_handle_t bucket_get_mem_handle(bucket_t *bucket) {
    return AllocImpl_getMemHandle((AllocImpl *)bucket->OwnAllocCtx);
}
563+
564+
// Accessor: the known-slabs critnib map of the allocator that owns `bucket`.
critnib *bucket_get_known_slabs(bucket_t *bucket) {
    return AllocImpl_getKnownSlabs((AllocImpl *)bucket->OwnAllocCtx);
}
568+
569+
// Accessor: the mutex guarding the known-slabs map of the owning allocator.
utils_mutex_t *bucket_get_known_slabs_map_lock(bucket_t *bucket) {
    return AllocImpl_getKnownSlabsMapLock((AllocImpl *)bucket->OwnAllocCtx);
}
573+
547574
void slab_reg_by_addr(void *addr, slab_t *slab) {
548575
bucket_t *bucket = slab_get_bucket(slab);
549576
utils_mutex_t *lock = bucket_get_known_slabs_map_lock(bucket);
@@ -584,6 +611,93 @@ void slab_unreg_by_addr(void *addr, slab_t *slab) {
584611
utils_mutex_unlock(lock);
585612
}
586613

614+
AllocImpl *create_AllocImpl(umf_memory_provider_handle_t hProvider,
615+
umf_disjoint_pool_params_t *params) {
616+
617+
AllocImpl *ai = (AllocImpl *)umf_ba_global_alloc(sizeof(AllocImpl));
618+
619+
VALGRIND_DO_CREATE_MEMPOOL(ai, 0, 0);
620+
ai->MemHandle = hProvider;
621+
ai->params = *params;
622+
623+
utils_mutex_init(&ai->known_slabs_map_lock);
624+
ai->known_slabs = critnib_new();
625+
626+
// Generate buckets sized such as: 64, 96, 128, 192, ..., CutOff.
627+
// Powers of 2 and the value halfway between the powers of 2.
628+
size_t Size1 = ai->params.MinBucketSize;
629+
630+
// MinBucketSize cannot be larger than CutOff.
631+
Size1 = utils_min(Size1, CutOff);
632+
633+
// Buckets sized smaller than the bucket default size- 8 aren't needed.
634+
Size1 = utils_max(Size1, UMF_DISJOINT_POOL_MIN_BUCKET_DEFAULT_SIZE);
635+
636+
// Calculate the exponent for MinBucketSize used for finding buckets.
637+
ai->MinBucketSizeExp = (size_t)log2Utils(Size1);
638+
ai->DefaultSharedLimits = shared_limits_create(SIZE_MAX);
639+
640+
// count number of buckets, start from 1
641+
ai->buckets_num = 1;
642+
size_t Size2 = Size1 + Size1 / 2;
643+
size_t ts2 = Size2, ts1 = Size1;
644+
for (; Size2 < CutOff; Size1 *= 2, Size2 *= 2) {
645+
ai->buckets_num += 2;
646+
}
647+
ai->buckets =
648+
(bucket_t **)umf_ba_global_alloc(sizeof(bucket_t *) * ai->buckets_num);
649+
650+
int i = 0;
651+
Size1 = ts1;
652+
Size2 = ts2;
653+
for (; Size2 < CutOff; Size1 *= 2, Size2 *= 2, i += 2) {
654+
ai->buckets[i] = create_bucket(Size1, ai, AllocImpl_getLimits(ai));
655+
ai->buckets[i + 1] = create_bucket(Size2, ai, AllocImpl_getLimits(ai));
656+
}
657+
ai->buckets[i] = create_bucket(CutOff, ai, AllocImpl_getLimits(ai));
658+
659+
umf_result_t ret = umfMemoryProviderGetMinPageSize(
660+
hProvider, NULL, &ai->ProviderMinPageSize);
661+
if (ret != UMF_RESULT_SUCCESS) {
662+
ai->ProviderMinPageSize = 0;
663+
}
664+
665+
return ai;
666+
}
667+
668+
// Destroy an allocator context created by create_AllocImpl().
// Safe to call with NULL (no-op), mirroring free() semantics.
void destroy_AllocImpl(AllocImpl *ai) {
    if (ai == NULL) {
        return;
    }

    // TODO
    // destroy DefaultSharedLimits — currently leaked; needs the matching
    // shared_limits destroy routine once it is available.

    for (size_t i = 0; i < ai->buckets_num; i++) {
        destroy_bucket(ai->buckets[i]);
    }
    // The bucket pointer array itself was allocated in create_AllocImpl and
    // was previously leaked here.
    umf_ba_global_free(ai->buckets);

    VALGRIND_DO_DESTROY_MEMPOOL(ai);

    critnib_delete(ai->known_slabs);

    utils_mutex_destroy_not_free(&ai->known_slabs_map_lock);

    umf_ba_global_free(ai);
}
684+
685+
// Accessor: the upstream memory provider backing this allocator.
umf_memory_provider_handle_t AllocImpl_getMemHandle(AllocImpl *ai) {
    umf_memory_provider_handle_t provider = ai->MemHandle;
    return provider;
}
688+
689+
// Accessor: the mutex guarding the known-slabs map (pointer into `ai`,
// valid for the lifetime of the allocator context).
utils_mutex_t *AllocImpl_getKnownSlabsMapLock(AllocImpl *ai) {
    return &ai->known_slabs_map_lock;
}
692+
693+
// Accessor: the address-to-slab critnib map owned by this allocator.
critnib *AllocImpl_getKnownSlabs(AllocImpl *ai) {
    return ai->known_slabs;
}
694+
695+
// Accessor: the configured minimum slab size for this allocator.
// Fix: removed the stray ';' after the function body — an empty file-scope
// declaration is a constraint violation in standard C.
size_t AllocImpl_SlabMinSize(AllocImpl *ai) { return ai->params.SlabMinSize; }
696+
697+
// Accessor: the pool parameters (pointer into `ai`, valid for the lifetime
// of the allocator context; do not free).
umf_disjoint_pool_params_t *AllocImpl_getParams(AllocImpl *ai) {
    return &ai->params;
}
700+
587701
#ifdef __cplusplus
588702
}
589703
#endif

0 commit comments

Comments
 (0)