diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c
index 7aeee71658..7b03ea79eb 100644
--- a/src/pool/pool_disjoint.c
+++ b/src/pool/pool_disjoint.c
@@ -5,7 +5,23 @@
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  */
 
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "base_alloc_global.h"
 #include "pool_disjoint_internal.h"
+#include "provider/provider_tracking.h"
+#include "uthash/utlist.h"
+#include "utils_common.h"
+#include "utils_log.h"
+#include "utils_math.h"
 
 // Temporary solution for disabling memory poisoning. This is needed because
 // AddressSanitizer does not support memory poisoning for GPU allocations.
@@ -94,9 +110,6 @@ static slab_t *create_slab(bucket_t *bucket) {
         goto free_slab_chunks;
     }
 
-    // TODO
-    // ASSERT_IS_ALIGNED((uintptr_t)slab->mem_ptr, bucket->size);
-
     // raw allocation is not available for user so mark it as inaccessible
     utils_annotate_memory_inaccessible(slab->mem_ptr, slab->slab_size);
 
@@ -175,10 +188,10 @@ static void slab_free_chunk(slab_t *slab, void *ptr) {
     // Make sure that we're in the right slab
     assert(ptr >= slab_get(slab) && ptr < slab_get_end(slab));
 
-    // Even if the pointer p was previously aligned, it's still inside the
-    // corresponding chunk, so we get the correct index here.
-    size_t chunk_idx =
-        ((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size;
+    // Get the chunk index
+    uintptr_t ptr_diff = (uintptr_t)ptr - (uintptr_t)slab->mem_ptr;
+    assert((ptr_diff % slab->bucket->size) == 0);
+    size_t chunk_idx = ptr_diff / slab->bucket->size;
 
     // Make sure that the chunk was allocated
     assert(slab->chunks[chunk_idx] && "double free detected");
@@ -738,6 +751,10 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
         }
     }
 
+    void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
+    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
+    utils_annotate_memory_undefined(aligned_ptr, size);
+
     utils_mutex_unlock(&bucket->bucket_lock);
 
     if (disjoint_pool->params.pool_trace > 2) {
@@ -746,9 +763,6 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
                 (from_pool ? "pool" : "provider"), ptr);
     }
 
-    void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
-    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
-    utils_annotate_memory_undefined(aligned_ptr, size);
 
     return aligned_ptr;
 }
@@ -804,11 +818,18 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
 
     bucket_t *bucket = slab->bucket;
 
-    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
     utils_mutex_lock(&bucket->bucket_lock);
+    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
+
+    // Get the unaligned pointer
+    // NOTE: the base pointer slab->mem_ptr needn't to be aligned to bucket size
+    size_t chunk_idx =
+        (((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size);
+    void *unaligned_ptr =
+        (void *)((uintptr_t)slab->mem_ptr + chunk_idx * slab->bucket->size);
 
-    utils_annotate_memory_inaccessible(ptr, bucket->size);
-    bucket_free_chunk(bucket, ptr, slab, &to_pool);
+    utils_annotate_memory_inaccessible(unaligned_ptr, bucket->size);
+    bucket_free_chunk(bucket, unaligned_ptr, slab, &to_pool);
 
     if (disjoint_pool->params.pool_trace > 1) {
         bucket->free_count++;
diff --git a/src/pool/pool_disjoint_internal.h b/src/pool/pool_disjoint_internal.h
index 3d656689cf..86460509b4 100644
--- a/src/pool/pool_disjoint_internal.h
+++ b/src/pool/pool_disjoint_internal.h
@@ -8,27 +8,12 @@
 #ifndef UMF_POOL_DISJOINT_INTERNAL_H
 #define UMF_POOL_DISJOINT_INTERNAL_H 1
 
-#include
-#include
-#include
 #include
 
-#include
-#include
-#include
-#include
-#include
 #include
 
 #include "critnib/critnib.h"
-#include "uthash/utlist.h"
-
-#include "base_alloc_global.h"
-#include "provider/provider_tracking.h"
-#include "utils_common.h"
 #include "utils_concurrency.h"
-#include "utils_log.h"
-#include "utils_math.h"
 
 typedef struct bucket_t bucket_t;
 typedef struct slab_t slab_t;