 // Temporary solution for disabling memory poisoning. This is needed because
 // AddressSanitizer does not support memory poisoning for GPU allocations.
 // More info: https://github.com/oneapi-src/unified-memory-framework/issues/634
+
+// TODO - add a param to disjoint pool to disable memory poisoning
 #ifndef POISON_MEMORY
 #undef __SANITIZE_ADDRESS__
 #endif
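For context, the #undef above works because the pool's annotation helpers compile down to AddressSanitizer's poisoning macros only when __SANITIZE_ADDRESS__ is defined. A minimal sketch of that dispatch pattern, with hypothetical macro names standing in for the real utils_annotate_* helpers:

// Minimal sketch (hypothetical names): annotation helpers that poison memory
// under ASan and compile to no-ops otherwise. Undefining __SANITIZE_ADDRESS__
// before this point makes the no-op branch win, which is the whole trick.
#ifdef __SANITIZE_ADDRESS__
#include <sanitizer/asan_interface.h>
#define annotate_memory_inaccessible(p, s) ASAN_POISON_MEMORY_REGION((p), (s))
#define annotate_memory_undefined(p, s) ASAN_UNPOISON_MEMORY_REGION((p), (s))
#else
#define annotate_memory_inaccessible(p, s) ((void)(p), (void)(s))
#define annotate_memory_undefined(p, s) ((void)(p), (void)(s))
#endif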
@@ -94,9 +96,6 @@ static slab_t *create_slab(bucket_t *bucket) {
         goto free_slab_chunks;
     }
 
-    // TODO
-    // ASSERT_IS_ALIGNED((uintptr_t)slab->mem_ptr, bucket->size);
-
     // raw allocation is not available for user so mark it as inaccessible
     utils_annotate_memory_inaccessible(slab->mem_ptr, slab->slab_size);
 
@@ -175,10 +174,10 @@ static void slab_free_chunk(slab_t *slab, void *ptr) {
     // Make sure that we're in the right slab
     assert(ptr >= slab_get(slab) && ptr < slab_get_end(slab));
 
-    // Even if the pointer p was previously aligned, it's still inside the
-    // corresponding chunk, so we get the correct index here.
-    size_t chunk_idx =
-        ((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size;
+    // Get the chunk index; ptr must point at the start of a chunk
+    uintptr_t ptr_diff = (uintptr_t)ptr - (uintptr_t)slab->mem_ptr;
+    assert((ptr_diff % slab->bucket->size) == 0);
+    size_t chunk_idx = ptr_diff / slab->bucket->size;
 
     // Make sure that the chunk was allocated
     assert(slab->chunks[chunk_idx] && "double free detected");
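The new assert encodes the invariant that frees reach this function with the chunk base pointer, so the index math is exact division. A self-contained sketch of the same arithmetic (standalone names, not the pool's types):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Chunk i occupies [base + i * chunk_size, base + (i + 1) * chunk_size);
// given a chunk base pointer, recover i by exact division.
static size_t chunk_index(const void *base, const void *ptr,
                          size_t chunk_size) {
    uintptr_t diff = (uintptr_t)ptr - (uintptr_t)base;
    assert(diff % chunk_size == 0 && "expected a chunk base pointer");
    return diff / chunk_size;
}

int main(void) {
    static char slab[1024];
    assert(chunk_index(slab, slab + 512, 256) == 2); // third 256-byte chunk
    return 0;
}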
@@ -738,6 +737,10 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
         }
     }
 
+    void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
+    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
+    utils_annotate_memory_undefined(aligned_ptr, size);
+
     utils_mutex_unlock(&bucket->bucket_lock);
 
     if (disjoint_pool->params.pool_trace > 2) {
@@ -746,9 +749,6 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
                (from_pool ? "pool" : "provider"), ptr);
     }
 
-    void *aligned_ptr = (void *)ALIGN_UP_SAFE((size_t)ptr, alignment);
-    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, aligned_ptr, size);
-    utils_annotate_memory_undefined(aligned_ptr, size);
     return aligned_ptr;
 }
 
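The two hunks above move the aligned-pointer computation and its Valgrind/ASan bookkeeping before the bucket lock is dropped, so the chunk is annotated while this thread still owns it. The rounding itself is ordinary align-up; a sketch, assuming ALIGN_UP_SAFE rounds a value up to the next multiple of a power-of-two alignment (align_up here is an illustrative stand-in, and the real macro's overflow checking is omitted):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative stand-in for ALIGN_UP_SAFE: round `value` up to the next
// multiple of a power-of-two `alignment` (overflow checking omitted).
static uintptr_t align_up(uintptr_t value, size_t alignment) {
    return (value + alignment - 1) & ~((uintptr_t)alignment - 1);
}

int main(void) {
    assert(align_up(0x1001, 16) == 0x1010);
    assert(align_up(0x1000, 16) == 0x1000); // already aligned: unchanged
    return 0;
}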
@@ -804,11 +804,21 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
 
     bucket_t *bucket = slab->bucket;
 
-    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
     utils_mutex_lock(&bucket->bucket_lock);
 
-    utils_annotate_memory_inaccessible(ptr, bucket->size);
-    bucket_free_chunk(bucket, ptr, slab, &to_pool);
+    // TODO valgrind
+    VALGRIND_DO_MEMPOOL_FREE(pool, ptr);
+
+    // Get the unaligned pointer: round ptr down to the start of its chunk.
+    // NOTE: slab->mem_ptr need not be aligned to the bucket size, so compute
+    // relative to it; unsigned division truncates, rounding down to the base.
+    size_t chunk_idx =
+        ((uintptr_t)ptr - (uintptr_t)slab->mem_ptr) / slab->bucket->size;
+    void *unaligned_ptr =
+        (void *)((uintptr_t)slab->mem_ptr + chunk_idx * slab->bucket->size);
+
+    utils_annotate_memory_inaccessible(unaligned_ptr, bucket->size);
+    bucket_free_chunk(bucket, unaligned_ptr, slab, &to_pool);
 
     if (disjoint_pool->params.pool_trace > 1) {
         bucket->free_count++;
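Since the user receives the aligned pointer but the pool tracks chunks by their bases, free has to round back down. Unsigned division truncates toward zero, which for a non-negative offset is exactly floor, so no floating point is needed. A standalone sketch of the recovery (hypothetical helper name):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

// Round an interior pointer (e.g. an aligned user pointer) back down to the
// base of the chunk it lives in; works even if slab_base itself is unaligned.
static void *chunk_base(void *slab_base, void *ptr, size_t chunk_size) {
    size_t idx = ((uintptr_t)ptr - (uintptr_t)slab_base) / chunk_size;
    return (void *)((uintptr_t)slab_base + idx * chunk_size);
}

int main(void) {
    static char slab[1024];
    // a user pointer 40 bytes into the second 256-byte chunk
    assert(chunk_base(slab, slab + 256 + 40, 256) == (void *)(slab + 256));
    return 0;
}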