@@ -36,15 +36,15 @@ static size_t CutOff = (size_t)1 << 31; // 2GB
3636#define POISON_MEMORY 0
3737#endif
3838
39- /* static */ void annotate_memory_inaccessible (void * ptr , size_t size ) {
39+ static void annotate_memory_inaccessible (void * ptr , size_t size ) {
4040 (void )ptr ;
4141 (void )size ;
4242#if (POISON_MEMORY != 0 )
4343 utils_annotate_memory_inaccessible (ptr , size );
4444#endif
4545}
4646
47- /* static*/ void annotate_memory_undefined (void * ptr , size_t size ) {
47+ static void annotate_memory_undefined (void * ptr , size_t size ) {
4848 (void )ptr ;
4949 (void )size ;
5050#if (POISON_MEMORY != 0 )
@@ -124,8 +124,9 @@ void destroy_slab(slab_t *slab) {
124124 umf_ba_global_free (slab );
125125}
126126
127- // return the index of the first available chunk, SIZE_MAX otherwise
128127size_t slab_find_first_available_chunk_idx (const slab_t * slab ) {
128+ // return the index of the first available chunk, SIZE_MAX otherwise
129+
129130 // use the first free chunk index as a hint for the search
130131 bool * chunk = slab -> chunks + sizeof (bool ) * slab -> first_free_chunk_idx ;
131132 while (chunk != slab -> chunks + sizeof (bool ) * slab -> num_chunks ) {
@@ -143,8 +144,6 @@ size_t slab_find_first_available_chunk_idx(const slab_t *slab) {
143144}
144145
145146void * slab_get_chunk (slab_t * slab ) {
146- // assert(slab->num_allocated != slab->num_chunks);
147-
148147 // free chunk must exist, otherwise we would have allocated another slab
149148 const size_t chunk_idx = slab_find_first_available_chunk_idx (slab );
150149 assert (chunk_idx != SIZE_MAX );
@@ -168,7 +167,7 @@ void *slab_get_end(const slab_t *slab) {
168167}
169168
170169void slab_free_chunk (slab_t * slab , void * ptr ) {
171- // This method should be called through bucket(since we might remove the
171+ // This method should be called through the bucket (since we might remove the
172171 // slab as a result), therefore all locks are done on that level.
173172
174173 // Make sure that we're in the right slab
@@ -256,7 +255,6 @@ create_bucket(size_t sz, disjoint_pool_t *pool,
256255 bucket -> shared_limits = shared_limits ;
257256
258257 utils_mutex_init (& bucket -> bucket_lock );
259-
260258 return bucket ;
261259}
262260
@@ -463,9 +461,9 @@ void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
463461 in_pool * bucket_slab_alloc_size (bucket );
464462}
465463
466- // If a slab was available in the pool then note that the current pooled
467- // size has reduced by the size of a slab in this bucket.
468464void bucket_decrement_pool (bucket_t * bucket , bool * from_pool ) {
465+ // If a slab was available in the pool, note that the current pooled
466+ // size has been reduced by the size of a slab in this bucket.
469467 * from_pool = true;
470468 bucket_update_stats (bucket , 1 , -1 );
471469 utils_fetch_and_add64 (& bucket -> shared_limits -> total_size ,
@@ -570,7 +568,6 @@ void slab_unreg_by_addr(void *addr, slab_t *slab) {
570568 utils_mutex_unlock (lock );
571569}
572570
573- //TODO add static
574571static size_t size_to_idx (disjoint_pool_t * pool , size_t size ) {
575572 assert (size <= CutOff && "Unexpected size" );
576573 assert (size > 0 && "Unexpected size" );
@@ -622,20 +619,32 @@ bucket_t *disjoint_pool_find_bucket(disjoint_pool_t *pool, size_t size) {
622619 return pool -> buckets [calculated_idx ];
623620}
624621
625- // TODO
622+ void bucket_print_stats (bucket_t * bucket , bool * title_printed ,
623+ const char * label ) {
624+ if (bucket -> alloc_count ) {
625+ if (!* title_printed ) {
626+ LOG_DEBUG ("%s memory statistics" , label );
627+ LOG_DEBUG ("%14s %12s %12s %18s %20s %21s" , "Bucket Size" , "Allocs" ,
628+ "Frees" , "Allocs from Pool" , "Peak Slabs in Use" ,
629+ "Peak Slabs in Pool" );
630+ * title_printed = true;
631+ }
632+ LOG_DEBUG ("%14zu %12zu %12zu %18zu %20zu %21zu" , bucket -> size ,
633+ bucket -> alloc_count , bucket -> free_count ,
634+ bucket -> alloc_pool_count , bucket -> max_slabs_in_use ,
635+ bucket -> max_slabs_in_pool );
636+ }
637+ }
638+
626639void disjoint_pool_print_stats (disjoint_pool_t * pool , bool * title_printed ,
627640 size_t * high_bucket_size ,
628641 size_t * high_peak_slabs_in_use ,
629642 const char * mt_name ) {
630- (void )title_printed ; // TODO
631- (void )mt_name ; // TODO
632-
633643 * high_bucket_size = 0 ;
634644 * high_peak_slabs_in_use = 0 ;
635645 for (size_t i = 0 ; i < pool -> buckets_num ; i ++ ) {
636- // TODO
637- //(*B).disjoint_pool_print_stats(title_printed, mt_name);
638646 bucket_t * bucket = pool -> buckets [i ];
647+ bucket_print_stats (bucket , title_printed , mt_name );
639648 * high_peak_slabs_in_use =
640649 utils_max (bucket -> max_slabs_in_use , * high_peak_slabs_in_use );
641650 if (bucket -> alloc_count ) {
@@ -699,27 +708,6 @@ std::ostream &operator<<(std::ostream &Os, slab_t &Slab) {
699708}
700709*/
701710
702- /*
703- // TODO move
704- void Bucket::printStats(bool &TitlePrinted, const std::string &Label) {
705- if (alloc_count) {
706- if (!TitlePrinted) {
707- std::cout << Label << " memory statistics\n";
708- std::cout << std::setw(14) << "Bucket Size" << std::setw(12)
709- << "Allocs" << std::setw(12) << "Frees" << std::setw(18)
710- << "Allocs from Pool" << std::setw(20)
711- << "Peak Slabs in Use" << std::setw(21)
712- << "Peak Slabs in Pool" << std::endl;
713- TitlePrinted = true;
714- }
715- std::cout << std::setw(14) << getSize() << std::setw(12) << alloc_count
716- << std::setw(12) << free_count << std::setw(18)
717- << allocPoolCount << std::setw(20) << max_slabs_in_use
718- << std::setw(21) << max_slabs_in_pool << std::endl;
719- }
720- }
721- */
722-
723711umf_result_t disjoint_pool_initialize (umf_memory_provider_handle_t provider ,
724712 void * params , void * * ppPool ) {
725713 if (!provider ) {
@@ -800,20 +788,15 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
800788}
801789
802790void * disjoint_pool_malloc (void * pool , size_t size ) {
803- // For full-slab allocations indicates
804- // whether slab is from Pool.
805-
791+ // For full-slab allocations, indicates whether the slab came from the pool.
806792 disjoint_pool_t * hPool = (disjoint_pool_t * )pool ;
807793
808794 bool from_pool ;
809795 void * ptr = disjoint_pool_allocate (hPool , size , & from_pool );
810796
811797 if (hPool -> params .pool_trace > 2 ) {
812- const char * MT = hPool -> params .name ;
813- (void )MT ;
814- //std::cout << "Allocated " << std::setw(8) << size << " " << MT
815- // << " bytes from " << (FromPool ? "Pool" : "Provider") << " ->"
816- // << ptr << std::endl;
798+ LOG_DEBUG ("Allocated %8zu %s bytes from %s -> %p" , size ,
799+ hPool -> params .name , (from_pool ? "pool" : "provider" ), ptr );
817800 }
818801
819802 return ptr ;
@@ -896,21 +879,16 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
896879 bucket_count_alloc (bucket , from_pool );
897880 }
898881
882+ if (disjoint_pool -> params .pool_trace > 2 ) {
883+ LOG_DEBUG ("Allocated %8zu %s bytes aligned at %zu from %s -> %p" , size ,
884+ disjoint_pool -> params .name , alignment ,
885+ (from_pool ? "pool" : "provider" ), ptr );
886+ }
887+
899888 VALGRIND_DO_MEMPOOL_ALLOC (disjoint_pool , ALIGN_UP ((size_t )ptr , alignment ),
900889 size );
901890 annotate_memory_undefined ((void * )ALIGN_UP ((size_t )ptr , alignment ), size );
902891 return (void * )ALIGN_UP ((size_t )ptr , alignment );
903-
904- if (disjoint_pool -> params .pool_trace > 2 ) {
905- const char * MT = disjoint_pool -> params .name ;
906- (void )MT ;
907- //std::cout << "Allocated " << std::setw(8) << size << " " << MT
908- // << " bytes aligned at " << alignment << " from "
909- // << (FromPool ? "Pool" : "Provider") << " ->" << Ptr
910- // << std::endl;
911- }
912-
913- return ptr ;
914892}
915893
916894size_t disjoint_pool_malloc_usable_size (void * pool , void * ptr ) {
@@ -959,8 +937,6 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
959937 }
960938
961939 bool to_pool = false;
962- // TODO - no multimap
963- // for (auto It = Slabs.first; It != Slabs.second; ++It) {
964940
965941 // The slab object won't be deleted until it's removed from the map which is
966942 // protected by the lock, so it's safe to access it here.
@@ -984,15 +960,14 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
984960
985961 return UMF_RESULT_SUCCESS ;
986962 }
987- //} // for multimap
988963
989964 /*
990965 if (ret == UMF_RESULT_SUCCESS) {
991966
992967 if (impl->getParams().pool_trace > 2) {
993968 auto MT = impl->getParams().Name;
994969 std::cout << "Freed " << MT << " " << ptr << " to "
995- << (ToPool ? "Pool " : "Provider ")
970+ << (ToPool ? "pool " : "provider ")
996971 << ", Current total pool size "
997972 << impl->getLimits()->TotalSize.load()
998973 << ", Current pool size for " << MT << " "
@@ -1001,8 +976,8 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
1001976 }*/
1002977
1003978 // There is a rare case when we have a pointer from system allocation next
1004- // to some slab with an entry in the map. So we find a slab
1005- // but the range checks fail.
979+ // to some slab with an entry in the map. So we find a slab but the range
980+ // checks fail.
1006981 assert (0 );
1007982 return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC ;
1008983}
0 commit comments