@@ -36,15 +36,15 @@ static size_t CutOff = (size_t)1 << 31; // 2GB
#define POISON_MEMORY 0
#endif

-/* static */ void annotate_memory_inaccessible(void *ptr, size_t size) {
+static void annotate_memory_inaccessible(void *ptr, size_t size) {
    (void)ptr;
    (void)size;
#if (POISON_MEMORY != 0)
    utils_annotate_memory_inaccessible(ptr, size);
#endif
}

-/* static */ void annotate_memory_undefined(void *ptr, size_t size) {
+static void annotate_memory_undefined(void *ptr, size_t size) {
    (void)ptr;
    (void)size;
#if (POISON_MEMORY != 0)
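The utils_annotate_memory_* helpers are defined elsewhere in the repo; a plausible shape for them, assuming they wrap AddressSanitizer's manual poisoning interface (an assumption, not this PR's code):

    #include <sanitizer/asan_interface.h>
    #include <stddef.h>

    /* Sketch only: poison a range so any access is reported by ASan. */
    static void poison_region_sketch(void *ptr, size_t size) {
        __asan_poison_memory_region(ptr, size);
    }

    /* Sketch only: make a range accessible again; contents are undefined. */
    static void unpoison_region_sketch(void *ptr, size_t size) {
        __asan_unpoison_memory_region(ptr, size);
    }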
@@ -124,8 +124,9 @@ void destroy_slab(slab_t *slab) {
    umf_ba_global_free(slab);
}

-// return the index of the first available chunk, SIZE_MAX otherwise
size_t slab_find_first_available_chunk_idx(const slab_t *slab) {
+    // return the index of the first available chunk, SIZE_MAX otherwise
+
    // use the first free chunk index as a hint for the search
    bool *chunk = slab->chunks + sizeof(bool) * slab->first_free_chunk_idx;
    while (chunk != slab->chunks + sizeof(bool) * slab->num_chunks) {
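The loop body is cut off by the hunk; a minimal standalone sketch of the same first-fit scan, assuming chunks[i] == true means "in use":

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    // Scan a bool-per-chunk occupancy array starting from a cached hint;
    // return the first free index, or SIZE_MAX when the slab is full.
    static size_t find_first_free(const bool *used, size_t num_chunks,
                                  size_t hint) {
        for (size_t i = hint; i < num_chunks; i++) {
            if (!used[i]) {
                return i;
            }
        }
        return SIZE_MAX;
    }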
@@ -143,8 +144,6 @@ size_t slab_find_first_available_chunk_idx(const slab_t *slab) {
}

void *slab_get_chunk(slab_t *slab) {
-    // assert(slab->num_allocated != slab->num_chunks);
-
    // free chunk must exist, otherwise we would have allocated another slab
    const size_t chunk_idx = slab_find_first_available_chunk_idx(slab);
    assert(chunk_idx != SIZE_MAX);
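What presumably follows the assert (an assumption based on the bool-array bookkeeping visible above, not code from this PR):

    // Mark the chunk as used and advance the search hint so the next
    // lookup starts past this slot.
    slab->chunks[chunk_idx] = true;
    slab->first_free_chunk_idx = chunk_idx + 1;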
@@ -168,7 +167,7 @@ void *slab_get_end(const slab_t *slab) {
}

void slab_free_chunk(slab_t *slab, void *ptr) {
-    // This method should be called through bucket(since we might remove the
+    // This method should be called through bucket (since we might remove the
    // slab as a result), therefore all locks are done on that level.

    // Make sure that we're in the right slab
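A sketch of the containment check that comment refers to; slab_get() is assumed to return the slab's base address (only slab_get_end() is visible in this diff):

    #include <assert.h>

    // The freed pointer must lie inside [slab base, slab end).
    static void check_ptr_in_slab(slab_t *slab, void *ptr) {
        assert(ptr >= slab_get(slab) && ptr < slab_get_end(slab));
    }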
@@ -256,7 +255,6 @@ create_bucket(size_t sz, disjoint_pool_t *pool,
    bucket->shared_limits = shared_limits;

    utils_mutex_init(&bucket->bucket_lock);
-
    return bucket;
}
@@ -463,9 +461,9 @@ void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) {
        in_pool * bucket_slab_alloc_size(bucket);
}

-// If a slab was available in the pool then note that the current pooled
-// size has reduced by the size of a slab in this bucket.
void bucket_decrement_pool(bucket_t *bucket, bool *from_pool) {
+    // If a slab was available in the pool then note that the current pooled
+    // size has reduced by the size of a slab in this bucket.
    *from_pool = true;
    bucket_update_stats(bucket, 1, -1);
    utils_fetch_and_add64(&bucket->shared_limits->total_size,
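The second argument is cut off by the hunk. utils_fetch_and_add64 is not defined in this diff; its assumed contract is an atomic read-modify-write that returns the previous value, roughly:

    #include <stdatomic.h>
    #include <stdint.h>

    // Sketch of the presumed semantics: atomically add diff to *target
    // and return the old value (unsigned wraparound handles negatives).
    static uint64_t fetch_and_add64_sketch(_Atomic uint64_t *target,
                                           int64_t diff) {
        return atomic_fetch_add(target, (uint64_t)diff);
    }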
@@ -570,7 +568,6 @@ void slab_unreg_by_addr(void *addr, slab_t *slab) {
    utils_mutex_unlock(lock);
}

-//TODO add static
static size_t size_to_idx(disjoint_pool_t *pool, size_t size) {
    assert(size <= CutOff && "Unexpected size");
    assert(size > 0 && "Unexpected size");
@@ -622,20 +619,32 @@ bucket_t *disjoint_pool_find_bucket(disjoint_pool_t *pool, size_t size) {
    return pool->buckets[calculated_idx];
}

-// TODO
+void bucket_print_stats(bucket_t *bucket, bool *title_printed,
+                        const char *label) {
+    if (bucket->alloc_count) {
+        if (!*title_printed) {
+            LOG_DEBUG("%s memory statistics", label);
+            LOG_DEBUG("%14s %12s %12s %18s %20s %21s", "Bucket Size", "Allocs",
+                      "Frees", "Allocs from Pool", "Peak Slabs in Use",
+                      "Peak Slabs in Pool");
+            *title_printed = true;
+        }
+        LOG_DEBUG("%14zu %12zu %12zu %18zu %20zu %21zu", bucket->size,
+                  bucket->alloc_count, bucket->free_count,
+                  bucket->alloc_pool_count, bucket->max_slabs_in_use,
+                  bucket->max_slabs_in_pool);
+    }
+}
+
void disjoint_pool_print_stats(disjoint_pool_t *pool, bool *title_printed,
                               size_t *high_bucket_size,
                               size_t *high_peak_slabs_in_use,
                               const char *mt_name) {
-    (void)title_printed; // TODO
-    (void)mt_name; // TODO
-
    *high_bucket_size = 0;
    *high_peak_slabs_in_use = 0;
    for (size_t i = 0; i < pool->buckets_num; i++) {
-        // TODO
-        //(*B).disjoint_pool_print_stats(title_printed, mt_name);
        bucket_t *bucket = pool->buckets[i];
+        bucket_print_stats(bucket, title_printed, mt_name);
        *high_peak_slabs_in_use =
            utils_max(bucket->max_slabs_in_use, *high_peak_slabs_in_use);
        if (bucket->alloc_count) {
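Note that title_printed is passed by pointer so the header row is logged only once across all buckets; an illustrative call pattern using only names from this diff:

    bool title_printed = false;
    for (size_t i = 0; i < pool->buckets_num; i++) {
        bucket_print_stats(pool->buckets[i], &title_printed,
                           pool->params.name);
    }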
@@ -699,27 +708,6 @@ std::ostream &operator<<(std::ostream &Os, slab_t &Slab) {
}
*/

-/*
-// TODO move
-void Bucket::printStats(bool &TitlePrinted, const std::string &Label) {
-    if (alloc_count) {
-        if (!TitlePrinted) {
-            std::cout << Label << " memory statistics\n";
-            std::cout << std::setw(14) << "Bucket Size" << std::setw(12)
-                      << "Allocs" << std::setw(12) << "Frees" << std::setw(18)
-                      << "Allocs from Pool" << std::setw(20)
-                      << "Peak Slabs in Use" << std::setw(21)
-                      << "Peak Slabs in Pool" << std::endl;
-            TitlePrinted = true;
-        }
-        std::cout << std::setw(14) << getSize() << std::setw(12) << alloc_count
-                  << std::setw(12) << free_count << std::setw(18)
-                  << allocPoolCount << std::setw(20) << max_slabs_in_use
-                  << std::setw(21) << max_slabs_in_pool << std::endl;
-    }
-}
-*/
-
umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
                                      void *params, void **ppPool) {
    if (!provider) {
@@ -800,20 +788,15 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
}

void *disjoint_pool_malloc(void *pool, size_t size) {
-    // For full-slab allocations indicates
-    // whether slab is from Pool.
-
+    // For full-slab allocations indicates whether slab is from Pool.
    disjoint_pool_t *hPool = (disjoint_pool_t *)pool;

    bool from_pool;
    void *ptr = disjoint_pool_allocate(hPool, size, &from_pool);

    if (hPool->params.pool_trace > 2) {
-        const char *MT = hPool->params.name;
-        (void)MT;
-        //std::cout << "Allocated " << std::setw(8) << size << " " << MT
-        //          << " bytes from " << (FromPool ? "Pool" : "Provider") << " ->"
-        //          << ptr << std::endl;
+        LOG_DEBUG("Allocated %8zu %s bytes from %s -> %p", size,
+                  hPool->params.name, (from_pool ? "pool" : "provider"), ptr);
    }

    return ptr;
@@ -896,21 +879,16 @@ void *disjoint_pool_aligned_malloc(void *pool, size_t size, size_t alignment) {
        bucket_count_alloc(bucket, from_pool);
    }

+    if (disjoint_pool->params.pool_trace > 2) {
+        LOG_DEBUG("Allocated %8zu %s bytes aligned at %zu from %s -> %p", size,
+                  disjoint_pool->params.name, alignment,
+                  (from_pool ? "pool" : "provider"), ptr);
+    }
+
    VALGRIND_DO_MEMPOOL_ALLOC(disjoint_pool, ALIGN_UP((size_t)ptr, alignment),
                              size);
    annotate_memory_undefined((void *)ALIGN_UP((size_t)ptr, alignment), size);
    return (void *)ALIGN_UP((size_t)ptr, alignment);
-
-    if (disjoint_pool->params.pool_trace > 2) {
-        const char *MT = disjoint_pool->params.name;
-        (void)MT;
-        //std::cout << "Allocated " << std::setw(8) << size << " " << MT
-        //          << " bytes aligned at " << alignment << " from "
-        //          << (FromPool ? "Pool" : "Provider") << " ->" << Ptr
-        //          << std::endl;
-    }
-
-    return ptr;
}

size_t disjoint_pool_malloc_usable_size(void *pool, void *ptr) {
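This hunk also fixes dead code: in the old version the trace logging and "return ptr;" sat after the aligned return statement and could never execute. ALIGN_UP itself is not defined in this diff; for power-of-two alignments the conventional definition is (an assumption, not necessarily the repo's macro):

    #define ALIGN_UP(value, align) (((value) + (align) - 1) & ~((align) - 1))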
@@ -959,8 +937,6 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
    }

    bool to_pool = false;
-    // TODO - no multimap
-    // for (auto It = Slabs.first; It != Slabs.second; ++It) {

    // The slab object won't be deleted until it's removed from the map which is
    // protected by the lock, so it's safe to access it here.
@@ -984,15 +960,14 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
        return UMF_RESULT_SUCCESS;
    }
-    //} // for multimap

    /*
    if (ret == UMF_RESULT_SUCCESS) {

        if (impl->getParams().pool_trace > 2) {
            auto MT = impl->getParams().Name;
            std::cout << "Freed " << MT << " " << ptr << " to "
-                      << (ToPool ? "Pool " : "Provider ")
+                      << (ToPool ? "pool " : "provider ")
                      << ", Current total pool size "
                      << impl->getLimits()->TotalSize.load()
                      << ", Current pool size for " << MT << " "
@@ -1001,8 +976,8 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
    }*/

    // There is a rare case when we have a pointer from system allocation next
-    // to some slab with an entry in the map. So we find a slab
-    // but the range checks fail.
+    // to some slab with an entry in the map. So we find a slab but the range
+    // checks fail.
    assert(0);
    return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC;
}
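A hypothetical end-to-end sketch using only the entry points visible in this diff; `provider` and `params` are assumed to be configured elsewhere:

    // Create the pool, service one allocation, and release it.
    void *pool = NULL;
    if (disjoint_pool_initialize(provider, params, &pool) ==
        UMF_RESULT_SUCCESS) {
        void *ptr = disjoint_pool_malloc(pool, 64);
        if (ptr != NULL) {
            disjoint_pool_free(pool, ptr);
        }
    }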