@@ -395,7 +395,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
395395 const size_t size = mi_segment_size (segment );
396396 const size_t csize = _mi_commit_mask_committed_size (& segment -> commit_mask , size );
397397
398- _mi_abandoned_await_readers (); // wait until safe to free
398+ _mi_abandoned_await_readers (tld -> abandoned ); // wait until safe to free
399399 _mi_arena_free (segment , mi_segment_size (segment ), csize , segment -> memid , tld -> stats );
400400}
401401
@@ -1059,7 +1059,6 @@ would be spread among all other segments in the arenas.
10591059// Use the bottom 20-bits (on 64-bit) of the aligned segment pointers
10601060// to put in a tag that increments on update to avoid the A-B-A problem.
10611061#define MI_TAGGED_MASK MI_SEGMENT_MASK
1062- typedef uintptr_t mi_tagged_segment_t ;
10631062
10641063static mi_segment_t * mi_tagged_segment_ptr (mi_tagged_segment_t ts ) {
10651064 return (mi_segment_t * )(ts & ~MI_TAGGED_MASK );
@@ -1071,55 +1070,40 @@ static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_se
10711070 return ((uintptr_t )segment | tag );
10721071}
10731072
1074- // This is a list of visited abandoned pages that were full at the time.
1075- // this list migrates to `abandoned` when that becomes NULL. The use of
1076- // this list reduces contention and the rate at which segments are visited.
1077- static mi_decl_cache_align _Atomic(mi_segment_t * ) abandoned_visited ; // = NULL
1078-
1079- // The abandoned page list (tagged as it supports pop)
1080- static mi_decl_cache_align _Atomic(mi_tagged_segment_t ) abandoned ; // = NULL
1081-
1082- // Maintain these for debug purposes (these counts may be a bit off)
1083- static mi_decl_cache_align _Atomic(size_t ) abandoned_count ;
1084- static mi_decl_cache_align _Atomic(size_t ) abandoned_visited_count ;
1085-
1086- // We also maintain a count of current readers of the abandoned list
1087- // in order to prevent resetting/decommitting segment memory if it might
1088- // still be read.
1089- static mi_decl_cache_align _Atomic(size_t ) abandoned_readers ; // = 0
1073+ mi_abandoned_pool_t _mi_abandoned_default ;
10901074
10911075// Push on the visited list
1092- static void mi_abandoned_visited_push (mi_segment_t * segment ) {
1076+ static void mi_abandoned_visited_push (mi_abandoned_pool_t * pool , mi_segment_t * segment ) {
10931077 mi_assert_internal (segment -> thread_id == 0 );
10941078 mi_assert_internal (mi_atomic_load_ptr_relaxed (mi_segment_t ,& segment -> abandoned_next ) == NULL );
10951079 mi_assert_internal (segment -> next == NULL );
10961080 mi_assert_internal (segment -> used > 0 );
1097- mi_segment_t * anext = mi_atomic_load_ptr_relaxed (mi_segment_t , & abandoned_visited );
1081+ mi_segment_t * anext = mi_atomic_load_ptr_relaxed (mi_segment_t , & pool -> abandoned_visited );
10981082 do {
10991083 mi_atomic_store_ptr_release (mi_segment_t , & segment -> abandoned_next , anext );
1100- } while (!mi_atomic_cas_ptr_weak_release (mi_segment_t , & abandoned_visited , & anext , segment ));
1101- mi_atomic_increment_relaxed (& abandoned_visited_count );
1084+ } while (!mi_atomic_cas_ptr_weak_release (mi_segment_t , & pool -> abandoned_visited , & anext , segment ));
1085+ mi_atomic_increment_relaxed (& pool -> abandoned_visited_count );
11021086}
11031087
11041088// Move the visited list to the abandoned list.
1105- static bool mi_abandoned_visited_revisit (void )
1089+ static bool mi_abandoned_visited_revisit (mi_abandoned_pool_t * pool )
11061090{
11071091 // quick check if the visited list is empty
1108- if (mi_atomic_load_ptr_relaxed (mi_segment_t , & abandoned_visited ) == NULL ) return false;
1092+ if (mi_atomic_load_ptr_relaxed (mi_segment_t , & pool -> abandoned_visited ) == NULL ) return false;
11091093
11101094 // grab the whole visited list
1111- mi_segment_t * first = mi_atomic_exchange_ptr_acq_rel (mi_segment_t , & abandoned_visited , NULL );
1095+ mi_segment_t * first = mi_atomic_exchange_ptr_acq_rel (mi_segment_t , & pool -> abandoned_visited , NULL );
11121096 if (first == NULL ) return false;
11131097
11141098 // first try to swap directly if the abandoned list happens to be NULL
11151099 mi_tagged_segment_t afirst ;
1116- mi_tagged_segment_t ts = mi_atomic_load_relaxed (& abandoned );
1100+ mi_tagged_segment_t ts = mi_atomic_load_relaxed (& pool -> abandoned );
11171101 if (mi_tagged_segment_ptr (ts )== NULL ) {
1118- size_t count = mi_atomic_load_relaxed (& abandoned_visited_count );
1102+ size_t count = mi_atomic_load_relaxed (& pool -> abandoned_visited_count );
11191103 afirst = mi_tagged_segment (first , ts );
1120- if (mi_atomic_cas_strong_acq_rel (& abandoned , & ts , afirst )) {
1121- mi_atomic_add_relaxed (& abandoned_count , count );
1122- mi_atomic_sub_relaxed (& abandoned_visited_count , count );
1104+ if (mi_atomic_cas_strong_acq_rel (& pool -> abandoned , & ts , afirst )) {
1105+ mi_atomic_add_relaxed (& pool -> abandoned_count , count );
1106+ mi_atomic_sub_relaxed (& pool -> abandoned_visited_count , count );
11231107 return true;
11241108 }
11251109 }
@@ -1133,51 +1117,51 @@ static bool mi_abandoned_visited_revisit(void)
11331117
11341118 // and atomically prepend to the abandoned list
11351119 // (no need to increase the readers as we don't access the abandoned segments)
1136- mi_tagged_segment_t anext = mi_atomic_load_relaxed (& abandoned );
1120+ mi_tagged_segment_t anext = mi_atomic_load_relaxed (& pool -> abandoned );
11371121 size_t count ;
11381122 do {
1139- count = mi_atomic_load_relaxed (& abandoned_visited_count );
1123+ count = mi_atomic_load_relaxed (& pool -> abandoned_visited_count );
11401124 mi_atomic_store_ptr_release (mi_segment_t , & last -> abandoned_next , mi_tagged_segment_ptr (anext ));
11411125 afirst = mi_tagged_segment (first , anext );
1142- } while (!mi_atomic_cas_weak_release (& abandoned , & anext , afirst ));
1143- mi_atomic_add_relaxed (& abandoned_count , count );
1144- mi_atomic_sub_relaxed (& abandoned_visited_count , count );
1126+ } while (!mi_atomic_cas_weak_release (& pool -> abandoned , & anext , afirst ));
1127+ mi_atomic_add_relaxed (& pool -> abandoned_count , count );
1128+ mi_atomic_sub_relaxed (& pool -> abandoned_visited_count , count );
11451129 return true;
11461130}
11471131
11481132// Push on the abandoned list.
1149- static void mi_abandoned_push (mi_segment_t * segment ) {
1133+ static void mi_abandoned_push (mi_abandoned_pool_t * pool , mi_segment_t * segment ) {
11501134 mi_assert_internal (segment -> thread_id == 0 );
11511135 mi_assert_internal (mi_atomic_load_ptr_relaxed (mi_segment_t , & segment -> abandoned_next ) == NULL );
11521136 mi_assert_internal (segment -> next == NULL );
11531137 mi_assert_internal (segment -> used > 0 );
11541138 mi_tagged_segment_t next ;
1155- mi_tagged_segment_t ts = mi_atomic_load_relaxed (& abandoned );
1139+ mi_tagged_segment_t ts = mi_atomic_load_relaxed (& pool -> abandoned );
11561140 do {
11571141 mi_atomic_store_ptr_release (mi_segment_t , & segment -> abandoned_next , mi_tagged_segment_ptr (ts ));
11581142 next = mi_tagged_segment (segment , ts );
1159- } while (!mi_atomic_cas_weak_release (& abandoned , & ts , next ));
1160- mi_atomic_increment_relaxed (& abandoned_count );
1143+ } while (!mi_atomic_cas_weak_release (& pool -> abandoned , & ts , next ));
1144+ mi_atomic_increment_relaxed (& pool -> abandoned_count );
11611145}
11621146
11631147// Wait until there are no more pending reads on segments that used to be in the abandoned list
11641148// called for example from `arena.c` before decommitting
1165- void _mi_abandoned_await_readers (void ) {
1149+ void _mi_abandoned_await_readers (mi_abandoned_pool_t * pool ) {
11661150 size_t n ;
11671151 do {
1168- n = mi_atomic_load_acquire (& abandoned_readers );
1152+ n = mi_atomic_load_acquire (& pool -> abandoned_readers );
11691153 if (n != 0 ) mi_atomic_yield ();
11701154 } while (n != 0 );
11711155}
11721156
11731157// Pop from the abandoned list
1174- static mi_segment_t * mi_abandoned_pop (void ) {
1158+ static mi_segment_t * mi_abandoned_pop (mi_abandoned_pool_t * pool ) {
11751159 mi_segment_t * segment ;
11761160 // Check efficiently if it is empty (or if the visited list needs to be moved)
1177- mi_tagged_segment_t ts = mi_atomic_load_relaxed (& abandoned );
1161+ mi_tagged_segment_t ts = mi_atomic_load_relaxed (& pool -> abandoned );
11781162 segment = mi_tagged_segment_ptr (ts );
11791163 if mi_likely (segment == NULL ) {
1180- if mi_likely (!mi_abandoned_visited_revisit ()) { // try to swap in the visited list on NULL
1164+ if mi_likely (!mi_abandoned_visited_revisit (pool )) { // try to swap in the visited list on NULL
11811165 return NULL ;
11821166 }
11831167 }
@@ -1186,20 +1170,20 @@ static mi_segment_t* mi_abandoned_pop(void) {
11861170 // a segment to be decommitted while a read is still pending,
11871171 // and a tagged pointer to prevent A-B-A link corruption.
11881172 // (this is called from `region.c:_mi_mem_free` for example)
1189- mi_atomic_increment_relaxed (& abandoned_readers ); // ensure no segment gets decommitted
1173+ mi_atomic_increment_relaxed (& pool -> abandoned_readers ); // ensure no segment gets decommitted
11901174 mi_tagged_segment_t next = 0 ;
1191- ts = mi_atomic_load_acquire (& abandoned );
1175+ ts = mi_atomic_load_acquire (& pool -> abandoned );
11921176 do {
11931177 segment = mi_tagged_segment_ptr (ts );
11941178 if (segment != NULL ) {
11951179 mi_segment_t * anext = mi_atomic_load_ptr_relaxed (mi_segment_t , & segment -> abandoned_next );
11961180 next = mi_tagged_segment (anext , ts ); // note: reads the segment's `abandoned_next` field so should not be decommitted
11971181 }
1198- } while (segment != NULL && !mi_atomic_cas_weak_acq_rel (& abandoned , & ts , next ));
1199- mi_atomic_decrement_relaxed (& abandoned_readers ); // release reader lock
1182+ } while (segment != NULL && !mi_atomic_cas_weak_acq_rel (& pool -> abandoned , & ts , next ));
1183+ mi_atomic_decrement_relaxed (& pool -> abandoned_readers ); // release reader lock
12001184 if (segment != NULL ) {
12011185 mi_atomic_store_ptr_release (mi_segment_t , & segment -> abandoned_next , NULL );
1202- mi_atomic_decrement_relaxed (& abandoned_count );
1186+ mi_atomic_decrement_relaxed (& pool -> abandoned_count );
12031187 }
12041188 return segment ;
12051189}
@@ -1237,7 +1221,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
12371221 segment -> thread_id = 0 ;
12381222 mi_atomic_store_ptr_release (mi_segment_t , & segment -> abandoned_next , NULL );
12391223 segment -> abandoned_visits = 1 ; // from 0 to 1 to signify it is abandoned
1240- mi_abandoned_push (segment );
1224+ mi_abandoned_push (tld -> abandoned , segment );
12411225}
12421226
12431227void _mi_segment_page_abandon (mi_page_t * page , mi_segments_tld_t * tld ) {
@@ -1381,7 +1365,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
13811365
13821366void _mi_abandoned_reclaim_all (mi_heap_t * heap , mi_segments_tld_t * tld ) {
13831367 mi_segment_t * segment ;
1384- while ((segment = mi_abandoned_pop ()) != NULL ) {
1368+ while ((segment = mi_abandoned_pop (tld -> abandoned )) != NULL ) {
13851369 mi_segment_reclaim (segment , heap , 0 , NULL , tld );
13861370 }
13871371}
@@ -1391,7 +1375,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
13911375 * reclaimed = false;
13921376 mi_segment_t * segment ;
13931377 long max_tries = mi_option_get_clamp (mi_option_max_segment_reclaim , 8 , 1024 ); // limit the work to bound allocation times
1394- while ((max_tries -- > 0 ) && ((segment = mi_abandoned_pop ()) != NULL )) {
1378+ while ((max_tries -- > 0 ) && ((segment = mi_abandoned_pop (tld -> abandoned )) != NULL )) {
13951379 segment -> abandoned_visits ++ ;
13961380 // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments
13971381 // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way?
@@ -1418,7 +1402,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
14181402 else {
14191403 // otherwise, push on the visited list so it gets not looked at too quickly again
14201404 mi_segment_try_purge (segment , true /* force? */ , tld -> stats ); // force purge if needed as we may not visit soon again
1421- mi_abandoned_visited_push (segment );
1405+ mi_abandoned_visited_push (tld -> abandoned , segment );
14221406 }
14231407 }
14241408 return NULL ;
@@ -1428,11 +1412,12 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
14281412void _mi_abandoned_collect (mi_heap_t * heap , bool force , mi_segments_tld_t * tld )
14291413{
14301414 mi_segment_t * segment ;
1415+ mi_abandoned_pool_t * pool = tld -> abandoned ;
14311416 int max_tries = (force ? 16 * 1024 : 1024 ); // limit latency
14321417 if (force ) {
1433- mi_abandoned_visited_revisit ();
1418+ mi_abandoned_visited_revisit (pool );
14341419 }
1435- while ((max_tries -- > 0 ) && ((segment = mi_abandoned_pop ()) != NULL )) {
1420+ while ((max_tries -- > 0 ) && ((segment = mi_abandoned_pop (pool )) != NULL )) {
14361421 mi_segment_check_free (segment ,0 ,0 ,tld ); // try to free up pages (due to concurrent frees)
14371422 if (segment -> used == 0 ) {
14381423 // free the segment (by forced reclaim) to make it available to other threads.
@@ -1444,7 +1429,7 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
14441429 // otherwise, purge if needed and push on the visited list
14451430 // note: forced purge can be expensive if many threads are destroyed/created as in mstress.
14461431 mi_segment_try_purge (segment , force , tld -> stats );
1447- mi_abandoned_visited_push (segment );
1432+ mi_abandoned_visited_push (pool , segment );
14481433 }
14491434 }
14501435}