@@ -44,7 +44,7 @@ typedef struct mi_arena_s {
   mi_lock_t abandoned_visit_lock;       // lock is only used when abandoned segments are being visited
   _Atomic(size_t) search_idx;           // optimization to start the search for free blocks
   _Atomic(mi_msecs_t) purge_expire;     // expiration time when blocks should be purged from `blocks_purge`.
-
+
   mi_bitmap_field_t* blocks_dirty;      // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed;  // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t* blocks_purge;      // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
@@ -192,14 +192,9 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
   if (p != NULL) return p;
 
   // or fall back to the OS
-  p = _mi_os_alloc(size, memid);
+  p = _mi_os_zalloc(size, memid);
   if (p == NULL) return NULL;
 
-  // zero the OS memory if needed
-  if (!memid->initially_zero) {
-    _mi_memzero_aligned(p, size);
-    memid->initially_zero = true;
-  }
   return p;
 }
 
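Here the arena-metadata fallback stops zeroing OS memory by hand and calls `_mi_os_zalloc` instead. As a rough mental model only (an assumption based on the lines removed above, not on the helper's actual definition, and assuming mimalloc's internal declarations are in scope), `_mi_os_zalloc` folds the old allocate-then-zero sequence into one call:

    // Sketch: assumes _mi_os_zalloc mirrors the fallback logic removed above.
    static void* os_zalloc_sketch(size_t size, mi_memid_t* memid) {
      void* p = _mi_os_alloc(size, memid);   // plain OS allocation
      if (p == NULL) return NULL;
      if (!memid->initially_zero) {          // the OS gave no zero-page guarantee
        _mi_memzero_aligned(p, size);        // zero the block explicitly
        memid->initially_zero = true;        // record that the block is now zeroed
      }
      return p;
    }

Centralizing this keeps the `memid->initially_zero` bookkeeping in one helper instead of at each call site.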
@@ -270,12 +265,12 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   else if (commit) {
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
     memid->initially_committed = true;
+    const size_t commit_size = mi_arena_block_size(needed_bcount);
     bool any_uncommitted;
     size_t already_committed = 0;
     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
     if (any_uncommitted) {
       mi_assert_internal(already_committed < needed_bcount);
-      const size_t commit_size = mi_arena_block_size(needed_bcount);
       const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
       bool commit_zero = false;
       if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
@@ -285,6 +280,10 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
         if (commit_zero) { memid->initially_zero = true; }
       }
     }
+    else {
+      // all are already committed: signal that we are reusing memory in case it was purged before
+      _mi_os_reuse(p, commit_size);
+    }
   }
   else {
     // no need to commit, but check if already fully committed
@@ -369,7 +368,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
 static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t* arena_id)
 {
   if (_mi_preloading()) return false;  // use OS only while pre loading
-
+
   const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
   if (arena_count > (MI_MAX_ARENAS - 4)) return false;
 
@@ -411,7 +410,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
 
   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) {  // is arena allocation allowed?
-    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) 
+    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
     {
       void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
       if (p != NULL) return p;
@@ -491,7 +490,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
     // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
     mi_assert_internal(already_committed < blocks);
     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
-    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed)); 
+    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
   }
 
   // clear the purged blocks
@@ -560,7 +559,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
 {
   // check pre-conditions
   if (arena->memid.is_pinned) return false;
-
+
   // expired yet?
   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
   if (!force && (expire == 0 || expire > now)) return false;
@@ -615,7 +614,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
   return any_purged;
 }
 
-static void mi_arenas_try_purge( bool force, bool visit_all ) 
+static void mi_arenas_try_purge( bool force, bool visit_all )
 {
   if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled
 
@@ -632,7 +631,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all )
   mi_atomic_guard(&purge_guard)
   {
     // increase global expire: at most one purge per delay cycle
-    mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay()); 
+    mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
     size_t max_purge_count = (visit_all ? max_arena : 2);
     bool all_visited = true;
     for (size_t i = 0; i < max_arena; i++) {
@@ -951,7 +950,7 @@ void mi_debug_show_arenas(void) mi_attr_noexcept {
   for (size_t i = 0; i < max_arenas; i++) {
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
     if (arena == NULL) break;
-    _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
+    _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
     if (show_inuse) {
       inuse_total += mi_debug_show_bitmap("  ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
     }
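The only functional change in this hunk is the `(size_t)` cast on the block-size argument: `%zu` requires an argument of exactly type `size_t`, and the type of the constant expression `MI_ARENA_BLOCK_SIZE / MI_MiB` depends on how those macros are defined; if it is not `size_t`, passing it through the variadic `_mi_message` is a format mismatch (and a `-Wformat` warning). A stand-alone illustration of the same pattern, with hypothetical stand-in macros rather than mimalloc's real definitions:

    #include <stdio.h>
    #include <stddef.h>

    #define BLOCK_SIZE (64u*1024*1024)   // hypothetical stand-in for MI_ARENA_BLOCK_SIZE
    #define MIB        (1024u*1024)      // hypothetical stand-in for MI_MiB

    int main(void) {
      // BLOCK_SIZE / MIB is an unsigned int here; %zu expects size_t, so cast explicitly.
      printf("blocks of size %zuMiB\n", (size_t)(BLOCK_SIZE / MIB));
      return 0;
    }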
@@ -1011,17 +1010,17 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t
   if (pages == 0) return 0;
 
   // pages per numa node
-  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
-  if (numa_count <= 0) numa_count = 1;
+  int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count());
+  if (numa_count == 0) numa_count = 1;
   const size_t pages_per = pages / numa_count;
   const size_t pages_mod = pages % numa_count;
   const size_t timeout_per = (timeout_msecs == 0 ? 0 : (timeout_msecs / numa_count) + 50);
 
   // reserve evenly among numa nodes
-  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
+  for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
     size_t node_pages = pages_per;  // can be 0
-    if (numa_node < pages_mod) node_pages++;
-    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
+    if ((size_t)numa_node < pages_mod) node_pages++;
+    int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per);
     if (err) return err;
     if (pages < node_pages) {
       pages = 0;
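The rewritten reservation loop distributes the pages exactly as before; `numa_count` and `numa_node` just become `int` so that the node matches the `int` parameter of `mi_reserve_huge_os_pages_at` (as the dropped `(int)` cast at the call site suggests), and the cast moves into the `pages_mod` comparison instead. A worked example of the split, using hypothetical numbers not taken from the commit:

    #include <stdio.h>
    #include <stddef.h>

    int main(void) {
      const size_t pages = 10;     // hypothetical: total huge OS pages requested
      const int numa_count = 4;    // hypothetical: NUMA nodes available
      const size_t pages_per = pages / numa_count;  // 2 pages for every node
      const size_t pages_mod = pages % numa_count;  // 2 pages left over
      for (int numa_node = 0; numa_node < numa_count; numa_node++) {
        size_t node_pages = pages_per;
        if ((size_t)numa_node < pages_mod) node_pages++;  // first pages_mod nodes take the remainder
        printf("node %d reserves %zu page(s)\n", numa_node, node_pages);  // prints 3, 3, 2, 2
      }
      return 0;
    }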