@@ -184,13 +184,33 @@ mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map;
 static size_t mi_page_map_count;
 static void* mi_page_map_max_address;
 static mi_memid_t mi_page_map_memid;
+
+// divide the main map in 64 (`MI_BFIELD_BITS`) parts and commit those parts on demand
 static _Atomic(mi_bfield_t) mi_page_map_commit;
 
-static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx);
-static mi_page_t** mi_page_map_ensure_committed(size_t idx);
-static mi_page_t** mi_page_map_ensure_at(size_t idx);
-static inline void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count);
+#define MI_PAGE_MAP_ENTRIES_PER_CBIT  (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
 
+static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
+  mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit);
+  const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT;
+  mi_assert_internal(bit_idx < MI_BFIELD_BITS);
+  if (pbit_idx != NULL) { *pbit_idx = bit_idx; }
+  return ((commit & (MI_ZU(1) << bit_idx)) != 0);
+}
+
+static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
+  size_t bit_idx;
+  if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
+    uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
+    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
+      return NULL;
+    }
+    mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
+  }
+  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]);  // _mi_page_map_at(idx);
+}
+
+// initialize the page map
 bool _mi_page_map_init(void) {
   size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS);
   if (vbits == 0) {
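
The hunk above replaces the forward declarations with the definitions themselves, moved up so that _mi_page_map_init can use them: the main page map is divided into MI_BFIELD_BITS (64) equal parts, each bit of mi_page_map_commit records whether the OS memory for one part is committed, and mi_page_map_ensure_committed commits a part lazily the first time an entry inside it is needed. A minimal standalone sketch of that lazy-commit bitmask (the demo_* names are placeholders, not mimalloc APIs, and the plain flag update stands in for the atomic or):

// Sketch only: models the 1-bit-per-part lazy commit; single-threaded for brevity.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_BFIELD_BITS       64                        // bits in the commit mask (MI_BFIELD_BITS)
#define DEMO_MAP_COUNT         (1u << 20)                // pretend page-map entry count
#define DEMO_ENTRIES_PER_CBIT  (DEMO_MAP_COUNT / DEMO_BFIELD_BITS)

static uint64_t demo_commit_mask = 0;                    // models mi_page_map_commit

static bool demo_is_committed(size_t idx, size_t* pbit_idx) {
  const size_t bit_idx = idx / DEMO_ENTRIES_PER_CBIT;    // which 1/64th part holds entry `idx`?
  if (pbit_idx != NULL) { *pbit_idx = bit_idx; }
  return ((demo_commit_mask & ((uint64_t)1 << bit_idx)) != 0);
}

static bool demo_commit_part(size_t bit_idx) {           // stand-in for _mi_os_commit on that part
  printf("committing part %zu (%u entries)\n", bit_idx, (unsigned)DEMO_ENTRIES_PER_CBIT);
  return true;
}

static bool demo_ensure_committed(size_t idx) {
  size_t bit_idx;
  if (!demo_is_committed(idx, &bit_idx)) {
    if (!demo_commit_part(bit_idx)) return false;
    demo_commit_mask |= ((uint64_t)1 << bit_idx);        // mark the part as committed
  }
  return true;
}

int main(void) {
  demo_ensure_committed(0);                   // commits part 0
  demo_ensure_committed(DEMO_MAP_COUNT - 1);  // commits the last part
  demo_ensure_committed(1);                   // already committed: no work
  return 0;
}
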
@@ -224,30 +244,27 @@ bool _mi_page_map_init(void) {
     _mi_warning_message("internal: the page map was committed but not zero initialized!\n");
     _mi_memzero_aligned(_mi_page_map, page_map_size);
   }
-  mi_atomic_store_release(&mi_page_map_commit, (commit ? ~MI_ZU(0) : MI_ZU(0)));
+  mi_atomic_store_release(&mi_page_map_commit, (mi_page_map_memid.initially_committed ? ~MI_ZU(0) : MI_ZU(0)));
 
-  // note: for the NULL range we only commit one OS page (in the map and sub)
-  if (!mi_page_map_memid.initially_committed) {
-    if (!_mi_os_commit(&_mi_page_map[0], os_page_size, NULL)) {  // commit first part of the map
-      mi_page_map_cannot_commit();
-      return false;
-    }
-  }
-  _mi_page_map[0] = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size);  // we reserved a submap part at the end already
+  // ensure there is a submap for the NULL address
+  mi_page_t** const sub0 = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size);  // we reserved a submap part at the end already
   if (!mi_page_map_memid.initially_committed) {
-    if (!_mi_os_commit(_mi_page_map[0], submap_size, NULL)) {  // commit full submap (issue #1087)
+    if (!_mi_os_commit(sub0, submap_size, NULL)) {  // commit full submap (issue #1087)
       mi_page_map_cannot_commit();
       return false;
     }
   }
-  if (!mi_page_map_memid.initially_zero) {  // initialize low addresses with NULL
-    _mi_memzero_aligned(_mi_page_map[0], submap_size);
+  if (!mi_page_map_memid.initially_zero) {   // initialize low addresses with NULL
+    _mi_memzero_aligned(sub0, submap_size);
   }
+  mi_page_map_ensure_committed(0);
+  mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[0], sub0);
 
   mi_assert_internal(_mi_ptr_page(NULL)==NULL);
   return true;
 }
 
+
 void _mi_page_map_unsafe_destroy(void) {
   mi_assert_internal(_mi_page_map != NULL);
   if (_mi_page_map == NULL) return;
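
In the reworked _mi_page_map_init, the submap for the NULL address (sub0) is committed and zeroed first, and only then published: mi_page_map_ensure_committed(0) makes slot 0 of the main map accessible, and the release store installs sub0 so that readers pairing it with an acquire load also observe the initialized contents. A small sketch of that initialize-then-publish pattern with C11 atomics (the demo_* names are hypothetical, not mimalloc APIs):

// Sketch only: prepare the submap, then release-store the pointer that readers acquire.
#include <stdatomic.h>
#include <stddef.h>
#include <string.h>

typedef struct demo_page_s demo_page_t;            // opaque page type
#define DEMO_SUB_COUNT 4096

static _Atomic(demo_page_t**) demo_map_entry0;     // models _mi_page_map[0]
static demo_page_t* demo_sub0[DEMO_SUB_COUNT];     // models the reserved sub0 area

static void demo_publish_submap(demo_page_t** sub, size_t count) {
  memset(sub, 0, count * sizeof(demo_page_t*));    // zero the contents first (all NULL pages)
  atomic_store_explicit(&demo_map_entry0, sub, memory_order_release);  // then publish the pointer
}

static demo_page_t* demo_lookup(size_t sub_idx) {
  demo_page_t** sub = atomic_load_explicit(&demo_map_entry0, memory_order_acquire);
  return (sub == NULL ? NULL : sub[sub_idx]);      // low addresses resolve to a NULL page
}

int main(void) {
  demo_publish_submap(demo_sub0, DEMO_SUB_COUNT);
  return (demo_lookup(0) == NULL) ? 0 : 1;         // a NULL pointer maps to no page
}
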
@@ -271,31 +288,9 @@ void _mi_page_map_unsafe_destroy(void) {
 }
 
 
-#define MI_PAGE_MAP_ENTRIES_PER_CBIT  (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS)
-
-static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) {
-  mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit);
-  const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT;
-  mi_assert_internal(bit_idx < MI_BFIELD_BITS);
-  if (pbit_idx != NULL) { *pbit_idx = bit_idx; }
-  return ((commit & (MI_ZU(1) << bit_idx)) != 0);
-}
-
-static mi_page_t** mi_page_map_ensure_committed(size_t idx) {
-  size_t bit_idx;
-  if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) {
-    uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT];
-    if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) {
-      return NULL;
-    }
-    mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx);
-  }
-  return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]);  // _mi_page_map_at(idx);
-}
-
-static mi_page_t** mi_page_map_ensure_at(size_t idx) {
+static mi_page_t** mi_page_map_ensure_submap_at(size_t idx) {
   mi_page_t** sub = mi_page_map_ensure_committed(idx);
-  if mi_unlikely(sub == NULL || idx == 0 /* low addresses */) {
+  if mi_unlikely(sub == NULL) {
     // sub map not yet allocated, alloc now
     mi_memid_t memid;
     mi_page_t** expect = sub;
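
mi_page_map_ensure_at is renamed to mi_page_map_ensure_submap_at, and the idx == 0 special case is dropped because init now installs sub0 itself. The closing context lines of the hunk (mi_memid_t memid; mi_page_t** expect = sub;) point at the usual allocate-then-compare-exchange pattern for installing a submap exactly once; a generic sketch of that pattern, using hypothetical demo_* names and plain calloc/free in place of mimalloc's arena allocation:

// Sketch only: allocate a submap lazily and install it with a CAS so concurrent callers agree.
#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct demo_page_s demo_page_t;   // opaque page type
#define DEMO_SUB_COUNT 4096

static demo_page_t** demo_ensure_submap(_Atomic(demo_page_t**)* slot) {
  demo_page_t** sub = atomic_load_explicit(slot, memory_order_acquire);
  if (sub != NULL) return sub;                                       // fast path: already installed
  demo_page_t** fresh = calloc(DEMO_SUB_COUNT, sizeof(demo_page_t*));
  if (fresh == NULL) return NULL;                                    // out of memory
  demo_page_t** expect = NULL;
  if (!atomic_compare_exchange_strong(slot, &expect, fresh)) {
    free(fresh);                          // lost the race: another thread installed a submap
    sub = expect;                         // use the winner's submap
  }
  else {
    sub = fresh;                          // we installed ours
  }
  return sub;
}

int main(void) {
  static _Atomic(demo_page_t**) slot;     // zero-initialized: no submap yet
  demo_page_t** sub = demo_ensure_submap(&slot);
  return (sub != NULL && sub == atomic_load(&slot)) ? 0 : 1;
}
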
@@ -321,7 +316,7 @@ static mi_page_t** mi_page_map_ensure_at(size_t idx) {
 static void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) {
   // is the page map area that contains the page address committed?
   while (slice_count > 0) {
-    mi_page_t** sub = mi_page_map_ensure_at(idx);
+    mi_page_t** sub = mi_page_map_ensure_submap_at(idx);
     // set the offsets for the page
     while (sub_idx < MI_PAGE_MAP_SUB_COUNT) {
       sub[sub_idx] = page;
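
mi_page_map_set_range itself only changes the call site: for each slice covered by a page it obtains the submap for idx and writes the page pointer at sub_idx, continuing into the next submap when sub_idx runs past the end. A simplified sketch of that two-level fill loop (DEMO_SUB_COUNT, the flat map array, and pre-existing submaps are assumptions of the sketch, not mimalloc's layout):

// Sketch only: fill a range of slices across submap boundaries.
#include <stddef.h>

#define DEMO_SUB_COUNT 4096
typedef struct demo_page_s { int id; } demo_page_t;

static void demo_set_range(demo_page_t* page, demo_page_t** map[],
                           size_t idx, size_t sub_idx, size_t slice_count) {
  while (slice_count > 0) {
    demo_page_t** sub = map[idx];                  // the submap covering this range
    while (sub_idx < DEMO_SUB_COUNT && slice_count > 0) {
      sub[sub_idx] = page;                         // every slice points back to its page
      sub_idx++; slice_count--;
    }
    idx++; sub_idx = 0;                            // spill over into the next submap
  }
}

int main(void) {
  static demo_page_t* sub_a[DEMO_SUB_COUNT];
  static demo_page_t* sub_b[DEMO_SUB_COUNT];
  demo_page_t** map[2] = { sub_a, sub_b };
  demo_page_t page = { 1 };
  // mark 10 slices starting near the end of the first submap; the range wraps into the second
  demo_set_range(&page, map, 0, DEMO_SUB_COUNT - 4, 10);
  return (sub_b[5] == &page) ? 0 : 1;
}
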