@@ -25,22 +25,29 @@ terms of the MIT license. A copy of the license can be found in the file
 
 // Fast allocation in a page: just pop from the free list.
 // Fall back to generic allocation only if the list is empty.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
   mi_assert_internal(page->xblock_size == 0 || mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
   if (mi_unlikely(block == NULL)) {
-    return _mi_malloc_generic(heap, size);
+    return _mi_malloc_generic(heap, size, zero);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
   // pop from the free list
   page->used++;
   page->free = mi_block_next(page, block);
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
 
+  // zero the block?
+  if (mi_unlikely(zero)) {
+    mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks
+    const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+    _mi_memzero_aligned(block, zsize);
+  }
+
 #if (MI_DEBUG>0)
-  if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
+  if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, size); }
 #elif (MI_SECURE!=0)
-  block->next = 0; // don't leak internal data
+  if (!zero) { block->next = 0; } // don't leak internal data
 #endif
 
 #if (MI_STAT>0)
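
The added `zero` branch above is the heart of the change. Below is a minimal sketch, assuming simplified block and free-list types (with plain `memset` standing in for `_mi_memzero_aligned`), of why zeroing can be this cheap: on a page whose memory is known to be zero-initialized (`page->is_zero`), the only non-zero bytes in a freshly popped block are its intrusive free-list link, so clearing `sizeof(block->next)` bytes restores a fully zeroed block; otherwise the whole block size must be cleared.

#include <stddef.h>
#include <string.h>

typedef struct block_s { struct block_s* next; } block_t;

static void* pop_and_zero(block_t** free_list, int page_is_zero, size_t block_size) {
  block_t* block = *free_list;
  if (block == NULL) return NULL;
  *free_list = block->next;   // pop from the free list
  // cheap path: only the link is dirty on an already-zero page
  const size_t zsize = page_is_zero ? sizeof(block->next) : block_size;
  memset(block, 0, zsize);
  return block;
}
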
@@ -69,41 +76,45 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   return block;
 }
 
-// allocate a small block
-mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
-  mi_assert(heap != NULL);
+static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+  mi_assert(heap != NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
   mi_assert(size <= MI_SMALL_SIZE_MAX);
-#if (MI_PADDING)
+  #if (MI_PADDING)
   if (size == 0) {
     size = sizeof(void*);
   }
-#endif
-  mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
-  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
-  mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
-#if MI_STAT>1
+  #endif
+  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
+  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+  #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
-#endif
+  #endif
   return p;
 }
 
+// allocate a small block
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+  return mi_heap_malloc_small_zero(heap, size, false);
+}
+
 mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
   return mi_heap_malloc_small(mi_get_default_heap(), size);
 }
 
 // The main allocation function
-mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
   if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
-    return mi_heap_malloc_small(heap, size);
+    return mi_heap_malloc_small_zero(heap, size, zero);
   }
   else {
     mi_assert(heap != NULL);
-    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
-    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE);      // note: size can overflow but it is detected in malloc_generic
+    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());  // heaps are thread local
+    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero); // note: size can overflow but it is detected in malloc_generic
     mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
 #if MI_STAT>1
     if (p != NULL) {
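
This hunk folds the small and generic paths behind a single `zero` flag: `mi_heap_malloc_small_zero` does the real work, and the public `mi_heap_malloc_small` becomes a thin wrapper that pins the flag to `false`. A schematic of the pattern with hypothetical names (not mimalloc's API): because the wrappers are inline and pass a compile-time constant, the compiler can fold the `zero` branch away on the non-zeroing path.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static inline void* alloc_zero_impl(size_t size, bool zero) {
  void* p = malloc(size);     // stand-in for the real allocator
  if (zero && p != NULL) { memset(p, 0, size); }
  return p;
}

// public entry points pin the flag to a constant
static inline void* alloc_plain(size_t size)  { return alloc_zero_impl(size, false); }
static inline void* alloc_zeroed(size_t size) { return alloc_zero_impl(size, true);  }
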
@@ -115,44 +126,17 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t*
   }
 }
 
-mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
-  return mi_heap_malloc(mi_get_default_heap(), size);
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+  return _mi_heap_malloc_zero(heap, size, false);
 }
 
-
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
-  // note: we need to initialize the whole usable block size to zero, not just the requested size,
-  // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
-  MI_UNUSED(size);
-  mi_assert_internal(p != NULL);
-  mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
-  mi_assert_internal(_mi_ptr_page(p) == page);
-  if (page->is_zero && size > sizeof(mi_block_t)) {
-    // already zero initialized memory
-    ((mi_block_t*)p)->next = 0; // clear the free list pointer
-    mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
-  }
-  else {
-    // otherwise memset
-    memset(p, 0, mi_usable_size(p));
-  }
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
+  return mi_heap_malloc(mi_get_default_heap(), size);
 }
 
 // zero initialized small block
 mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
-  void* p = mi_malloc_small(size);
-  if (p != NULL) {
-    _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
-  }
-  return p;
-}
-
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-  void* p = mi_heap_malloc(heap, size);
-  if (zero && p != NULL) {
-    _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
-  }
-  return p;
+  return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true);
 }
 
 mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
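
With the flag threaded through the allocation path, `_mi_block_zero_init` and its extra page lookup (the old "todo: can we avoid getting the page again?" comment) disappear: `mi_zalloc_small` now simply calls the small path with `zero = true`. The shape of the change, as a schematic with hypothetical helper names:

// before: allocate first, then re-derive the page to zero the block
//   void* p = heap_malloc(heap, size);
//   if (zero && p != NULL) { block_zero_init(ptr_page(p), p, size); }
//
// after: pass the flag down, so the page is only looked up once
//   void* p = heap_malloc_zero(heap, size, zero);
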
@@ -564,6 +548,7 @@ mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
 #ifdef __cplusplus
 void* _mi_externs[] = {
   (void*)&_mi_page_malloc,
+  (void*)&_mi_heap_malloc_zero,
   (void*)&mi_malloc,
   (void*)&mi_malloc_small,
   (void*)&mi_zalloc_small,
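
Finally, the new `_mi_heap_malloc_zero` is registered in `_mi_externs`. A sketch of the underlying idiom (general C++ behavior; reading the list's purpose this way is an assumption): inline functions are only guaranteed to be emitted when odr-used, so taking their addresses in an array forces out-of-line definitions that other translation units can link against.

inline int add_one(int x) { return x + 1; }

// taking the address odr-uses add_one, forcing an out-of-line definition
void* force_emit[] = { (void*)&add_one };
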