@@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
   mi_assert_internal(page->xblock_size == 0 || mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
-  if (mi_unlikely(block == NULL)) {
+  if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero);
   }
   mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
@@ -38,9 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);

   // zero the block?
-  if (mi_unlikely(zero)) {
+  if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0);  // do not call with zero'ing for huge blocks
-    const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+    const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
     _mi_memzero_aligned(block, zsize);
   }

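Every change in this diff rewrites `if (mi_unlikely(cond))` as `if mi_unlikely(cond)`, which only compiles if the macro expansion supplies the outer parentheses itself. A minimal sketch of definitions that make the prefix form legal C, assuming the GCC/Clang `__builtin_expect` builtin (the exact definitions live in mimalloc's internal headers):

    #if defined(__GNUC__) || defined(__clang__)
    #define mi_unlikely(x)  (__builtin_expect(!!(x), false))  // expansion carries its own parens
    #define mi_likely(x)    (__builtin_expect(!!(x), true))
    #else
    #define mi_unlikely(x)  (x)  // portable fallback, still parenthesized
    #define mi_likely(x)    (x)
    #endif

With these, `if mi_unlikely(block == NULL) { ... }` expands to `if (__builtin_expect(!!(block == NULL), false)) { ... }`: the branch-prediction hint travels with the condition, and call sites no longer need doubled parentheses.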
@@ -108,7 +108,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si

 // The main allocation function
 mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-  if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
+  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
     return mi_heap_malloc_small_zero(heap, size, zero);
   }
   else {
@@ -350,7 +350,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
   do {
     use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
-    if (mi_unlikely(use_delayed)) {
+    if mi_unlikely(use_delayed) {
       // unlikely: this only happens on the first concurrent free in a page that is in the full list
       tfreex = mi_tf_set_delayed(tfree, MI_DELAYED_FREEING);
     }
@@ -361,7 +361,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
     }
   } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

-  if (mi_unlikely(use_delayed)) {
+  if mi_unlikely(use_delayed) {
     // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
     mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap));  //mi_page_heap(page);
     mi_assert_internal(heap != NULL);
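The loop above is a standard weak compare-and-swap retry: load the current value, compute the desired successor, and retry if another thread raced in between. A self-contained sketch of the same pattern with C11 atomics (names are illustrative, not mimalloc's):

    #include <stdatomic.h>
    #include <stdint.h>

    static void set_flag_bits(_Atomic uintptr_t* word, uintptr_t bits) {
      uintptr_t cur = atomic_load_explicit(word, memory_order_relaxed);
      uintptr_t next;
      do {
        next = cur | bits;  // recompute from the freshly observed value
        // a weak CAS may fail spuriously; on failure `cur` is refreshed for the retry
      } while (!atomic_compare_exchange_weak_explicit(
                   word, &cur, next, memory_order_release, memory_order_relaxed));
    }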
@@ -387,20 +387,20 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
   // and push it on the free list
-  if (mi_likely(local)) {
+  if mi_likely(local) {
     // owning thread can free a block directly
-    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
     #if (MI_DEBUG != 0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
     page->used--;
-    if (mi_unlikely(mi_page_all_free(page))) {
+    if mi_unlikely(mi_page_all_free(page)) {
       _mi_page_retire(page);
     }
-    else if (mi_unlikely(mi_page_is_in_full(page))) {
+    else if mi_unlikely(mi_page_is_in_full(page)) {
       _mi_page_unfull(page);
     }
   }
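The local path above pushes the block onto an intrusive singly-linked free list: the freed block itself stores the link. A minimal sketch under that assumption (`block_t` and `free_list_push` are illustrative names):

    typedef struct block_s { struct block_s* next; } block_t;

    static inline void free_list_push(block_t** list, block_t* b) {
      b->next = *list;  // the link lives in the first word of the freed block
      *list = b;        // O(1) push, no extra metadata needed
    }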
@@ -433,26 +433,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 {
   MI_UNUSED(msg);
 #if (MI_DEBUG > 0)
-  if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+  if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
     _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
     return NULL;
   }
 #endif

   mi_segment_t* const segment = _mi_ptr_segment(p);
-  if (mi_unlikely(segment == NULL)) return NULL;  // checks also for (p==NULL)
+  if mi_unlikely(segment == NULL) return NULL;  // checks also for (p==NULL)

 #if (MI_DEBUG > 0)
-  if (mi_unlikely(!mi_is_in_heap_region(p))) {
+  if mi_unlikely(!mi_is_in_heap_region(p)) {
     _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
       "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
-    if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+    if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
       _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
     }
   }
 #endif
 #if (MI_DEBUG > 0 || MI_SECURE >= 4)
-  if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+  if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
     _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
     return NULL;
   }
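The cookie comparison is an integrity check on the segment header: a valid segment stores a tag derived from its own address and a process-wide random secret, so a stray or forged pointer is unlikely to pass. A hypothetical sketch of such a scheme (the real derivation is internal to mimalloc):

    #include <stdint.h>

    // `process_secret` would be drawn from a random source once at startup
    static inline uintptr_t ptr_cookie(const void* p, uintptr_t process_secret) {
      return ((uintptr_t)p ^ process_secret);  // cheap tag: address XOR secret
    }
    // plausibility test: segment->cookie == ptr_cookie(segment, process_secret)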
@@ -464,23 +464,23 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 void mi_free(void* p) mi_attr_noexcept
 {
   mi_segment_t* const segment = mi_checked_ptr_segment(p, "mi_free");
-  if (mi_unlikely(segment == NULL)) return;
+  if mi_unlikely(segment == NULL) return;

   mi_threadid_t tid = _mi_thread_id();
   mi_page_t* const page = _mi_segment_page_of(segment, p);
   mi_block_t* const block = (mi_block_t*)p;

-  if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) {  // the thread id matches and it is not a full page, nor has aligned blocks
+  if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) {  // the thread id matches and it is not a full page, nor has aligned blocks
     // local, and not full or aligned
-    if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+    if mi_unlikely(mi_check_is_double_free(page, block)) return;
     mi_check_padding(page, block);
     mi_stat_free(page, block);
     #if (MI_DEBUG != 0)
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
     #endif
     mi_block_set_next(page, block, page->local_free);
     page->local_free = block;
-    if (mi_unlikely(--page->used == 0)) {  // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+    if mi_unlikely(--page->used == 0) {  // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
       _mi_page_retire(page);
     }
   }
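The comment on `--page->used == 0` is a codegen note: branching on the result of the decrement lets the compiler reuse the flags set by the subtraction instead of decrementing and then separately recomputing emptiness. A stand-in illustration (here `used` plays the role of `page->used`):

    #include <stdbool.h>
    #include <stddef.h>

    static bool dec_and_empty(size_t* used) {
      return (--*used == 0);  // fused decrement-and-test; no second load or call
    }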
@@ -526,7 +526,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
   const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
   if (segment == NULL) return 0;  // also returns 0 if `p == NULL`
   const mi_page_t* const page = _mi_segment_page_of(segment, p);
-  if (mi_likely(!mi_page_has_aligned(page))) {
+  if mi_likely(!mi_page_has_aligned(page)) {
     const mi_block_t* block = (const mi_block_t*)p;
     return mi_page_usable_size_of(page, block);
   }
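`_mi_usable_size` backs the public `mi_usable_size`, which reports the block's real capacity; that is often more than the requested size because blocks are rounded up to a size class. A small usage sketch:

    #include <mimalloc.h>
    #include <stdio.h>

    int main(void) {
      void* p = mi_malloc(100);
      printf("usable: %zu\n", mi_usable_size(p));  // may print more than 100
      mi_free(p);
      return 0;
    }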
@@ -621,18 +621,18 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p, "mi_realloc");  // also works if p == NULL (with size 0)
-  if (mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0)) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+  if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
     // todo: adjust potential padding to reflect the new size?
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap, newsize);
-  if (mi_likely(newp != NULL)) {
+  if mi_likely(newp != NULL) {
     if (zero && newsize > size) {
       // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
       const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
       memset((uint8_t*)newp + start, 0, newsize - start);
     }
-    if (mi_likely(p != NULL)) {
+    if mi_likely(p != NULL) {
       _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
       mi_free(p);  // only free the original pointer if successful
     }
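The guard above keeps the block only when the new size still fits and wastes at most half of it (`newsize <= size && newsize >= size/2`). For instance, shrinking a block with 100 usable bytes to 60 returns the same pointer, while shrinking it to 40 would waste over 50% and triggers a fresh allocation plus copy. A usage sketch (exact in-place behavior depends on the block's usable size):

    #include <mimalloc.h>

    void shrink_example(void) {
      char* p = (char*)mi_malloc(100);
      p = (char*)mi_realloc(p, 60);  // likely in place: fits, at most 50% waste
      p = (char*)mi_realloc(p, 40);  // may move: keeping the block wastes > 50%
      mi_free(p);
    }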
@@ -857,13 +857,13 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {

 mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
   void* p = mi_malloc(size);
-  if (mi_unlikely(p == NULL)) return mi_try_new(size, false);
+  if mi_unlikely(p == NULL) return mi_try_new(size, false);
   return p;
 }

 mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
   void* p = mi_malloc(size);
-  if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+  if mi_unlikely(p == NULL) return mi_try_new(size, true);
   return p;
 }

@@ -887,7 +887,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz

 mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+  if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
     mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }
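`mi_count_size_overflow` guards the `count * size` multiplication against wrap-around before allocating. A hypothetical shape for such a check (the real helper lives in mimalloc's internal headers and may use compiler overflow builtins instead):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static inline bool count_size_overflow(size_t count, size_t size, size_t* total) {
      if (count == 1) { *total = size; return false; }  // common case: no multiply
      if (size > 0 && count > SIZE_MAX / size) {        // product would wrap
        *total = SIZE_MAX;
        return true;
      }
      *total = count * size;
      return false;
    }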
@@ -906,7 +906,7 @@ mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {

 mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
   size_t total;
-  if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+  if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
     mi_try_new_handler(false);  // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
     return NULL;
   }