@@ -13,7 +13,7 @@ static void *chunk_mem(struct z_heap *h, chunkid_t c)
1313 chunk_unit_t * buf = chunk_buf (h );
1414 uint8_t * ret = ((uint8_t * )& buf [c ]) + chunk_header_bytes (h );
1515
16- CHECK (!(((size_t )ret ) & (big_heap (h ) ? 7 : 3 )));
16+ CHECK (!(((uintptr_t )ret ) & (big_heap (h ) ? 7 : 3 )));
1717
1818 return ret ;
1919}
@@ -90,9 +90,9 @@ static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
9090 CHECK (rc > lc );
9191 CHECK (rc - lc < chunk_size (h , lc ));
9292
93- size_t sz0 = chunk_size (h , lc );
94- size_t lsz = rc - lc ;
95- size_t rsz = sz0 - lsz ;
93+ chunksz_t sz0 = chunk_size (h , lc );
94+ chunksz_t lsz = rc - lc ;
95+ chunksz_t rsz = sz0 - lsz ;
9696
9797 set_chunk_size (h , lc , lsz );
9898 set_chunk_size (h , rc , rsz );
@@ -103,7 +103,7 @@ static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
103103/* Does not modify free list */
104104static void merge_chunks (struct z_heap * h , chunkid_t lc , chunkid_t rc )
105105{
106- size_t newsz = chunk_size (h , lc ) + chunk_size (h , rc );
106+ chunksz_t newsz = chunk_size (h , lc ) + chunk_size (h , rc );
107107
108108 set_chunk_size (h , lc , newsz );
109109 set_left_chunk_size (h , right_chunk (h , rc ), newsz );
@@ -167,7 +167,7 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
167167 free_chunk (h , c );
168168}
169169
170- static chunkid_t alloc_chunk (struct z_heap * h , size_t sz )
170+ static chunkid_t alloc_chunk (struct z_heap * h , chunksz_t sz )
171171{
172172 int bi = bucket_idx (h , sz );
173173 struct z_heap_bucket * b = & h -> buckets [bi ];
@@ -205,7 +205,7 @@ static chunkid_t alloc_chunk(struct z_heap *h, size_t sz)
205205 /* Otherwise pick the smallest non-empty bucket guaranteed to
206206 * fit and use that unconditionally.
207207 */
208- size_t bmask = h -> avail_buckets & ~((1 << (bi + 1 )) - 1 );
208+ uint32_t bmask = h -> avail_buckets & ~((1 << (bi + 1 )) - 1 );
209209
210210 if (bmask != 0U ) {
211211 int minbucket = __builtin_ctz (bmask );
@@ -227,7 +227,7 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
227227 return NULL ;
228228 }
229229
230- size_t chunk_sz = bytes_to_chunksz (h , bytes );
230+ chunksz_t chunk_sz = bytes_to_chunksz (h , bytes );
231231 chunkid_t c = alloc_chunk (h , chunk_sz );
232232 if (c == 0U ) {
233233 return NULL ;
@@ -246,7 +246,7 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
246246void * sys_heap_aligned_alloc (struct sys_heap * heap , size_t align , size_t bytes )
247247{
248248 struct z_heap * h = heap -> heap ;
249- size_t padded_sz , gap , rewind ;
249+ size_t gap , rewind ;
250250
251251 /*
252252 * Split align and rewind values (if any).
@@ -277,7 +277,7 @@ void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
277277 * We over-allocate to account for alignment and then free
278278 * the extra allocations afterwards.
279279 */
280- padded_sz = bytes_to_chunksz (h , bytes + align - gap );
280+ chunksz_t padded_sz = bytes_to_chunksz (h , bytes + align - gap );
281281 chunkid_t c0 = alloc_chunk (h , padded_sz );
282282
283283 if (c0 == 0 ) {
@@ -333,7 +333,7 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
333333 chunkid_t c = mem_to_chunkid (h , ptr );
334334 chunkid_t rc = right_chunk (h , c );
335335 size_t align_gap = (uint8_t * )ptr - (uint8_t * )chunk_mem (h , c );
336- size_t chunks_need = bytes_to_chunksz (h , bytes + align_gap );
336+ chunksz_t chunks_need = bytes_to_chunksz (h , bytes + align_gap );
337337
338338 if (align && ((uintptr_t )ptr & (align - 1 ))) {
339339 /* ptr is not sufficiently aligned */
@@ -387,39 +387,39 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
387387 /* Round the start up, the end down */
388388 uintptr_t addr = ROUND_UP (mem , CHUNK_UNIT );
389389 uintptr_t end = ROUND_DOWN ((uint8_t * )mem + bytes , CHUNK_UNIT );
390- size_t buf_sz = (end - addr ) / CHUNK_UNIT ;
390+ chunksz_t heap_sz = (end - addr ) / CHUNK_UNIT ;
391391
392392 CHECK (end > addr );
393- __ASSERT (buf_sz > chunksz (sizeof (struct z_heap )), "heap size is too small" );
393+ __ASSERT (heap_sz > chunksz (sizeof (struct z_heap )), "heap size is too small" );
394394
395395 struct z_heap * h = (struct z_heap * )addr ;
396396 heap -> heap = h ;
397- h -> chunk0_hdr_area = 0 ;
398- h -> end_chunk = buf_sz ;
397+ h -> end_chunk = heap_sz ;
399398 h -> avail_buckets = 0 ;
400399
401- int nb_buckets = bucket_idx (h , buf_sz ) + 1 ;
402- size_t chunk0_size = chunksz (sizeof (struct z_heap ) +
400+ int nb_buckets = bucket_idx (h , heap_sz ) + 1 ;
401+ chunksz_t chunk0_size = chunksz (sizeof (struct z_heap ) +
403402 nb_buckets * sizeof (struct z_heap_bucket ));
404403
405- __ASSERT (chunk0_size + min_chunk_size (h ) < buf_sz , "heap size is too small" );
404+ __ASSERT (chunk0_size + min_chunk_size (h ) < heap_sz , "heap size is too small" );
406405
407406 for (int i = 0 ; i < nb_buckets ; i ++ ) {
408407 h -> buckets [i ].next = 0 ;
409408 }
410409
411410 /* chunk containing our struct z_heap */
412411 set_chunk_size (h , 0 , chunk0_size );
412+ set_left_chunk_size (h , 0 , 0 );
413413 set_chunk_used (h , 0 , true);
414414
415415 /* chunk containing the free heap */
416- set_chunk_size (h , chunk0_size , buf_sz - chunk0_size );
416+ set_chunk_size (h , chunk0_size , heap_sz - chunk0_size );
417417 set_left_chunk_size (h , chunk0_size , chunk0_size );
418418
419419 /* the end marker chunk */
420- set_chunk_size (h , buf_sz , 0 );
421- set_left_chunk_size (h , buf_sz , buf_sz - chunk0_size );
422- set_chunk_used (h , buf_sz , true);
420+ set_chunk_size (h , heap_sz , 0 );
421+ set_left_chunk_size (h , heap_sz , heap_sz - chunk0_size );
422+ set_chunk_used (h , heap_sz , true);
423423
424424 free_list_add (h , chunk0_size );
425425}