diff --git a/kernel/init.c b/kernel/init.c
index 190114b0d601f..14fb807494045 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -515,7 +515,7 @@ void __weak z_early_rand_get(uint8_t *buf, size_t length)
 		state = state + k_cycle_get_32();
 		state = state * 2862933555777941757ULL + 3037000493ULL;
 		val = (uint32_t)(state >> 32);
-		rc = MIN(length, sizeof(val));
+		rc = min(length, sizeof(val));
 		arch_early_memcpy((void *)buf, &val, rc);
 
 		length -= rc;
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index 880f92f457dec..d0753bc91436a 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -237,7 +237,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 				"slab corruption detected");
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
-		slab->info.max_used = MAX(slab->info.num_used,
+		slab->info.max_used = max(slab->info.num_used,
 					  slab->info.max_used);
 #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
 
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 322fc61c5c0b0..fde160de20943 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -274,8 +274,8 @@ static void virt_region_free(void *vaddr, size_t size)
 	     (vaddr_u8 < Z_VIRT_REGION_END_ADDR)) ||
 	    (((vaddr_u8 + size - 1) >= Z_VIRT_REGION_START_ADDR) &&
 	     ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) {
-		uint8_t *adjusted_start = MAX(vaddr_u8, Z_VIRT_REGION_START_ADDR);
-		uint8_t *adjusted_end = MIN(vaddr_u8 + size,
+		uint8_t *adjusted_start = max(vaddr_u8, Z_VIRT_REGION_START_ADDR);
+		uint8_t *adjusted_end = min(vaddr_u8 + size,
 					    Z_VIRT_REGION_END_ADDR);
 		size_t adjusted_sz = adjusted_end - adjusted_start;
 
@@ -930,8 +930,8 @@ void k_mem_map_phys_bare(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32
 	    IN_RANGE(aligned_phys + aligned_size - 1,
 		     (uintptr_t)K_MEM_VIRT_RAM_START,
 		     (uintptr_t)(K_MEM_VIRT_RAM_END - 1))) {
-		uint8_t *adjusted_start = MAX(dest_addr, K_MEM_VIRT_RAM_START);
-		uint8_t *adjusted_end = MIN(dest_addr + aligned_size,
+		uint8_t *adjusted_start = max(dest_addr, K_MEM_VIRT_RAM_START);
+		uint8_t *adjusted_end = min(dest_addr + aligned_size,
 					    K_MEM_VIRT_RAM_END);
 		size_t adjusted_sz = adjusted_end - adjusted_start;
 
diff --git a/kernel/pipe.c b/kernel/pipe.c
index c5157ea6921e6..6bbd50263ffed 100644
--- a/kernel/pipe.c
+++ b/kernel/pipe.c
@@ -113,7 +113,7 @@ static size_t copy_to_pending_readers(struct k_pipe *pipe, bool *need_resched,
 		}
 
 		reader_buf = reader->base.swap_data;
-		copy_size = MIN(len - written,
+		copy_size = min(len - written,
 				reader_buf->len - reader_buf->used);
 		memcpy(&reader_buf->data[reader_buf->used], &data[written], copy_size);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 37fdbf3bf0b6b..b316df8d89562 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1005,7 +1005,7 @@ void z_impl_k_thread_absolute_deadline_set(k_tid_t tid, int deadline)
 
 void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 {
-	deadline = CLAMP(deadline, 0, INT_MAX);
+	deadline = clamp(deadline, 0, INT_MAX);
 
 	int32_t newdl = k_cycle_get_32() + deadline;
 
@@ -1139,7 +1139,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
 	/* k_sleep() still returns 32 bit milliseconds for compatibility */
 	int64_t ms = K_TIMEOUT_EQ(timeout, K_FOREVER) ?
 		     K_TICKS_FOREVER :
-		     CLAMP(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
+		     clamp(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
 
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ms);
 	return (int32_t) ms;
diff --git a/kernel/thread.c b/kernel/thread.c
index bfcd9c87772fb..024b916b7fb5e 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -217,7 +217,7 @@ static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t s
 {
 	size_t bytes_to_copy;
 
-	bytes_to_copy = MIN(dest_size, src_size);
+	bytes_to_copy = min(dest_size, src_size);
 	memcpy(dest, src, bytes_to_copy);
 
 	return bytes_to_copy;
diff --git a/kernel/timeout.c b/kernel/timeout.c
index f0c2cc5ecec32..e9e4f86a85076 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -90,7 +90,7 @@ static int32_t next_timeout(int32_t ticks_elapsed)
 	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
 		ret = SYS_CLOCK_MAX_WAIT;
 	} else {
-		ret = MAX(0, to->dticks - ticks_elapsed);
+		ret = max(0, to->dticks - ticks_elapsed);
 	}
 
 	return ret;
@@ -124,7 +124,7 @@ k_ticks_t z_add_timeout(struct _timeout *to, _timeout_func_t fn, k_timeout_t tim
 	} else {
 		k_ticks_t dticks = Z_TICK_ABS(timeout.ticks) - curr_tick;
 
-		to->dticks = MAX(1, dticks);
+		to->dticks = max(1, dticks);
 		ticks = timeout.ticks;
 	}
 
@@ -322,7 +322,7 @@ k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
 		k_ticks_t dt = timeout.ticks;
 
 		if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
-			timepoint.tick = sys_clock_tick_get() + MAX(1, dt);
+			timepoint.tick = sys_clock_tick_get() + max(1, dt);
 		} else {
 			timepoint.tick = Z_TICK_ABS(dt);
 		}
diff --git a/kernel/timer.c b/kernel/timer.c
index 513d676bf0d2b..0fa14b55315d9 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -57,7 +57,7 @@ void z_timer_expiration_handler(struct _timeout *t)
 		k_timeout_t next = timer->period;
 
 		/* see note about z_add_timeout() in z_impl_k_timer_start() */
-		next.ticks = MAX(next.ticks - 1, 0);
+		next.ticks = max(next.ticks - 1, 0);
 
 #ifdef CONFIG_TIMEOUT_64BIT
 		/* Exploit the fact that uptime during a kernel
@@ -171,7 +171,7 @@ void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
 		 * is consistent for both 32-bit k_ticks_t which are unsigned
 		 * and 64-bit k_ticks_t which are signed.
 		 */
-		duration.ticks = MAX(1, duration.ticks);
+		duration.ticks = max(1, duration.ticks);
 		duration.ticks = duration.ticks - 1;
 	}
 
diff --git a/lib/heap/heap.c b/lib/heap/heap.c
index 8ccfb7093d708..52ab4384d61ac 100644
--- a/lib/heap/heap.c
+++ b/lib/heap/heap.c
@@ -17,7 +17,7 @@
 static inline void increase_allocated_bytes(struct z_heap *h, size_t num_bytes)
 {
 	h->allocated_bytes += num_bytes;
-	h->max_allocated_bytes = MAX(h->max_allocated_bytes, h->allocated_bytes);
+	h->max_allocated_bytes = max(h->max_allocated_bytes, h->allocated_bytes);
 }
 #endif
 
@@ -321,7 +321,7 @@ void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
 		rew = align & -align;
 		if (align != rew) {
 			align -= rew;
-			gap = MIN(rew, chunk_header_bytes(h));
+			gap = min(rew, chunk_header_bytes(h));
 		} else {
 			if (align <= chunk_header_bytes(h)) {
 				return sys_heap_alloc(heap, bytes);
@@ -482,7 +482,7 @@ void *sys_heap_realloc(struct sys_heap *heap, void *ptr, size_t bytes)
 	if (ptr2 != NULL) {
 		size_t prev_size = sys_heap_usable_size(heap, ptr);
 
-		memcpy(ptr2, ptr, MIN(prev_size, bytes));
+		memcpy(ptr2, ptr, min(prev_size, bytes));
 		sys_heap_free(heap, ptr);
 	}
 	return ptr2;
@@ -516,7 +516,7 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
 	if (ptr2 != NULL) {
 		size_t prev_size = sys_heap_usable_size(heap, ptr);
 
-		memcpy(ptr2, ptr, MIN(prev_size, bytes));
+		memcpy(ptr2, ptr, min(prev_size, bytes));
 		sys_heap_free(heap, ptr);
 	}
 	return ptr2;
diff --git a/lib/heap/heap.h b/lib/heap/heap.h
index d053a6db88392..5e38bc083eff0 100644
--- a/lib/heap/heap.h
+++ b/lib/heap/heap.h
@@ -245,7 +245,7 @@ static ALWAYS_INLINE chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes,
 	size_t oddments = ((bytes % CHUNK_UNIT) + (extra % CHUNK_UNIT) +
 			   chunk_header_bytes(h) + CHUNK_UNIT - 1U) / CHUNK_UNIT;
 
-	return (chunksz_t)MIN(chunks + oddments, h->end_chunk);
+	return (chunksz_t)min(chunks + oddments, h->end_chunk);
 }
 
 static inline chunksz_t min_chunk_size(struct z_heap *h)
diff --git a/lib/heap/heap_info.c b/lib/heap/heap_info.c
index c618af2ea0072..7f0698d28334d 100644
--- a/lib/heap/heap_info.c
+++ b/lib/heap/heap_info.c
@@ -32,7 +32,7 @@ static void heap_print_info(struct z_heap *h, bool dump_chunks)
 
 			do {
 				count++;
-				largest = MAX(largest, chunk_size(h, curr));
+				largest = max(largest, chunk_size(h, curr));
 				curr = next_free_chunk(h, curr);
 			} while (curr != first);
 		}
diff --git a/lib/heap/multi_heap.c b/lib/heap/multi_heap.c
index 5cecfec4f8a4f..e7579bbff7c13 100644
--- a/lib/heap/multi_heap.c
+++ b/lib/heap/multi_heap.c
@@ -120,7 +120,7 @@ void *sys_multi_heap_aligned_realloc(struct sys_multi_heap *mheap, void *cfg,
 	/* Otherwise, allocate a new block and copy the data */
 	new_ptr = sys_multi_heap_aligned_alloc(mheap, cfg, align, bytes);
 	if (new_ptr != NULL) {
-		memcpy(new_ptr, ptr, MIN(old_size, bytes));
+		memcpy(new_ptr, ptr, min(old_size, bytes));
 		sys_multi_heap_free(mheap, ptr);
 	}
 
diff --git a/lib/libc/newlib/libc-hooks.c b/lib/libc/newlib/libc-hooks.c
index 4ec1887f1cb54..55c109d8fe84d 100644
--- a/lib/libc/newlib/libc-hooks.c
+++ b/lib/libc/newlib/libc-hooks.c
@@ -121,7 +121,7 @@ static int malloc_prepare(void)
#ifdef USE_MALLOC_PREPARE
 
 #ifdef CONFIG_MMU
-	max_heap_size = MIN(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
+	max_heap_size = min(CONFIG_NEWLIB_LIBC_MAX_MAPPED_REGION_SIZE,
 			    k_mem_free_get());
 
 	if (max_heap_size != 0) {
diff --git a/lib/net_buf/buf.c b/lib/net_buf/buf.c
index 3dfbfb8699897..9a32aeddd3375 100644
--- a/lib/net_buf/buf.c
+++ b/lib/net_buf/buf.c
@@ -354,7 +354,7 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
 #if defined(CONFIG_NET_BUF_POOL_USAGE)
 	atomic_dec(&pool->avail_count);
 	__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
-	pool->max_used = MAX(pool->max_used,
+	pool->max_used = max(pool->max_used,
 			     pool->buf_count - atomic_get(&pool->avail_count));
 #endif
 	return buf;
@@ -630,7 +630,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
 	size_t to_copy;
 	size_t copied;
 
-	len = MIN(len, dst_len);
+	len = min(len, dst_len);
 
 	frag = src;
 
@@ -643,7 +643,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, const struct net_buf *src,
 	/* traverse the fragment chain until len bytes are copied */
 	copied = 0;
 	while (frag && len > 0) {
-		to_copy = MIN(len, frag->len - offset);
+		to_copy = min(len, frag->len - offset);
 		memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);
 
 		copied += to_copy;
@@ -673,7 +673,7 @@ size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
 	size_t max_size;
 
 	do {
-		uint16_t count = MIN(len, net_buf_tailroom(frag));
+		uint16_t count = min(len, net_buf_tailroom(frag));
 
 		net_buf_add_mem(frag, value8, count);
 		len -= count;
@@ -695,7 +695,7 @@ size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
 		pool = net_buf_pool_get(buf->pool_id);
 		max_size = pool->alloc->max_alloc_size;
 		frag = net_buf_alloc_len(pool,
-					 max_size ? MIN(len, max_size) : len,
+					 max_size ? min(len, max_size) : len,
 					 timeout);
 	}
 
@@ -729,7 +729,7 @@ size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *
 
 	while (buf && len > 0) {
 		bptr = buf->data + offset;
-		to_compare = MIN(len, buf->len - offset);
+		to_compare = min(len, buf->len - offset);
 
 		for (size_t i = 0; i < to_compare; ++i) {
 			if (dptr[compared] != bptr[i]) {
diff --git a/lib/os/mpsc_pbuf.c b/lib/os/mpsc_pbuf.c
index 56c8def5bc4fe..e5590ac8e196c 100644
--- a/lib/os/mpsc_pbuf.c
+++ b/lib/os/mpsc_pbuf.c
@@ -112,7 +112,7 @@ static inline void max_utilization_update(struct mpsc_pbuf_buffer *buffer)
 		return;
 	}
 
-	buffer->max_usage = MAX(buffer->max_usage, get_usage(buffer));
+	buffer->max_usage = max(buffer->max_usage, get_usage(buffer));
 }
 
 static inline bool is_valid(union mpsc_pbuf_generic *item)
diff --git a/lib/os/spsc_pbuf.c b/lib/os/spsc_pbuf.c
index 748be9fc1be0d..f842a3518e883 100644
--- a/lib/os/spsc_pbuf.c
+++ b/lib/os/spsc_pbuf.c
@@ -192,7 +192,7 @@ int spsc_pbuf_alloc(struct spsc_pbuf *pb, uint16_t len, char **buf)
 		free_space = rd_idx - wr_idx - FREE_SPACE_DISTANCE;
 	}
 
-	len = MIN(len, MAX(free_space - (int32_t)LEN_SZ, 0));
+	len = min(len, max(free_space - (int32_t)LEN_SZ, 0));
 	*buf = &data_loc[wr_idx + LEN_SZ];
 
 	return len;
diff --git a/lib/posix/options/file_system_r.c b/lib/posix/options/file_system_r.c
index 6cd14f9f81ad6..b5c8d2f473f2a 100644
--- a/lib/posix/options/file_system_r.c
+++ b/lib/posix/options/file_system_r.c
@@ -39,7 +39,7 @@ int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
 		return -rc;
 	}
 
-	strncpy(entry->d_name, de.name, MIN(sizeof(entry->d_name), sizeof(de.name)));
+	strncpy(entry->d_name, de.name, min(sizeof(entry->d_name), sizeof(de.name)));
 	entry->d_name[sizeof(entry->d_name) - 1] = '\0';
 
 	if (entry->d_name[0] == '\0') {
diff --git a/lib/posix/options/shm.c b/lib/posix/options/shm.c
index 120d7625e7cc5..9b446a7088585 100644
--- a/lib/posix/options/shm.c
+++ b/lib/posix/options/shm.c
@@ -211,7 +211,7 @@ static ssize_t shm_rw(struct shm_obj *shm, void *buf, size_t size, bool is_write
 	if (offset >= shm->size) {
 		size = 0;
 	} else {
-		size = MIN(size, shm->size - offset);
+		size = min(size, shm->size - offset);
 	}
 
 	if (size > 0) {
diff --git a/lib/posix/options/timespec_to_timeout.c b/lib/posix/options/timespec_to_timeout.c
index f8719a77eeba1..4c6cceddb00ac 100644
--- a/lib/posix/options/timespec_to_timeout.c
+++ b/lib/posix/options/timespec_to_timeout.c
@@ -21,5 +21,5 @@ uint32_t timespec_to_timeoutms(int clock_id, const struct timespec *abstime)
 		return 0;
 	}
 
-	return CLAMP(tp_diff(abstime, &curtime) / NSEC_PER_MSEC, 0, UINT32_MAX);
+	return clamp(tp_diff(abstime, &curtime) / NSEC_PER_MSEC, 0, UINT32_MAX);
 }
diff --git a/lib/utils/ring_buffer.c b/lib/utils/ring_buffer.c
index 652b53b758f8b..02364b1275a7b 100644
--- a/lib/utils/ring_buffer.c
+++ b/lib/utils/ring_buffer.c
@@ -20,7 +20,7 @@ uint32_t ring_buf_area_claim(struct ring_buf *buf, struct ring_buf_index *ring,
 		head_offset -= buf->size;
 	}
 	wrap_size = buf->size - head_offset;
-	size = MIN(size, wrap_size);
+	size = min(size, wrap_size);
 	*data = &buf->buffer[head_offset];
 	ring->head += size;
 
diff --git a/lib/utils/winstream.c b/lib/utils/winstream.c
index 35498b51a6002..136d0d200fb56 100644
--- a/lib/utils/winstream.c
+++ b/lib/utils/winstream.c
@@ -55,7 +55,7 @@ void sys_winstream_write(struct sys_winstream *ws,
 	/* Make room in the buffer by advancing start first (note same
 	 * len-1 from above)
 	 */
-	len = MIN(len, ws->len);
+	len = min(len, ws->len);
 	if (seq != 0) {
 		uint32_t avail = (ws->len - 1) - idx_sub(ws, end, start);
 
@@ -71,7 +71,7 @@ void sys_winstream_write(struct sys_winstream *ws,
 		data += len0 - len;
 	}
 
-	suffix = MIN(len, ws->len - end);
+	suffix = min(len, ws->len - end);
 	MEMCPY(&ws->data[end], data, suffix);
 	if (len > suffix) {
 		MEMCPY(&ws->data[0], data + suffix, len - suffix);
@@ -109,8 +109,8 @@ uint32_t sys_winstream_read(struct sys_winstream *ws,
 
 	/* Copy data */
 	copy = idx_sub(ws, ws->end, behind);
-	len = MIN(buflen, behind);
-	suffix = MIN(len, ws->len - copy);
+	len = min(buflen, behind);
+	suffix = min(len, ws->len - copy);
 	MEMCPY(buf, &ws->data[copy], suffix);
 	if (len > suffix) {
 		MEMCPY(buf + suffix, &ws->data[0], len - suffix);