From 4587595b25c51d30bf452539b7276f06d1d3df05 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sat, 9 Jul 2022 11:56:06 -0400 Subject: [PATCH 01/18] DO NOT REVIEW - SQUASHED PATCHES FROM #51924 No seriously, don't review this. Look at the same content in #51924 --- drivers/counter/counter_mchp_xec.c | 1 + drivers/watchdog/wdt_mchp_xec.c | 1 + include/zephyr/drivers/kscan.h | 1 + include/zephyr/toolchain/common.h | 16 +++++++++++-- lib/os/fdtable.c | 15 ++++++++---- .../cmsis_rtos_v1/philosophers/prj.conf | 1 + .../cmsis_rtos_v2/philosophers/prj.conf | 1 + samples/userspace/shared_mem/src/main.c | 4 ++++ scripts/build/gen_kobject_list.py | 4 +--- soc/xtensa/intel_adsp/common/boot_complete.c | 1 + soc/xtensa/intel_adsp/common/mem_window.c | 2 ++ subsys/net/ip/net_context.c | 4 ++-- tests/benchmarks/footprints/src/mutex.c | 3 +-- tests/benchmarks/footprints/src/semaphore.c | 2 +- tests/drivers/ipm/src/ipm_dummy.c | 7 ++++++ tests/kernel/sched/preempt/src/main.c | 10 ++++---- .../src/test_priority_scheduling.c | 24 ++++++++++++------- tests/kernel/semaphore/semaphore/src/main.c | 11 ++++++++- tests/kernel/semaphore/sys_sem/src/main.c | 21 ++-------------- .../src/test_threads_cancel_abort.c | 5 ++++ tests/kernel/workq/work/src/main.c | 2 ++ tests/net/socket/udp/src/main.c | 14 +++++++---- tests/posix/common/src/posix_rwlock.c | 21 +++++++++------- tests/ztest/error_hook/src/main.c | 4 ++++ 24 files changed, 115 insertions(+), 60 deletions(-) diff --git a/drivers/counter/counter_mchp_xec.c b/drivers/counter/counter_mchp_xec.c index 580e4f30908b2..a183ad69b1986 100644 --- a/drivers/counter/counter_mchp_xec.c +++ b/drivers/counter/counter_mchp_xec.c @@ -27,6 +27,7 @@ LOG_MODULE_REGISTER(counter_mchp_xec, CONFIG_COUNTER_LOG_LEVEL); #include +#include #include #include #include diff --git a/drivers/watchdog/wdt_mchp_xec.c b/drivers/watchdog/wdt_mchp_xec.c index a6da7d29ed5bd..1de7d0da217ac 100644 --- a/drivers/watchdog/wdt_mchp_xec.c +++ b/drivers/watchdog/wdt_mchp_xec.c @@ -14,6 +14,7 @@ LOG_MODULE_REGISTER(wdt_mchp_xec); #include +#include #include #include diff --git a/include/zephyr/drivers/kscan.h b/include/zephyr/drivers/kscan.h index 64e9e8bbb74fb..39b17648436dd 100644 --- a/include/zephyr/drivers/kscan.h +++ b/include/zephyr/drivers/kscan.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef __cplusplus extern "C" { diff --git a/include/zephyr/toolchain/common.h b/include/zephyr/toolchain/common.h index 3dd1f4ee903a2..0dbaf7ede63a5 100644 --- a/include/zephyr/toolchain/common.h +++ b/include/zephyr/toolchain/common.h @@ -204,8 +204,20 @@ * The section name is the struct type prepended with an underscore. * The subsection is "static" and the subsubsection is the variable name. * - * In the linker script, create output sections for these using - * ITERABLE_SECTION_ROM() or ITERABLE_SECTION_RAM(). + * Note that this is only the C syntax side of this feature. The + * linker script also needs to be made aware of the output sections. + * Traditionally this is done by inserting expansions of + * ITERABLE_SECTION_*() macros into + * include/zephyr/linker/common-ram/rom.ld. + * + * But now there is a variant linker script generation scheme when + * CONFIG_CMAKE_LINKER_GENERATOR=y. 
That is a different framework, + * where you have to use zephyr_iterable_section() commands in + * cmake/linker_script/common/common-ram.cmake + * + * (Really both of those are problematic: the build should be able to + * detect iterable sections in the build artifacts automatically from + * nothing more than what we have here). * * @note In order to store the element in ROM, a const specifier has to * be added to the declaration: const STRUCT_SECTION_ITERABLE(...); diff --git a/lib/os/fdtable.c b/lib/os/fdtable.c index 983e3e79fcc3c..99cc680a71e71 100644 --- a/lib/os/fdtable.c +++ b/lib/os/fdtable.c @@ -40,17 +40,14 @@ static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = { { /* STDIN */ .vtable = &stdinout_fd_op_vtable, - .refcount = ATOMIC_INIT(1) }, { /* STDOUT */ .vtable = &stdinout_fd_op_vtable, - .refcount = ATOMIC_INIT(1) }, { /* STDERR */ .vtable = &stdinout_fd_op_vtable, - .refcount = ATOMIC_INIT(1) }, #else { @@ -95,8 +92,9 @@ static int z_fd_unref(int fd) static int _find_fd_entry(void) { int fd; + const int min = IS_ENABLED(CONFIG_POSIX_API) ? 3 : 0; - for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) { + for (fd = min; fd < ARRAY_SIZE(fdtable); fd++) { if (!atomic_get(&fdtable[fd].refcount)) { return fd; } @@ -115,6 +113,15 @@ static int _check_fd(int fd) fd = k_array_index_sanitize(fd, ARRAY_SIZE(fdtable)); + /* lazy-init the mutex in the static descriptors */ + if (IS_ENABLED(CONFIG_POSIX_API) && fd < 3) { + if (atomic_cas(&fdtable[fd].refcount, 0, 1)) { + (void)k_mutex_lock(&fdtable_lock, K_FOREVER); + k_mutex_init(&fdtable[fd].lock); + k_mutex_unlock(&fdtable_lock); + } + } + if (!atomic_get(&fdtable[fd].refcount)) { errno = EBADF; return -1; diff --git a/samples/subsys/portability/cmsis_rtos_v1/philosophers/prj.conf b/samples/subsys/portability/cmsis_rtos_v1/philosophers/prj.conf index 561fc011c850a..f5771370ca07f 100644 --- a/samples/subsys/portability/cmsis_rtos_v1/philosophers/prj.conf +++ b/samples/subsys/portability/cmsis_rtos_v1/philosophers/prj.conf @@ -11,3 +11,4 @@ CONFIG_POLL=y CONFIG_SCHED_SCALABLE=y CONFIG_THREAD_CUSTOM_DATA=y CONFIG_CMSIS_THREAD_MAX_STACK_SIZE=1024 +CONFIG_SMP=n \ No newline at end of file diff --git a/samples/subsys/portability/cmsis_rtos_v2/philosophers/prj.conf b/samples/subsys/portability/cmsis_rtos_v2/philosophers/prj.conf index f6660f8e8cf6a..cc7d8b79776a1 100644 --- a/samples/subsys/portability/cmsis_rtos_v2/philosophers/prj.conf +++ b/samples/subsys/portability/cmsis_rtos_v2/philosophers/prj.conf @@ -12,3 +12,4 @@ CONFIG_SCHED_SCALABLE=y CONFIG_SYS_CLOCK_TICKS_PER_SEC=1000 CONFIG_CMSIS_V2_THREAD_MAX_STACK_SIZE=2048 CONFIG_CMSIS_V2_THREAD_DYNAMIC_STACK_SIZE=2048 +CONFIG_SMP=n diff --git a/samples/userspace/shared_mem/src/main.c b/samples/userspace/shared_mem/src/main.c index 82e28c64c3f92..6d07676ad9cd7 100644 --- a/samples/userspace/shared_mem/src/main.c +++ b/samples/userspace/shared_mem/src/main.c @@ -192,6 +192,7 @@ void main(void) k_thread_start(&ct_thread); k_sem_give(&allforone); + k_yield(); printk("CT thread started\n"); } @@ -243,6 +244,7 @@ void enc(void) } k_sem_give(&allforone); + k_yield(); } } @@ -265,6 +267,7 @@ void pt(void) fBUFIN = 1; } k_sem_give(&allforone); + k_yield(); k_sem_take(&allforone, K_FOREVER); if (fBUFIN == 0) { /* send message to decode */ printk("\nPT Sending Message 1'\n"); @@ -296,5 +299,6 @@ void ct(void) printk("CT MSG: %s\n", (char *)&tbuf); } k_sem_give(&allforone); + k_yield(); } } diff --git a/scripts/build/gen_kobject_list.py b/scripts/build/gen_kobject_list.py index 9a30540696c98..728a0eda94a33 
100755 --- a/scripts/build/gen_kobject_list.py +++ b/scripts/build/gen_kobject_list.py @@ -589,9 +589,7 @@ def find_kobjects(elf, syms): continue if "DW_AT_location" not in die.attributes: - debug_die(die, - "No location information for object '%s'; possibly stack allocated" - % name) + # This can happen for linker aliases continue loc = die.attributes["DW_AT_location"] diff --git a/soc/xtensa/intel_adsp/common/boot_complete.c b/soc/xtensa/intel_adsp/common/boot_complete.c index aca44c611130d..902647e2f7ea4 100644 --- a/soc/xtensa/intel_adsp/common/boot_complete.c +++ b/soc/xtensa/intel_adsp/common/boot_complete.c @@ -2,6 +2,7 @@ * SPDX-License-Identifier: Apache-2.0 */ +#include #include #include #include diff --git a/soc/xtensa/intel_adsp/common/mem_window.c b/soc/xtensa/intel_adsp/common/mem_window.c index 0fb68673696c1..935bf050f77e8 100644 --- a/soc/xtensa/intel_adsp/common/mem_window.c +++ b/soc/xtensa/intel_adsp/common/mem_window.c @@ -3,9 +3,11 @@ */ #include +#include #include #include #include +#include #include #include diff --git a/subsys/net/ip/net_context.c b/subsys/net/ip/net_context.c index 630b5d23ddd98..b7cda7e0e9fac 100644 --- a/subsys/net/ip/net_context.c +++ b/subsys/net/ip/net_context.c @@ -255,6 +255,8 @@ int net_context_get(sa_family_t family, enum net_sock_type type, uint16_t proto, } memset(&contexts[i], 0, sizeof(contexts[i])); + k_mutex_init(&contexts[i].lock); + /* FIXME - Figure out a way to get the correct network interface * as it is not known at this point yet. */ @@ -312,8 +314,6 @@ int net_context_get(sa_family_t family, enum net_sock_type type, uint16_t proto, k_sem_init(&contexts[i].recv_data_wait, 1, K_SEM_MAX_LIMIT); } - k_mutex_init(&contexts[i].lock); - contexts[i].flags |= NET_CONTEXT_IN_USE; *context = &contexts[i]; diff --git a/tests/benchmarks/footprints/src/mutex.c b/tests/benchmarks/footprints/src/mutex.c index bff420de8f281..79f44e876d65e 100644 --- a/tests/benchmarks/footprints/src/mutex.c +++ b/tests/benchmarks/footprints/src/mutex.c @@ -11,6 +11,7 @@ #define STACK_SIZE 512 K_MUTEX_DEFINE(user_mutex); +K_MUTEX_DEFINE(sys_mutex); #ifdef CONFIG_USERSPACE static void user_thread_fn(void *arg1, void *arg2, void *arg3) @@ -41,8 +42,6 @@ static void run_user_mutex(void) static void run_system_mutex(void) { - struct k_mutex sys_mutex; - k_mutex_init(&sys_mutex); k_mutex_lock(&sys_mutex, K_FOREVER); diff --git a/tests/benchmarks/footprints/src/semaphore.c b/tests/benchmarks/footprints/src/semaphore.c index 938747af46815..5a053bc44bc02 100644 --- a/tests/benchmarks/footprints/src/semaphore.c +++ b/tests/benchmarks/footprints/src/semaphore.c @@ -12,6 +12,7 @@ #define STACK_SIZE 512 K_SEM_DEFINE(semaphore0, 0, 1); +K_SEM_DEFINE(sem0, 0, 1); void thread_fn(void *p1, void *p2, void *p3) { @@ -23,7 +24,6 @@ void thread_fn(void *p1, void *p2, void *p3) void run_semaphore(void) { k_tid_t sem0_tid; - struct k_sem sem0; k_sem_init(&sem0, 0, 1); diff --git a/tests/drivers/ipm/src/ipm_dummy.c b/tests/drivers/ipm/src/ipm_dummy.c index af6f61df584a1..740014da289b8 100644 --- a/tests/drivers/ipm/src/ipm_dummy.c +++ b/tests/drivers/ipm/src/ipm_dummy.c @@ -72,6 +72,13 @@ static int ipm_dummy_send(const struct device *d, int wait, uint32_t id, irq_offload(ipm_dummy_isr, (const void *)d); + if (IS_ENABLED(CONFIG_ARC)) { + /* ARC's irq_offload doesn't switch threads, so we + * need to do it manually. 
See #51814 + */ + k_yield(); + } + if (wait) { while (driver_data->regs.busy) { /* busy-wait */ diff --git a/tests/kernel/sched/preempt/src/main.c b/tests/kernel/sched/preempt/src/main.c index 5c8bc474597c5..44438ad611f42 100644 --- a/tests/kernel/sched/preempt/src/main.c +++ b/tests/kernel/sched/preempt/src/main.c @@ -87,12 +87,12 @@ void wakeup_src_thread(int id) zassert_true(k_current_get() == &manager_thread, ""); - /* irq_offload() on ARM appears not to do what we want. It - * doesn't appear to go through the normal exception return - * path and always returns back into the calling context, so - * it can't be used to fake preemption. + /* irq_offload() on ARM (and ARC, see #51814) appear not to do + * what we want. It doesn't appear to go through the normal + * exception return path and always returns back into the + * calling context, so it can't be used to fake preemption. */ - if (do_irq && IS_ENABLED(CONFIG_ARM)) { + if (do_irq && (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARC))) { return; } diff --git a/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c b/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c index dfd7ba57460b9..a851544107377 100644 --- a/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c +++ b/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c @@ -27,6 +27,9 @@ static K_SEM_DEFINE(sema2, 0, NUM_THREAD); /* Semaphore on which application threads wait */ static K_SEM_DEFINE(sema3, 0, NUM_THREAD); +/* Semaphore to flag the next iteration */ +static K_SEM_DEFINE(sema4, 0, NUM_THREAD); + static int thread_idx; static struct k_thread t[NUM_THREAD]; @@ -40,6 +43,9 @@ static void thread_tslice(void *p1, void *p2, void *p3) (idx + 'A'); while (1) { + /* Wait for the signal to start */ + k_sem_take(&sema3, K_FOREVER); + /* Printing alphabet corresponding to thread */ TC_PRINT("%c", thread_parameter); /* Testing if threads are executed as per priority */ @@ -48,8 +54,9 @@ static void thread_tslice(void *p1, void *p2, void *p3) /* Release CPU and give chance to Ztest thread to run */ k_sem_give(&sema2); - /* Wait for release of semaphore from Ztest thread */ - k_sem_take(&sema3, K_FOREVER); + + /* Wait here for the end of the iteration */ + k_sem_take(&sema4, K_FOREVER); } } @@ -84,18 +91,19 @@ ZTEST(threads_scheduling, test_priority_scheduling) } while (count < ITRERATION_COUNT) { - - /* Wait for each thread to complete */ + /* Wake up each thread in turn and give it a chance to run */ for (int i = 0; i < NUM_THREAD; i++) { + k_sem_give(&sema3); k_sem_take(&sema2, K_FOREVER); } - /* Delay to give chance to last thread to run */ - k_sleep(K_MSEC(1)); - /* Giving Chance to other threads to run */ + /* Wake them all up for the next iteration */ for (int i = 0; i < NUM_THREAD; i++) { - k_sem_give(&sema3); + k_sem_give(&sema4); } + + /* Give them all a chance to block on sema3 again */ + k_msleep(100); count++; } diff --git a/tests/kernel/semaphore/semaphore/src/main.c b/tests/kernel/semaphore/semaphore/src/main.c index ddbc3c3106e40..44f00d168d981 100644 --- a/tests/kernel/semaphore/semaphore/src/main.c +++ b/tests/kernel/semaphore/semaphore/src/main.c @@ -89,6 +89,7 @@ struct k_thread tdata; void sem_give_task(void *p1, void *p2, void *p3) { k_sem_give((struct k_sem *)p1); + k_sleep(K_TICKS(1)); } void sem_reset_take_task(void *p1, void *p2, void *p3) @@ -110,6 +111,7 @@ static void tsema_thread_thread(struct k_sem *psem) K_PRIO_PREEMPT(0), K_USER | K_INHERIT_PERMS, K_NO_WAIT); + k_sleep(K_TICKS(1)); expect_k_sem_take_nomsg(psem, 
K_FOREVER, 0); /*clean the spawn thread avoid side effect in next TC*/ @@ -572,12 +574,19 @@ ZTEST(semaphore, test_sem_take_timeout_isr) * thread (which is at a lower priority) will cause simple_sem * to be signalled, thus waking this task. */ + k_sem_reset(&simple_sem); k_thread_create(&sem_tid_1, stack_1, STACK_SIZE, sem_take_timeout_isr_helper, NULL, NULL, NULL, K_PRIO_PREEMPT(0), 0, K_NO_WAIT); - k_sem_reset(&simple_sem); + if (IS_ENABLED(CONFIG_SMP)) { + /* This only works reliably on 1 core where that + * thread won't run yet. Give it a bit of time on SMP + * to get the semaphore into the right state + */ + k_sleep(K_TICKS(2)); + } expect_k_sem_take_nomsg(&simple_sem, SEM_TIMEOUT, 0); diff --git a/tests/kernel/semaphore/sys_sem/src/main.c b/tests/kernel/semaphore/sys_sem/src/main.c index fc65668a7adbf..96df83d526476 100644 --- a/tests/kernel/semaphore/sys_sem/src/main.c +++ b/tests/kernel/semaphore/sys_sem/src/main.c @@ -22,7 +22,7 @@ static K_THREAD_STACK_ARRAY_DEFINE(multi_stack_take, STACK_NUMS, STACK_SIZE); static struct k_thread multi_tid_give[STACK_NUMS]; static struct k_thread multi_tid_take[STACK_NUMS]; -static struct k_sem usage_sem, sync_sem, limit_sem, uninit_sem; +static struct k_sem usage_sem, sync_sem, limit_sem; static ZTEST_DMEM int flag; static ZTEST_DMEM atomic_t atomic_count; @@ -33,16 +33,6 @@ static ZTEST_DMEM atomic_t atomic_count; * @} */ -static void sem_thread_give_uninit(void *p1, void *p2, void *p3) -{ - ztest_set_fault_valid(true); - - /* use sem without initialise */ - k_sem_give(&uninit_sem); - - ztest_test_fail(); -} - static void sem_thread_give(void *p1, void *p2, void *p3) { flag = 1; @@ -69,7 +59,6 @@ static void thread_high_prio_sem_take(void *p1, void *p2, void *p3) * @brief Test semaphore usage with multiple thread * * @details Using semaphore with some situations - * - Use a uninitialized semaphore * - Use semaphore normally * - Use semaphore with different priority threads * @@ -90,6 +79,7 @@ ZTEST_USER(kernel_sys_sem, test_multiple_thread_sem_usage) zassert_equal(flag, 1, "value != 1"); zassert_equal(k_sem_count_get(&usage_sem), 0, "sem not be took"); + k_sem_reset(&usage_sem); /* Use sem with different priority thread */ k_thread_create(&multi_tid_take[0], multi_stack_take[0], STACK_SIZE, @@ -116,13 +106,6 @@ ZTEST_USER(kernel_sys_sem, test_multiple_thread_sem_usage) k_thread_join(&multi_tid_give[0], K_FOREVER); k_thread_join(&multi_tid_take[0], K_FOREVER); k_thread_join(&multi_tid_take[1], K_FOREVER); - - k_thread_create(&multi_tid_give[1], multi_stack_give[1], STACK_SIZE, - sem_thread_give_uninit, NULL, NULL, - NULL, PRIO, K_USER | K_INHERIT_PERMS, - K_NO_WAIT); - k_sleep(K_MSEC(20)); - k_thread_join(&multi_tid_give[1], K_FOREVER); } static void multi_thread_sem_give(void *p1, void *p2, void *p3) diff --git a/tests/kernel/threads/thread_apis/src/test_threads_cancel_abort.c b/tests/kernel/threads/thread_apis/src/test_threads_cancel_abort.c index 481aeac676df1..9593bfdefcd27 100644 --- a/tests/kernel/threads/thread_apis/src/test_threads_cancel_abort.c +++ b/tests/kernel/threads/thread_apis/src/test_threads_cancel_abort.c @@ -188,6 +188,11 @@ extern struct k_sem offload_sem; */ ZTEST(threads_lifecycle, test_abort_from_isr) { + if (IS_ENABLED(CONFIG_ARC)) { + /* This doesn't work on ARC currently, see #51814 */ + ztest_test_skip(); + } + isr_finished = false; k_thread_create(&tdata, tstack, STACK_SIZE, entry_abort_isr, NULL, NULL, NULL, 0, 0, K_NO_WAIT); diff --git a/tests/kernel/workq/work/src/main.c b/tests/kernel/workq/work/src/main.c index 
a51946ca50632..7b92ff2d44838 100644 --- a/tests/kernel/workq/work/src/main.c +++ b/tests/kernel/workq/work/src/main.c @@ -1302,6 +1302,8 @@ static bool try_queue_no_yield(struct k_work_q *wq) zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), -EBUSY); + k_sem_reset(&sync_sem); + return is_high; } diff --git a/tests/net/socket/udp/src/main.c b/tests/net/socket/udp/src/main.c index 7db2c299579b8..6b99e4fa201e6 100644 --- a/tests/net/socket/udp/src/main.c +++ b/tests/net/socket/udp/src/main.c @@ -924,7 +924,7 @@ static struct eth_fake_context eth_fake_data; static ZTEST_BMEM struct sockaddr_in6 server_addr; /* The semaphore is there to wait the data to be received. */ -static ZTEST_BMEM SYS_MUTEX_DEFINE(wait_data); +K_SEM_DEFINE(wait_data, 0, K_SEM_MAX_LIMIT); static struct net_if *eth_iface; static ZTEST_BMEM bool test_started; @@ -974,7 +974,7 @@ static int eth_fake_send(const struct device *dev, struct net_pkt *pkt) test_failed = false; } - sys_mutex_unlock(&wait_data); + k_sem_give(&wait_data); return 0; } @@ -1090,7 +1090,7 @@ ZTEST_USER(net_socket_udp, test_18_v6_sendmsg_with_txtime) rv = close(client_sock); zassert_equal(rv, 0, "close failed"); - if (sys_mutex_lock(&wait_data, WAIT_TIME)) { + if (k_sem_take(&wait_data, WAIT_TIME)) { zassert_true(false, "Timeout DNS query not received"); } @@ -1294,4 +1294,10 @@ ZTEST(net_socket_udp, test_23_v6_dgram_overflow) BUF_AND_SIZE(test_str_all_tx_bufs)); } -ZTEST_SUITE(net_socket_udp, NULL, NULL, NULL, NULL, NULL); +static void *suite_setup(void) +{ + k_object_access_grant(&wait_data, k_current_get()); + return NULL; +} + +ZTEST_SUITE(net_socket_udp, NULL, suite_setup, NULL, NULL, NULL); diff --git a/tests/posix/common/src/posix_rwlock.c b/tests/posix/common/src/posix_rwlock.c index 67fc45016672a..f7a7b0a0c8e9f 100644 --- a/tests/posix/common/src/posix_rwlock.c +++ b/tests/posix/common/src/posix_rwlock.c @@ -17,7 +17,7 @@ pthread_rwlock_t rwlock; static void *thread_top(void *p1) { pthread_t pthread; - uint32_t policy, ret = 0U; + uint32_t policy; struct sched_param param; int id = POINTER_TO_INT(p1); @@ -26,11 +26,12 @@ static void *thread_top(void *p1) printk("Thread %d scheduling policy = %d & priority %d started\n", id, policy, param.sched_priority); - ret = pthread_rwlock_tryrdlock(&rwlock); - if (ret) { + while (true) { + if (pthread_rwlock_tryrdlock(&rwlock) == 0) { + break; + } printk("Not able to get RD lock on trying, try again\n"); - zassert_false(pthread_rwlock_rdlock(&rwlock), - "Failed to acquire write lock"); + k_sleep(K_TICKS(1)); } printk("Thread %d got RD lock\n", id); @@ -39,10 +40,12 @@ static void *thread_top(void *p1) zassert_false(pthread_rwlock_unlock(&rwlock), "Failed to unlock"); printk("Thread %d acquiring WR lock\n", id); - ret = pthread_rwlock_trywrlock(&rwlock); - if (ret != 0U) { - zassert_false(pthread_rwlock_wrlock(&rwlock), - "Failed to acquire WR lock"); + while (true) { + if (pthread_rwlock_trywrlock(&rwlock) == 0U) { + break; + } + printk("Failed to acquire WR lock, trying again\n"); + k_sleep(K_TICKS(1)); } printk("Thread %d acquired WR lock\n", id); diff --git a/tests/ztest/error_hook/src/main.c b/tests/ztest/error_hook/src/main.c index 8805037f7f735..9d0aa32a732f1 100644 --- a/tests/ztest/error_hook/src/main.c +++ b/tests/ztest/error_hook/src/main.c @@ -322,6 +322,10 @@ static void tIsr_assert(const void *p) */ ZTEST(error_hook_tests, test_catch_assert_in_isr) { + if (IS_ENABLED(CONFIG_ARC)) { + ztest_test_skip(); + } + case_type = ZTEST_CATCH_ASSERT_IN_ISR; irq_offload(tIsr_assert, NULL); } From 
44ecced354cd9293ee273aac570db655447eb04e Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sat, 2 Jul 2022 17:52:59 -0400 Subject: [PATCH 02/18] kernel: Add waitq "lazy init", move z_waitq_head() Add a "lazy initialization" utility so kernel code can manage the case where it's faced with wait queues in zero-filled .bss without engaging in the self-referential struct gymnastics that have plagued K_SEM_DEFINE() forever. This is an easy check, even given that we have two separate wait queue backends. Also move the z_waitq_head() function from ksched.h to waitq.h, where it arguably should always have been. Signed-off-by: Andy Ross --- include/zephyr/kernel_structs.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h index 5a48745ffa680..5f4486ecd869b 100644 --- a/include/zephyr/kernel_structs.h +++ b/include/zephyr/kernel_structs.h @@ -226,6 +226,12 @@ extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b); #define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } } +#define Z_WAIT_Q_LAZY_INIT(wq) do { \ + if ((wq)->waitq.tree.lessthan_fn == NULL) { \ + (wq)->waitq.tree.lessthan_fn = z_priq_rb_lessthan; \ + } } while (false) + + #else typedef struct { @@ -234,6 +240,10 @@ typedef struct { #define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) } +#define Z_WAIT_Q_LAZY_INIT(wq) do { \ + if ((wq)->waitq.head == NULL) { \ + sys_dlist_init(&(wq)->waitq); } \ + } while (false) #endif /* kernel timeout record */ From b83149abb1c37e76ada2c864f5451d4f9f34acc1 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sun, 9 Oct 2022 13:14:58 -0700 Subject: [PATCH 03/18] tests/lib/ringbuffer: Use k_mutex, not sys_mutex sys_mutex is being deprecated Signed-off-by: Andy Ross --- tests/lib/ringbuffer/src/concurrent.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/lib/ringbuffer/src/concurrent.c b/tests/lib/ringbuffer/src/concurrent.c index 5bc5fd502581b..fe045413c49de 100644 --- a/tests/lib/ringbuffer/src/concurrent.c +++ b/tests/lib/ringbuffer/src/concurrent.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -24,7 +23,7 @@ #define VALUE 0xb #define TYPE 0xc -static ZTEST_BMEM SYS_MUTEX_DEFINE(mutex); +K_MUTEX_DEFINE(mutex); RING_BUF_ITEM_DECLARE(ringbuf, RINGBUFFER); static uint32_t output[LENGTH]; static uint32_t databuffer1[LENGTH]; @@ -32,11 +31,11 @@ static uint32_t databuffer2[LENGTH]; static void data_write(uint32_t *input) { - sys_mutex_lock(&mutex, K_FOREVER); + k_mutex_lock(&mutex, K_FOREVER); int ret = ring_buf_item_put(&ringbuf, TYPE, VALUE, input, LENGTH); zassert_equal(ret, 0); - sys_mutex_unlock(&mutex); + k_mutex_unlock(&mutex); } static void data_read(uint32_t *output) { @@ -45,9 +44,9 @@ uint8_t value, size32 = LENGTH; int ret; - sys_mutex_lock(&mutex, K_FOREVER); + k_mutex_lock(&mutex, K_FOREVER); ret = ring_buf_item_get(&ringbuf, &type, &value, output, &size32); - sys_mutex_unlock(&mutex); + k_mutex_unlock(&mutex); zassert_equal(ret, 0); zassert_equal(type, TYPE); From 69486ab1899e1e1e1d35c7a8acd0ec8a5cd07ab4 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Tue, 1 Nov 2022 13:49:17 -0700 Subject: [PATCH 04/18] subsys/tracing: Remove obj_tracking for semaphores and mutexes These structs are going away, becoming mere wrappers around zyncs. 
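Tooling that still needs to enumerate statically defined synchronization objects can, in principle, walk the k_zync iterable section introduced by the linker changes later in this series instead of the removed _track_list_k_sem/_track_list_k_mutex lists. A hypothetical sketch only (the k_zync section name comes from the zync patch below; nothing in this commit adds such a walk):

	/* Hypothetical: iterate statically defined zyncs via their
	 * iterable section rather than the removed tracking lists.
	 */
	STRUCT_SECTION_FOREACH(k_zync, zn) {
		/* inspect each statically defined zync object */
	}
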
Signed-off-by: Andy Ross --- doc/services/tracing/index.rst | 4 ++-- include/zephyr/tracing/tracing_macros.h | 18 ----------------- include/zephyr/tracing/tracking.h | 2 -- subsys/tracing/Kconfig | 18 ----------------- subsys/tracing/tracing_tracking.c | 26 ------------------------- tests/kernel/obj_tracking/src/main.c | 26 ------------------------- 6 files changed, 2 insertions(+), 92 deletions(-) diff --git a/doc/services/tracing/index.rst b/doc/services/tracing/index.rst index 418be0cdd4152..57d348b98da10 100644 --- a/doc/services/tracing/index.rst +++ b/doc/services/tracing/index.rst @@ -382,8 +382,8 @@ all initialized mutexes, one can write:: To enable object tracking, enable :kconfig:option:`CONFIG_TRACING_OBJECT_TRACKING`. Note that each list can be enabled or disabled via their tracing -configuration. For example, to disable tracking of semaphores, one can -disable :kconfig:option:`CONFIG_TRACING_SEMAPHORE`. +configuration. For example, to disable tracking of FIFOs, one can +disable :kconfig:option:`CONFIG_TRACING_FIFO`. Object tracking is behind tracing configuration as it currently leverages tracing infrastructure to perform the tracking. diff --git a/include/zephyr/tracing/tracing_macros.h b/include/zephyr/tracing/tracing_macros.h index 5f0f871aac77a..bc23b27715d20 100644 --- a/include/zephyr/tracing/tracing_macros.h +++ b/include/zephyr/tracing/tracing_macros.h @@ -87,24 +87,6 @@ #define sys_port_trace_type_mask_k_work_poll(trace_call) #endif -#if defined(CONFIG_TRACING_SEMAPHORE) - #define sys_port_trace_type_mask_k_sem(trace_call) trace_call -#else - #define sys_port_trace_type_mask_k_sem(trace_call) -#endif - -#if defined(CONFIG_TRACING_MUTEX) - #define sys_port_trace_type_mask_k_mutex(trace_call) trace_call -#else - #define sys_port_trace_type_mask_k_mutex(trace_call) -#endif - -#if defined(CONFIG_TRACING_CONDVAR) - #define sys_port_trace_type_mask_k_condvar(trace_call) trace_call -#else - #define sys_port_trace_type_mask_k_condvar(trace_call) -#endif - #if defined(CONFIG_TRACING_QUEUE) #define sys_port_trace_type_mask_k_queue(trace_call) trace_call #else diff --git a/include/zephyr/tracing/tracking.h b/include/zephyr/tracing/tracking.h index db1cefde58648..329f39f14856a 100644 --- a/include/zephyr/tracing/tracking.h +++ b/include/zephyr/tracing/tracking.h @@ -35,8 +35,6 @@ extern struct k_timer *_track_list_k_timer; extern struct k_mem_slab *_track_list_k_mem_slab; -extern struct k_sem *_track_list_k_sem; -extern struct k_mutex *_track_list_k_mutex; extern struct k_stack *_track_list_k_stack; extern struct k_msgq *_track_list_k_msgq; extern struct k_mbox *_track_list_k_mbox; diff --git a/subsys/tracing/Kconfig b/subsys/tracing/Kconfig index dd36635244dfa..a0ca3c0c77083 100644 --- a/subsys/tracing/Kconfig +++ b/subsys/tracing/Kconfig @@ -224,24 +224,6 @@ config TRACING_ISR Enable tracing ISRs. This requires the backend to be very low-latency. -config TRACING_SEMAPHORE - bool "Tracing Semaphores" - default y - help - Enable tracing Semaphores. - -config TRACING_MUTEX - bool "Tracing Mutexes" - default y - help - Enable tracing Mutexes. 
- -config TRACING_CONDVAR - bool "Tracing Condition Variables" - default y - help - Enable tracing Condition Variables - config TRACING_QUEUE bool "Tracing Queues" default y diff --git a/subsys/tracing/tracing_tracking.c b/subsys/tracing/tracing_tracking.c index 9a4ed2cc9e4ef..0e8d4caada0d4 100644 --- a/subsys/tracing/tracing_tracking.c +++ b/subsys/tracing/tracing_tracking.c @@ -15,12 +15,6 @@ struct k_spinlock _track_list_k_timer_lock; struct k_mem_slab *_track_list_k_mem_slab; struct k_spinlock _track_list_k_mem_slab_lock; -struct k_sem *_track_list_k_sem; -struct k_spinlock _track_list_k_sem_lock; - -struct k_mutex *_track_list_k_mutex; -struct k_spinlock _track_list_k_mutex_lock; - struct k_stack *_track_list_k_stack; struct k_spinlock _track_list_k_stack_lock; @@ -65,20 +59,6 @@ void sys_track_k_mem_slab_init(struct k_mem_slab *slab) SYS_TRACK_LIST_PREPEND(_track_list_k_mem_slab, slab)); } -void sys_track_k_sem_init(struct k_sem *sem) -{ - if (sem) { - SYS_PORT_TRACING_TYPE_MASK(k_sem, - SYS_TRACK_LIST_PREPEND(_track_list_k_sem, sem)); - } -} - -void sys_track_k_mutex_init(struct k_mutex *mutex) -{ - SYS_PORT_TRACING_TYPE_MASK(k_mutex, - SYS_TRACK_LIST_PREPEND(_track_list_k_mutex, mutex)); -} - void sys_track_k_stack_init(struct k_stack *stack) { SYS_PORT_TRACING_TYPE_MASK(k_stack, @@ -121,12 +101,6 @@ static int sys_track_static_init(const struct device *arg) SYS_PORT_TRACING_TYPE_MASK(k_mem_slab, SYS_TRACK_STATIC_INIT(k_mem_slab, 0)); - SYS_PORT_TRACING_TYPE_MASK(k_sem, - SYS_TRACK_STATIC_INIT(k_sem, 0)); - - SYS_PORT_TRACING_TYPE_MASK(k_mutex, - SYS_TRACK_STATIC_INIT(k_mutex, 0)); - SYS_PORT_TRACING_TYPE_MASK(k_stack, SYS_TRACK_STATIC_INIT(k_stack)); diff --git a/tests/kernel/obj_tracking/src/main.c b/tests/kernel/obj_tracking/src/main.c index ba71f523e7ce1..685997a1e22ad 100644 --- a/tests/kernel/obj_tracking/src/main.c +++ b/tests/kernel/obj_tracking/src/main.c @@ -13,8 +13,6 @@ void dummy_fn(struct k_timer *timer) K_TIMER_DEFINE(timer_s, dummy_fn, NULL); K_MEM_SLAB_DEFINE(slab_s, 8, 2, 8); -K_SEM_DEFINE(sem_s, 0, 1); -K_MUTEX_DEFINE(mutex_s); K_STACK_DEFINE(stack_s, 64); K_MSGQ_DEFINE(msgq_s, sizeof(int), 2, 4); K_MBOX_DEFINE(mbox_s); @@ -30,8 +28,6 @@ ZTEST(obj_tracking, test_obj_tracking_sanity) { struct k_timer timer; struct k_mem_slab slab; - struct k_sem sem; - struct k_mutex mutex; struct k_stack stack; struct k_msgq msgq; struct k_mbox mbox; @@ -62,28 +58,6 @@ ZTEST(obj_tracking, test_obj_tracking_sanity) } zassert_equal(count, 2, "Wrong number of mem_slab objects"); - k_sem_init(&sem, 1, 2); - count = 0; - list = _track_list_k_sem; - while (list != NULL) { - if (list == &sem || list == &sem_s) { - count++; - } - list = SYS_PORT_TRACK_NEXT((struct k_sem *)list); - } - zassert_equal(count, 2, "Wrong number of semaphore objects"); - - k_mutex_init(&mutex); - count = 0; - list = _track_list_k_mutex; - while (list != NULL) { - if (list == &mutex || list == &mutex_s) { - count++; - } - list = SYS_PORT_TRACK_NEXT((struct k_mutex *)list); - } - zassert_equal(count, 2, "Wrong number of mutex objects"); - k_stack_init(&stack, stack_array, 20); count = 0; list = _track_list_k_stack; From a05e2d4c05de6e574a5589ef68564f4cd3f61ac9 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sun, 6 Nov 2022 14:52:47 -0800 Subject: [PATCH 05/18] kernel: Clean up DYNAMIC_OBJECTS vs. HEAP_MEM_POOL_SIZE When DYNAMIC_OBJECTS are selected[1], it makes very little sense to leave the default kernel heap at zero bytes. Always give it a little space. 
The one exception is tests/kernel/common's MISRA_SANE=y case, which ends up putting very large zync objects (maximally-sized rbtree recursion stacks) there in some configurations (when CONFIG_WAITQ_SCALABLE=y, which is true on a few of our CI platforms). Make sure these options are not used together. [1] Which they always are. Our test suite kconfig "selects" CONFIG_DYNAMIC_OBJECTS whenever CONFIG_TEST_USERSPACE is enabled, meaning that it's actually not possible to get coverage of the =n case! Signed-off-by: Andy Ross --- kernel/Kconfig | 3 ++- tests/kernel/common/testcase.yaml | 2 ++ tests/kernel/threads/thread_stack/prj.conf | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/Kconfig b/kernel/Kconfig index b9a65676bba74..908ced7cf0e04 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -574,8 +574,9 @@ if KERNEL_MEM_POOL config HEAP_MEM_POOL_SIZE int "Heap memory pool size (in bytes)" - default 0 if !POSIX_MQUEUE default 1024 if POSIX_MQUEUE + default 512 if DYNAMIC_OBJECTS + default 0 help This option specifies the size of the heap memory pool used when dynamically allocating memory using k_malloc(). The maximum size of diff --git a/tests/kernel/common/testcase.yaml b/tests/kernel/common/testcase.yaml index c2b4ebbe01768..d6a3c89b56ea7 100644 --- a/tests/kernel/common/testcase.yaml +++ b/tests/kernel/common/testcase.yaml @@ -16,6 +16,8 @@ tests: - native_posix extra_configs: - CONFIG_MISRA_SANE=y + - CONFIG_WAITQ_SCALABLE=n + - CONFIG_WAITQ_DUMB=y kernel.common.nano32: filter: not CONFIG_KERNEL_COHERENCE extra_configs: diff --git a/tests/kernel/threads/thread_stack/prj.conf b/tests/kernel/threads/thread_stack/prj.conf index ddc7b71cef9ff..998f21fbb0198 100644 --- a/tests/kernel/threads/thread_stack/prj.conf +++ b/tests/kernel/threads/thread_stack/prj.conf @@ -4,4 +4,4 @@ CONFIG_INIT_STACKS=y CONFIG_THREAD_STACK_INFO=y CONFIG_APPLICATION_DEFINED_SYSCALL=y CONFIG_TEST_USERSPACE=y -CONFIG_HEAP_MEM_POOL_SIZE=192 +CONFIG_HEAP_MEM_POOL_SIZE=512 From 2406ef80bf176e1a094680458feb07680a4d45a8 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 7 Nov 2022 12:56:47 -0800 Subject: [PATCH 06/18] tests/sys_mutex: Fix scheduling/priority behavior This spot would unlock a mutex from a high priority thread (priority 10 in mutex_complex()) that was being waited on by a low priority thread (12/thread_12()), then expect that the thread would be able to acquire it before the higher priority thread tried to lock it again. Older k_mutex code would act as a scheduling point (and on SMP platforms another CPU could race in to "fix" it), but I don't see why this ever should have worked. Put a sleep in to allow the low priority worker to reach sys_mutex_lock() reliably. Similarly there was a spot in test_mutex_multithread_competition() where the code was assuming that the thread created in k_thread_create() would have a chance to run and pend on the mutex before the main thread released it to start the test. 
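To make the intent concrete, the fixed sequence in mutex_complex() now reads (abbreviated from the diff below; the new sleep is the explicit scheduling point):

	sys_mutex_unlock(&private_mutex);
	k_sleep(K_TICKS(1)); /* let thread_12 run and take the lock */
	rv = sys_mutex_lock(&private_mutex, K_NO_WAIT);
	zassert_equal(rv, -EBUSY, "Unexpectedly got lock on private mutex");
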
Signed-off-by: Andy Ross --- tests/kernel/mutex/sys_mutex/src/main.c | 3 ++- tests/kernel/mutex/sys_mutex/src/thread_competition.c | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/kernel/mutex/sys_mutex/src/main.c b/tests/kernel/mutex/sys_mutex/src/main.c index 86f90c6afcf73..fe46227300709 100644 --- a/tests/kernel/mutex/sys_mutex/src/main.c +++ b/tests/kernel/mutex/sys_mutex/src/main.c @@ -402,7 +402,8 @@ ZTEST_USER_OR_NOT(mutex_complex, test_mutex) k_sleep(K_MSEC(1)); /* Give thread_12 a chance to block on the mutex */ sys_mutex_unlock(&private_mutex); - sys_mutex_unlock(&private_mutex); /* thread_12 should now have lock */ + sys_mutex_unlock(&private_mutex); + k_sleep(K_TICKS(1)); /* Let thread_12 run */ rv = sys_mutex_lock(&private_mutex, K_NO_WAIT); zassert_equal(rv, -EBUSY, "Unexpectedly got lock on private mutex"); diff --git a/tests/kernel/mutex/sys_mutex/src/thread_competition.c b/tests/kernel/mutex/sys_mutex/src/thread_competition.c index 4501127918cb1..e8e60ceb3197a 100644 --- a/tests/kernel/mutex/sys_mutex/src/thread_competition.c +++ b/tests/kernel/mutex/sys_mutex/src/thread_competition.c @@ -118,6 +118,8 @@ ZTEST(mutex_complex, test_mutex_multithread_competition) &mutex, NULL, NULL, prio + 2, 0, K_NO_WAIT); + k_sleep(K_TICKS(1)); + /* Release mutex by current thread */ sys_mutex_unlock(&mutex); From d5814ea5d439c2e4408fcb33a361e035f3032d96 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 7 Nov 2022 14:18:26 -0800 Subject: [PATCH 07/18] tests/kernel/sys_mutex: Remove noop userspace test This test was merely looking for an -EACCES return from sys_mutex calls on a local sys_mutex struct. But nothing in the code ever used that struct, it just wrapped a kernel k_mutex. Zync doesn't synthesize this error, and there's no reason it should. Instead it offers (via plain old k_mutex) the "locks are atomic userspace access" feature sys_mutex always should have. Just remove the test. Signed-off-by: Andy Ross --- tests/kernel/mutex/sys_mutex/src/main.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/tests/kernel/mutex/sys_mutex/src/main.c b/tests/kernel/mutex/sys_mutex/src/main.c index fe46227300709..4e749202849c6 100644 --- a/tests/kernel/mutex/sys_mutex/src/main.c +++ b/tests/kernel/mutex/sys_mutex/src/main.c @@ -62,9 +62,6 @@ ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_2); ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_3); ZTEST_BMEM SYS_MUTEX_DEFINE(mutex_4); -#ifdef CONFIG_USERSPACE -static SYS_MUTEX_DEFINE(no_access_mutex); -#endif static ZTEST_BMEM SYS_MUTEX_DEFINE(not_my_mutex); static ZTEST_BMEM SYS_MUTEX_DEFINE(bad_count_mutex); @@ -444,20 +441,6 @@ ZTEST(mutex_complex, test_supervisor_access) zassert_true(rv == -EINVAL, "mutex wasn't locked"); } -ZTEST_USER_OR_NOT(mutex_complex, test_user_access) -{ -#ifdef CONFIG_USERSPACE - int rv; - - rv = sys_mutex_lock(&no_access_mutex, K_NO_WAIT); - zassert_true(rv == -EACCES, "accessed mutex not in memory domain"); - rv = sys_mutex_unlock(&no_access_mutex); - zassert_true(rv == -EACCES, "accessed mutex not in memory domain"); -#else - ztest_test_skip(); -#endif /* CONFIG_USERSPACE */ -} - /*test case main entry*/ static void *sys_mutex_tests_setup(void) { From 648a8e17f6fe466f96ab613279b2074bd35d7859 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 7 Nov 2022 14:33:32 -0800 Subject: [PATCH 08/18] tests/kernel/sched/metairq: Correct scheduling point behavior Another spot where a test was depending on the undocumented behavior of semaphore operations being a scheduling point. That's no longer true with zync. 
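Concretely, the test did the equivalent of (abbreviated from the test source):

	k_sem_give(&metairq_sem);
	/* ...and assumed the meta-IRQ thread had already run here */
	k_sem_take(&coop_sem2, K_FOREVER);
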
Add a yield to get the thread sequencing to match expectations. Signed-off-by: Andy Ross --- tests/kernel/sched/metairq/src/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/kernel/sched/metairq/src/main.c b/tests/kernel/sched/metairq/src/main.c index 89b54f7ba7742..497fa562e9965 100644 --- a/tests/kernel/sched/metairq/src/main.c +++ b/tests/kernel/sched/metairq/src/main.c @@ -190,6 +190,7 @@ ZTEST(suite_preempt_metairq, test_preempt_metairq) /* Kick off meta-IRQ */ k_sem_give(&metairq_sem); + k_yield(); /* Wait for all threads to finish */ k_sem_take(&coop_sem2, K_FOREVER); From 8ea69284282716593a75a6484ac0ec096166e9e8 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sat, 2 Jul 2022 17:53:32 -0400 Subject: [PATCH 09/18] kernel: Add k_zync, a universal Zephyr synchronization primitive. A k_zync object is intended to represent the core kernel synchronization mechanism in the Zephyr RTOS. In general it will be used via wrapper APIs implementing more commonly-understood idioms (e.g. k_mutex, k_sem, k_condvar). The zync object is always used in tandem with one or more k_zync_atom structs, which store a guaranteed-atomic value. The expectation is that those values will be used by an outer layer to implement lockless behavior for simple/"uncontended" special cases, falling through into a k_zync() call only when threads need to be suspended or awoken. Signed-off-by: Andy Ross --- cmake/linker_script/common/common-ram.cmake | 2 + drivers/bluetooth/hci/hci_b91.c | 2 +- drivers/bluetooth/hci/hci_esp32.c | 2 +- drivers/bluetooth/hci/ipm_stm32wb.c | 8 +- drivers/bluetooth/hci/rpmsg.c | 2 +- drivers/bluetooth/hci/spi.c | 6 +- .../ec_host_cmd_simulator.c | 4 +- drivers/modem/hl7800.c | 6 +- drivers/net/ppp.c | 2 +- drivers/wifi/simplelink/simplelink.c | 2 +- include/zephyr/kernel.h | 318 +++++++++---- include/zephyr/kernel_includes.h | 1 + include/zephyr/linker/common-ram.ld | 2 + include/zephyr/rtio/rtio.h | 4 +- include/zephyr/sys/zync.h | 390 +++++++++++++++ kernel/CMakeLists.txt | 4 +- kernel/Kconfig | 92 ++++ kernel/condvar.c | 127 ----- kernel/mutex.c | 283 ----------- kernel/poll.c | 39 +- kernel/sem.c | 195 -------- kernel/zync.c | 447 ++++++++++++++++++ lib/libc/picolibc/libc-hooks.c | 4 +- lib/os/fdtable.c | 2 +- lib/os/mutex.c | 19 +- .../platform/nrf_802154_spinel_backend_ipc.c | 4 +- .../nrf_802154_spinel_response_notifier.c | 2 +- .../trusted-firmware-m/interface/interface.c | 2 +- .../bluetooth/broadcast_audio_sink/src/main.c | 10 +- .../broadcast_audio_source/src/main.c | 4 +- samples/bluetooth/central_past/src/main.c | 8 +- .../src/main.c | 6 +- samples/bluetooth/hci_rpmsg/src/main.c | 2 +- samples/bluetooth/hci_spi/src/main.c | 4 +- samples/bluetooth/iso_broadcast/src/main.c | 4 +- .../iso_broadcast_benchmark/src/broadcaster.c | 4 +- .../iso_broadcast_benchmark/src/receiver.c | 12 +- .../iso_connected_benchmark/src/main.c | 12 +- samples/bluetooth/iso_receive/src/main.c | 12 +- samples/bluetooth/mesh_demo/src/main.c | 2 +- samples/bluetooth/periodic_sync/src/main.c | 6 +- samples/bluetooth/peripheral_past/src/main.c | 4 +- .../bluetooth/unicast_audio_client/src/main.c | 20 +- .../bluetooth/unicast_audio_server/src/main.c | 2 +- samples/boards/bbc_microbit/pong/src/main.c | 2 +- samples/boards/nrf/nrfx_prs/src/main.c | 4 +- samples/drivers/i2s/echo/src/main.c | 2 +- .../condition_variables/simple/src/main.c | 2 +- samples/net/cloud/google_iot_mqtt/src/dhcp.c | 2 +- samples/net/cloud/mqtt_azure/src/main.c | 2 +- samples/net/sockets/packet/src/packet.c | 2 +- 
samples/sensor/fxos8700-hid/src/main.c | 2 +- .../static_vrings/remote/src/main.c | 12 +- .../ipc/ipc_service/static_vrings/src/main.c | 12 +- samples/subsys/ipc/openamp/remote/src/main.c | 6 +- samples/subsys/ipc/openamp/src/main.c | 6 +- .../ipc/openamp_rsc_table/src/main_remote.c | 6 +- .../ipc/rpmsg_service/remote/src/main.c | 4 +- samples/subsys/ipc/rpmsg_service/src/main.c | 2 +- .../multidomain/remote/src/ipc_service.c | 4 +- .../logging/multidomain/src/ipc_service.c | 4 +- samples/subsys/mgmt/hawkbit/src/dhcp.c | 2 +- samples/subsys/usb/hid-cdc/src/main.c | 4 +- samples/subsys/usb/hid-mouse/src/main.c | 2 +- samples/userspace/shared_mem/prj.conf | 1 + scripts/build/gen_kobject_list.py | 25 +- subsys/bluetooth/host/smp.c | 2 +- subsys/net/ip/dhcpv4.c | 2 +- subsys/net/ip/net_if.c | 2 +- subsys/net/ip/net_mgmt.c | 4 +- subsys/net/ip/route.c | 2 +- subsys/net/ip/tcp.c | 2 +- subsys/net/lib/capture/capture.c | 2 +- subsys/net/lib/config/init.c | 4 +- subsys/net/lib/lwm2m/lwm2m_pull_context.c | 2 +- subsys/net/lib/lwm2m/lwm2m_registry.c | 2 +- subsys/net/lib/sockets/socket_dispatcher.c | 2 +- .../cmsis_rtos_v1/cmsis_semaphore.c | 2 +- subsys/portability/cmsis_rtos_v2/mutex.c | 7 +- subsys/portability/cmsis_rtos_v2/semaphore.c | 2 +- subsys/random/rand32_ctr_drbg.c | 2 +- subsys/testsuite/ztest/src/ztest_new.c | 3 +- subsys/tracing/tracing_core.c | 2 +- .../bsim_bt/bsim_test_adv_chain/src/main.c | 2 +- .../bsim_test_audio/src/broadcast_sink_test.c | 4 +- .../src/broadcast_source_test.c | 4 +- .../bsim_test_l2cap/src/main_l2cap_ecred.c | 8 +- .../bsim_test_mesh/src/test_advertiser.c | 2 +- tests/bluetooth/hci_prop_evt/src/main.c | 2 +- tests/bluetooth/host_long_adv_recv/src/main.c | 2 +- tests/boards/intel_adsp/smoke/src/ipm.c | 2 +- tests/drivers/spi/spi_loopback/src/spi.c | 2 +- tests/kernel/condvar/condvar_api/prj.conf | 1 + tests/kernel/device/prj.conf | 1 + .../events/event_api/src/test_event_apis.c | 4 +- tests/kernel/events/sys_event/src/main.c | 4 +- .../fpu_sharing/generic/src/load_store.c | 2 +- tests/kernel/fpu_sharing/generic/src/pi.c | 2 +- tests/kernel/mem_protect/futex/prj.conf | 1 + tests/kernel/mem_protect/mem_protect/prj.conf | 2 + .../mem_protect/mem_protect/src/mem_domain.c | 2 +- .../mem_protect/obj_validation/prj.conf | 1 + tests/kernel/mem_protect/sys_sem/prj.conf | 1 + tests/kernel/mem_protect/userspace/prj.conf | 1 + tests/kernel/msgq/msgq_api/prj.conf | 1 + tests/kernel/mutex/mutex_api/prj.conf | 2 + .../mutex/mutex_api/src/test_mutex_apis.c | 5 + tests/kernel/mutex/mutex_error_case/prj.conf | 1 + tests/kernel/mutex/sys_mutex/prj.conf | 4 + tests/kernel/pipe/pipe/prj.conf | 1 + tests/kernel/pipe/pipe_api/prj.conf | 1 + tests/kernel/poll/prj.conf | 2 + tests/kernel/poll/src/test_poll.c | 8 +- tests/kernel/queue/prj.conf | 1 + tests/kernel/sched/schedule_api/prj.conf | 1 + tests/kernel/sched/schedule_api/prj_dumb.conf | 1 + .../kernel/sched/schedule_api/prj_multiq.conf | 1 + .../src/test_priority_scheduling.c | 6 +- .../schedule_api/src/test_slice_scheduling.c | 4 +- tests/kernel/semaphore/semaphore/prj.conf | 1 + tests/kernel/semaphore/sys_sem/prj.conf | 1 + tests/kernel/sleep/prj.conf | 1 + tests/kernel/stack/stack/prj.conf | 1 + tests/kernel/threads/dynamic_thread/prj.conf | 1 + .../kernel/threads/dynamic_thread/src/main.c | 4 +- tests/kernel/threads/thread_init/prj.conf | 1 + tests/kernel/workq/user_work/prj.conf | 1 + tests/kernel/workq/work/prj.conf | 1 + tests/lib/mem_alloc/prj.conf | 1 + tests/net/checksum_offload/src/main.c | 2 +- 
tests/net/ptp/clock/src/main.c | 2 +- .../net/socket/offload_dispatcher/src/main.c | 2 +- tests/net/tcp/src/main.c | 2 +- tests/net/tx_timestamp/src/main.c | 2 +- tests/net/virtual/src/main.c | 2 +- tests/net/vlan/src/main.c | 2 +- .../logging/log_benchmark/testcase.yaml | 1 + .../log_core_additional/src/log_test.c | 2 +- .../subsys/portability/cmsis_rtos_v1/prj.conf | 3 + .../portability/cmsis_rtos_v1/src/mutex.c | 2 +- .../subsys/portability/cmsis_rtos_v2/prj.conf | 3 + tests/subsys/rtio/rtio_api/testcase.yaml | 1 + 142 files changed, 1434 insertions(+), 929 deletions(-) create mode 100644 include/zephyr/sys/zync.h delete mode 100644 kernel/condvar.c delete mode 100644 kernel/mutex.c delete mode 100644 kernel/sem.c create mode 100644 kernel/zync.c diff --git a/cmake/linker_script/common/common-ram.cmake b/cmake/linker_script/common/common-ram.cmake index 03c7960105ef7..ac8718bcb2e11 100644 --- a/cmake/linker_script/common/common-ram.cmake +++ b/cmake/linker_script/common/common-ram.cmake @@ -57,6 +57,8 @@ zephyr_iterable_section(NAME k_pipe GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SU zephyr_iterable_section(NAME k_sem GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4) zephyr_iterable_section(NAME k_queue GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4) zephyr_iterable_section(NAME k_condvar GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4) +zephyr_iterable_section(NAME k_zync GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4) +zephyr_iterable_section(NAME z_zync_pair GROUP DATA_REGION ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4) zephyr_linker_section(NAME _net_buf_pool_area GROUP DATA_REGION NOINPUT ${XIP_ALIGN_WITH_INPUT} SUBALIGN 4) zephyr_linker_section_configure(SECTION _net_buf_pool_area diff --git a/drivers/bluetooth/hci/hci_b91.c b/drivers/bluetooth/hci/hci_b91.c index 156eacd8606fe..2a0c91722af68 100644 --- a/drivers/bluetooth/hci/hci_b91.c +++ b/drivers/bluetooth/hci/hci_b91.c @@ -19,7 +19,7 @@ #define HCI_BT_B91_TIMEOUT K_MSEC(2000) -static K_SEM_DEFINE(hci_send_sem, 1, 1); +K_SEM_STATIC_DEFINE(hci_send_sem, 1, 1); static bool is_hci_event_discardable(const uint8_t *evt_data) { diff --git a/drivers/bluetooth/hci/hci_esp32.c b/drivers/bluetooth/hci/hci_esp32.c index 93adac2601610..a148eabbdb300 100644 --- a/drivers/bluetooth/hci/hci_esp32.c +++ b/drivers/bluetooth/hci/hci_esp32.c @@ -23,7 +23,7 @@ #define HCI_BT_ESP32_TIMEOUT K_MSEC(2000) -static K_SEM_DEFINE(hci_send_sem, 1, 1); +K_SEM_STATIC_DEFINE(hci_send_sem, 1, 1); static bool is_hci_event_discardable(const uint8_t *evt_data) { diff --git a/drivers/bluetooth/hci/ipm_stm32wb.c b/drivers/bluetooth/hci/ipm_stm32wb.c index 95b08de9a6388..be4fe8bc5b53f 100644 --- a/drivers/bluetooth/hci/ipm_stm32wb.c +++ b/drivers/bluetooth/hci/ipm_stm32wb.c @@ -49,10 +49,10 @@ static void sysevt_received(void *pdata); #define STM32WB_C2_LOCK_TIMEOUT K_MSEC(500) -static K_SEM_DEFINE(c2_started, 0, 1); -static K_SEM_DEFINE(ble_sys_wait_cmd_rsp, 0, 1); -static K_SEM_DEFINE(acl_data_ack, 1, 1); -static K_SEM_DEFINE(ipm_busy, 1, 1); +K_SEM_STATIC_DEFINE(c2_started, 0, 1); +K_SEM_STATIC_DEFINE(ble_sys_wait_cmd_rsp, 0, 1); +K_SEM_STATIC_DEFINE(acl_data_ack, 1, 1); +K_SEM_STATIC_DEFINE(ipm_busy, 1, 1); struct aci_set_tx_power { uint8_t cmd; diff --git a/drivers/bluetooth/hci/rpmsg.c b/drivers/bluetooth/hci/rpmsg.c index 4c3bcde6c3a8f..d04c02141ec62 100644 --- a/drivers/bluetooth/hci/rpmsg.c +++ b/drivers/bluetooth/hci/rpmsg.c @@ -27,7 +27,7 @@ #define IPC_BOUND_TIMEOUT_IN_MS K_MSEC(1000) static struct ipc_ept hci_ept; -static 
K_SEM_DEFINE(ipc_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc_bound_sem, 0, 1); static bool is_hci_event_discardable(const uint8_t *evt_data) { diff --git a/drivers/bluetooth/hci/spi.c b/drivers/bluetooth/hci/spi.c index a8cd2269c16dc..eb4a35ea2e818 100644 --- a/drivers/bluetooth/hci/spi.c +++ b/drivers/bluetooth/hci/spi.c @@ -64,9 +64,9 @@ static const struct gpio_dt_spec rst_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios static struct gpio_callback gpio_cb; -static K_SEM_DEFINE(sem_initialised, 0, 1); -static K_SEM_DEFINE(sem_request, 0, 1); -static K_SEM_DEFINE(sem_busy, 1, 1); +K_SEM_STATIC_DEFINE(sem_initialised, 0, 1); +K_SEM_STATIC_DEFINE(sem_request, 0, 1); +K_SEM_STATIC_DEFINE(sem_busy, 1, 1); static K_KERNEL_STACK_DEFINE(spi_rx_stack, 512); static struct k_thread spi_rx_thread_data; diff --git a/drivers/ec_host_cmd_periph/ec_host_cmd_simulator.c b/drivers/ec_host_cmd_periph/ec_host_cmd_simulator.c index 5c6194f6a2f03..f7e2e023ec17a 100644 --- a/drivers/ec_host_cmd_periph/ec_host_cmd_simulator.c +++ b/drivers/ec_host_cmd_periph/ec_host_cmd_simulator.c @@ -21,8 +21,8 @@ static uint8_t rx_buffer[256]; static size_t rx_buffer_len; /* Allow writing to rx buff at startup and block on reading. */ -static K_SEM_DEFINE(handler_owns, 0, 1); -static K_SEM_DEFINE(dev_owns, 1, 1); +K_SEM_STATIC_DEFINE(handler_owns, 0, 1); +K_SEM_STATIC_DEFINE(dev_owns, 1, 1); static ec_host_cmd_periph_api_send tx; diff --git a/drivers/modem/hl7800.c b/drivers/modem/hl7800.c index 01b82da0e7753..86baae7bd21f3 100644 --- a/drivers/modem/hl7800.c +++ b/drivers/modem/hl7800.c @@ -349,9 +349,9 @@ NET_BUF_POOL_DEFINE(mdm_recv_pool, CONFIG_MODEM_HL7800_RECV_BUF_CNT, static uint8_t mdm_recv_buf[MDM_MAX_DATA_LENGTH]; -static K_SEM_DEFINE(hl7800_RX_lock_sem, 1, 1); -static K_SEM_DEFINE(hl7800_TX_lock_sem, 1, 1); -static K_MUTEX_DEFINE(cb_lock); +K_SEM_STATIC_DEFINE(hl7800_RX_lock_sem, 1, 1); +K_SEM_STATIC_DEFINE(hl7800_TX_lock_sem, 1, 1); +K_MUTEX_STATIC_DEFINE(cb_lock); /* RX thread structures */ K_THREAD_STACK_DEFINE(hl7800_rx_stack, CONFIG_MODEM_HL7800_RX_STACK_SIZE); diff --git a/drivers/net/ppp.c b/drivers/net/ppp.c index ff003c6bc8afb..fafd9771170de 100644 --- a/drivers/net/ppp.c +++ b/drivers/net/ppp.c @@ -109,7 +109,7 @@ static bool rx_retry_pending; static bool uart_recovery_pending; static uint8_t *next_buf; -static K_SEM_DEFINE(uarte_tx_finished, 0, 1); +K_SEM_STATIC_DEFINE(uarte_tx_finished, 0, 1); static void uart_callback(const struct device *dev, struct uart_event *evt, diff --git a/drivers/wifi/simplelink/simplelink.c b/drivers/wifi/simplelink/simplelink.c index a60ac990b6367..abc17e6435617 100644 --- a/drivers/wifi/simplelink/simplelink.c +++ b/drivers/wifi/simplelink/simplelink.c @@ -40,7 +40,7 @@ struct simplelink_data { }; static struct simplelink_data simplelink_data; -static K_SEM_DEFINE(ip_acquired, 0, 1); +K_SEM_STATIC_DEFINE(ip_acquired, 0, 1); /* Handle connection events from the SimpleLink Event Handlers: */ static void simplelink_wifi_cb(uint32_t event, struct sl_connect_state *conn) diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h index ff4f0d7e919de..6e8151f424893 100644 --- a/include/zephyr/kernel.h +++ b/include/zephyr/kernel.h @@ -2718,34 +2718,14 @@ extern struct k_work_q k_sys_work_q; * @ingroup mutex_apis */ struct k_mutex { - /** Mutex wait queue */ - _wait_q_t wait_q; - /** Mutex owner */ - struct k_thread *owner; - - /** Current lock count */ - uint32_t lock_count; - - /** Original thread priority */ - int owner_orig_prio; - - SYS_PORT_TRACING_TRACKING_FIELD(k_mutex) + 
struct z_zync_pair zp; }; -/** - * @cond INTERNAL_HIDDEN - */ -#define Z_MUTEX_INITIALIZER(obj) \ - { \ - .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \ - .owner = NULL, \ - .lock_count = 0, \ - .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \ - } +#define K_OBJ_MUTEX K_OBJ_ZYNC -/** - * INTERNAL_HIDDEN @endcond - */ +#ifdef Z_ZYNC_INTERNAL_ATOM +#define Z_MUTEX_INITIALIZER(obj) { Z_ZYNCP_INITIALIZER(1, true, true, true, 1) } +#endif /** * @brief Statically define and initialize a mutex. @@ -2756,9 +2736,33 @@ struct k_mutex { * * @param name Name of the mutex. */ -#define K_MUTEX_DEFINE(name) \ - STRUCT_SECTION_ITERABLE(k_mutex, name) = \ - Z_MUTEX_INITIALIZER(name) +#define K_MUTEX_DEFINE(name) \ + Z_ZYNCP_DEFINE(_z_##name, 1, true, true, true, 1); \ + extern struct k_mutex name ALIAS_OF(_z_##name); + +/** + * @brief Statically define and initialize a local mutex. + * + * As for K_MUTEX_DEFINE, but the resulting symbol is static and + * cannot be used outside the current translation unit. + * + * @param name Name of the mutex. + */ +#define K_MUTEX_STATIC_DEFINE(name) \ + Z_ZYNCP_DEFINE(_z_##name, 1, true, true, true, 1); \ + static struct k_mutex name ALIAS_OF(_z_##name); + +/** @brief Define a mutex for use from a specific memory domain + * + * As for K_MUTEX_DEFINE, but places the (fast!) k_zync_atom_t in the + * specific app shared memory partition, allowing kernel-free + * operation for uncontended use cases. Note that such a mutex will + * still require system call operations if CONFIG_ZYNC_PRIO_BOOST=y or + * CONFIG_ZYNC_RECURSIVE=y. + */ +#define K_MUTEX_USER_DEFINE(name, part) \ + Z_ZYNCP_USER_DEFINE(_zm_##name, part, 1, true, true, true, 1); \ + extern struct k_mutex name ALIAS_OF(_zm_##name); /** * @brief Initialize a mutex. @@ -2772,8 +2776,18 @@ struct k_mutex { * @retval 0 Mutex object created * */ -__syscall int k_mutex_init(struct k_mutex *mutex); +static inline int k_mutex_init(struct k_mutex *mutex) +{ + struct k_zync_cfg cfg = { + .atom_init = 1, + .fair = true, + IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, (.prio_boost = true,)) + IF_ENABLED(CONFIG_ZYNC_RECURSIVE, (.recursive = true,)) + }; + z_pzync_init(&mutex->zp, &cfg); + return 0; +} /** * @brief Lock a mutex. @@ -2782,10 +2796,12 @@ __syscall int k_mutex_init(struct k_mutex *mutex); * the calling thread waits until the mutex becomes available or until * a timeout occurs. * - * A thread is permitted to lock a mutex it has already locked. The operation - * completes immediately and the lock count is increased by 1. + * If CONFIG_ZYNC_RECURSIVE=y, a thread is permitted to lock a mutex + * it has already locked. The operation completes immediately and the + * lock count is increased by 1. * - * Mutexes may not be locked in ISRs. + * Mutexes may be used in ISRs, though blocking is not possible and + * the only valid timeout parameter is K_NO_WAIT. * * @param mutex Address of the mutex. * @param timeout Waiting period to lock the mutex, @@ -2796,7 +2812,16 @@ __syscall int k_mutex_init(struct k_mutex *mutex); * @retval -EBUSY Returned without waiting. * @retval -EAGAIN Waiting period timed out. 
*/ -__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout); +static inline int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) +{ +#if defined(CONFIG_ZYNC_RECURSIVE) && defined(CONFIG_ZYNC_VALIDATE) + __ASSERT_NO_MSG(Z_PAIR_ZYNC(&mutex->zp)->cfg.recursive); +#endif +#if defined(CONFIG_ZYNC_PRIO_BOOST) && defined(CONFIG_ZYNC_VALIDATE) + __ASSERT_NO_MSG(Z_PAIR_ZYNC(&mutex->zp)->cfg.prio_boost); +#endif + return z_pzyncmod(&mutex->zp, -1, timeout); +} /** * @brief Unlock a mutex. @@ -2804,12 +2829,9 @@ __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout); * This routine unlocks @a mutex. The mutex must already be locked by the * calling thread. * - * The mutex cannot be claimed by another thread until it has been unlocked by - * the calling thread as many times as it was previously locked by that - * thread. - * - * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated - * in thread context due to ownership and priority inheritance semantics. + * The mutex cannot be claimed by another thread until it has been + * unlocked by the calling thread (if recursive locking is enabled, as + * many times as it was previously locked by that thread). * * @param mutex Address of the mutex. * @@ -2818,21 +2840,37 @@ __syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout); * @retval -EINVAL The mutex is not locked * */ -__syscall int k_mutex_unlock(struct k_mutex *mutex); +static inline int k_mutex_unlock(struct k_mutex *mutex) +{ +#ifdef CONFIG_ZYNC_VALIDATE + __ASSERT(Z_PAIR_ATOM(&mutex->zp)->val == 0, "mutex not locked"); +#endif +#ifdef Z_ZYNC_ALWAYS_KERNEL + /* Synthesize "soft failure" return codes. Needed by current + * tests, consider wrapping into ZYNC_VALIDATE. + */ + int32_t ret = z_zync_unlock_ok(Z_PAIR_ZYNC(&mutex->zp)); + + if (ret != 0) { + return ret; + } +#endif + return z_pzyncmod(&mutex->zp, 1, K_NO_WAIT); +} /** * @} */ - struct k_condvar { - _wait_q_t wait_q; + struct z_zync_pair zp; }; -#define Z_CONDVAR_INITIALIZER(obj) \ - { \ - .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \ - } +#define K_OBJ_CONDVAR K_OBJ_ZYNC + +#ifdef Z_ZYNC_INTERNAL_ATOM +#define Z_CONDVAR_INITIALIZER(obj) { Z_ZYNCP_INITIALIZER(0, true, false, false, 0) } +#endif /** * @defgroup condvar_apis Condition Variables APIs @@ -2846,24 +2884,38 @@ struct k_condvar { * @param condvar pointer to a @p k_condvar structure * @retval 0 Condition variable created successfully */ -__syscall int k_condvar_init(struct k_condvar *condvar); +static inline int k_condvar_init(struct k_condvar *condvar) +{ + struct k_zync_cfg cfg = { .fair = true }; + + z_pzync_init(&condvar->zp, &cfg); + return 0; +} /** * @brief Signals one thread that is pending on the condition variable * - * @param condvar pointer to a @p k_condvar structure + * @param cv pointer to a @p k_condvar structure * @retval 0 On success */ -__syscall int k_condvar_signal(struct k_condvar *condvar); +static inline int k_condvar_signal(struct k_condvar *cv) +{ + k_zync(Z_PAIR_ZYNC(&cv->zp), Z_PAIR_ATOM(&cv->zp), true, 1, K_NO_WAIT); + return 0; +} /** * @brief Unblock all threads that are pending on the condition * variable * - * @param condvar pointer to a @p k_condvar structure + * @param cv pointer to a @p k_condvar structure * @return An integer with number of woken threads on success */ -__syscall int k_condvar_broadcast(struct k_condvar *condvar); +static inline int k_condvar_broadcast(struct k_condvar *cv) +{ + return k_zync(Z_PAIR_ZYNC(&cv->zp), Z_PAIR_ATOM(&cv->zp), true, + K_ZYNC_ATOM_VAL_MAX, 
K_NO_WAIT);
+}

 /**
  * @brief Waits on the condition variable releasing the mutex lock
@@ -2882,8 +2934,18 @@ __syscall int k_condvar_broadcast(struct k_condvar *condvar);
  * @retval 0 On success
  * @retval -EAGAIN Waiting period timed out.
  */
-__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
-			     k_timeout_t timeout);
+static inline int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
+				 k_timeout_t timeout)
+{
+	int ret = z_pzync_condwait(&condvar->zp, &mutex->zp, timeout);
+
+	/* K_FOREVER (i.e. ignoring the user timeout) is the way this
+	 * was coded originally, and we actually have a test that
+	 * fails if we pass it K_NO_WAIT here.  Seems surprising...
+	 */
+	(void) k_mutex_lock(mutex, K_FOREVER);
+	return ret;
+}

 /**
  * @brief Statically define and initialize a condition variable.
@@ -2895,9 +2957,22 @@ __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
  *
  * @param name Name of the condition variable.
  */
-#define K_CONDVAR_DEFINE(name) \
-	STRUCT_SECTION_ITERABLE(k_condvar, name) = \
-		Z_CONDVAR_INITIALIZER(name)
+#define K_CONDVAR_DEFINE(name) \
+	Z_ZYNCP_DEFINE(_zc_##name, 0, true, false, false, 0); \
+	extern struct k_condvar name ALIAS_OF(_zc_##name);
+
+/** @brief Define a condition variable for use from a specific memory domain
+ *
+ * As for K_CONDVAR_DEFINE, but places the (fast!) k_zync_atom_t in
+ * the specific app shared memory partition, allowing kernel-free
+ * operation for uncontended use cases.  Note that such a condvar will
+ * still require system call operations if CONFIG_ZYNC_PRIO_BOOST=y or
+ * CONFIG_ZYNC_RECURSIVE=y.
+ */
+#define K_CONDVAR_USER_DEFINE(name, part) \
+	Z_ZYNCP_USER_DEFINE(_zc_##name, part, 0, true, false, false, 0) \
+	extern struct k_condvar name ALIAS_OF(_zc_##name);
+
 /**
  * @}
  */
@@ -2907,23 +2982,22 @@ __syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
  */

 struct k_sem {
-	_wait_q_t wait_q;
-	unsigned int count;
-	unsigned int limit;
-
-	_POLL_EVENT;
-
-	SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
+	struct z_zync_pair zp;
+	/* Workaround for a whiteboxed field used in upstream
+	 * libmetal, thankfully not in a way exercised by Zephyr.  Can
+	 * be removed when upstream is patched to use proper k_sem
+	 * APIs
+	 */
+	IF_ENABLED(CONFIG_LIBMETAL, (int8_t count;))
 };

+#define K_OBJ_SEM K_OBJ_ZYNC
+
+#ifdef Z_ZYNC_INTERNAL_ATOM
 #define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
-	{ \
-	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
-	.count = initial_count, \
-	.limit = count_limit, \
-	_POLL_EVENT_OBJ_INIT(obj) \
-	}
+	{ Z_ZYNCP_INITIALIZER(initial_count, true, false, false, count_limit) }
+#endif

 /**
  * INTERNAL_HIDDEN @endcond
@@ -2943,7 +3017,7 @@ struct k_sem {
  * counting purposes.
  *
  */
-#define K_SEM_MAX_LIMIT UINT_MAX
+#define K_SEM_MAX_LIMIT K_ZYNC_ATOM_VAL_MAX

 /**
  * @brief Initialize a semaphore.
@@ -2960,8 +3034,25 @@ struct k_sem {
  * @retval -EINVAL Invalid values
  *
  */
-__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
-			 unsigned int limit);
+static inline int k_sem_init(struct k_sem *sem, unsigned int initial_count,
+			     unsigned int limit)
+{
+	limit = limit > K_SEM_MAX_LIMIT ? K_SEM_MAX_LIMIT : limit;
+
+	struct k_zync_cfg cfg = {
+		.atom_init = initial_count,
+		.fair = true,
+		IF_ENABLED(CONFIG_ZYNC_MAX_VAL, (.max_val = limit,))
+	};
+
+	if (limit > K_ZYNC_ATOM_VAL_MAX || limit == 0 || initial_count > limit) {
+		return -EINVAL;
+	}
+
+	z_pzync_init(&sem->zp, &cfg);
+
+	return 0;
+}

 /**
  * @brief Take a semaphore.
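+ *
+ * Example (illustrative sketch only; "work_ready" is a hypothetical
+ * semaphore assumed to be defined elsewhere with K_SEM_DEFINE()):
+ *
+ * @code{.c}
+ * extern struct k_sem work_ready;
+ *
+ * void worker(void)
+ * {
+ *         if (k_sem_take(&work_ready, K_MSEC(100)) == 0) {
+ *                 // count was decremented; process the work item
+ *         }
+ * }
+ * @endcode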
@@ -2981,7 +3072,10 @@ __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
  * @retval -EAGAIN Waiting period timed out,
  *			or the semaphore was reset during the waiting period.
  */
-__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
+static inline int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
+{
+	return z_pzyncmod(&sem->zp, -1, timeout);
+}

 /**
  * @brief Give a semaphore.
@@ -2993,7 +3087,10 @@ __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
  *
  * @param sem Address of the semaphore.
  */
-__syscall void k_sem_give(struct k_sem *sem);
+static inline void k_sem_give(struct k_sem *sem)
+{
+	z_pzyncmod(&sem->zp, 1, K_NO_WAIT);
+}

 /**
  * @brief Resets a semaphore's count to zero.
@@ -3004,25 +3101,33 @@ __syscall void k_sem_give(struct k_sem *sem);
  *
  * @param sem Address of the semaphore.
  */
-__syscall void k_sem_reset(struct k_sem *sem);
+static inline void k_sem_reset(struct k_sem *sem)
+{
+	k_zync_reset(Z_PAIR_ZYNC(&sem->zp), Z_PAIR_ATOM(&sem->zp));
+}

 /**
  * @brief Get a semaphore's count.
  *
  * This routine returns the current count of @a sem.
  *
+ * @note Semaphores are inherently asynchronous objects, so the count
+ * returned here may be stale by the time the caller acts on it.  Uses
+ * of this API are very likely to be subject to unavoidable race
+ * conditions unless the app provides an exterior layer of locking.
+ * Users tempted by this call should strongly consider condition
+ * variables instead.
+ *
  * @param sem Address of the semaphore.
  *
  * @return Current semaphore count.
  */
-__syscall unsigned int k_sem_count_get(struct k_sem *sem);
-
-/**
- * @internal
- */
-static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
+static inline unsigned int k_sem_count_get(struct k_sem *sem)
 {
-	return sem->count;
+#ifdef Z_ZYNC_INTERNAL_ATOM
+	return z_zync_atom_val(Z_PAIR_ZYNC(&sem->zp));
+#else
+	return sem->zp.atom.val;
+#endif
 }

 /**
@@ -3036,12 +3141,28 @@ static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
  * @param initial_count Initial semaphore count.
  * @param count_limit Maximum permitted semaphore count.
  */
-#define K_SEM_DEFINE(name, initial_count, count_limit) \
-	STRUCT_SECTION_ITERABLE(k_sem, name) = \
-		Z_SEM_INITIALIZER(name, initial_count, count_limit); \
-	BUILD_ASSERT(((count_limit) != 0) && \
-		     ((initial_count) <= (count_limit)) && \
-		     ((count_limit) <= K_SEM_MAX_LIMIT));
+#define K_SEM_DEFINE(name, initial_count, count_limit) \
+	Z_ZYNCP_DEFINE(_z_##name, initial_count, true, false, false, count_limit); \
+	extern struct k_sem name ALIAS_OF(_z_##name);
+
+/**
+ * @brief Statically define and initialize a local semaphore.
+ *
+ * As for K_SEM_DEFINE(), but defines the resulting symbol as static,
+ * such that it cannot be used outside the local translation unit.
+ *
+ * @param name Name of the semaphore.
+ * @param initial_count Initial semaphore count.
+ * @param count_limit Maximum permitted semaphore count.
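+ *
+ * Example (illustrative; "drv_ready" is a hypothetical name): a binary
+ * semaphore private to one file:
+ *
+ * @code{.c}
+ * K_SEM_STATIC_DEFINE(drv_ready, 0, 1);
+ * @endcode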
+ */ +#define K_SEM_STATIC_DEFINE(name, initial_count, count_limit) \ + Z_ZYNCP_DEFINE(_z_##name, initial_count, true, false, false, count_limit); \ + static struct k_sem name ALIAS_OF(_z_##name); + +#define K_SEM_USER_DEFINE(name, part, initial_count, count_limit) \ + Z_ZYNCP_USER_DEFINE(_z_##name, part, initial_count, \ + true, true, true, count_limit); \ + extern struct k_sem name ALIAS_OF(_z_##name); /** @} */ @@ -5300,9 +5421,6 @@ enum _poll_types_bits { /* to be signaled by k_poll_signal_raise() */ _POLL_TYPE_SIGNAL, - /* semaphore availability */ - _POLL_TYPE_SEM_AVAILABLE, - /* queue/FIFO/LIFO data availability */ _POLL_TYPE_DATA_AVAILABLE, @@ -5312,6 +5430,9 @@ enum _poll_types_bits { /* pipe data availability */ _POLL_TYPE_PIPE_DATA_AVAILABLE, + /* zync transitions from 0 to any positive value */ + _POLL_TYPE_ZYNC, + _POLL_NUM_TYPES }; @@ -5325,9 +5446,6 @@ enum _poll_states_bits { /* signaled by k_poll_signal_raise() */ _POLL_STATE_SIGNALED, - /* semaphore is available */ - _POLL_STATE_SEM_AVAILABLE, - /* data is available to read on queue/FIFO/LIFO */ _POLL_STATE_DATA_AVAILABLE, @@ -5340,6 +5458,9 @@ enum _poll_states_bits { /* data is available to read from a pipe */ _POLL_STATE_PIPE_DATA_AVAILABLE, + /* zync has transitioned to positive value */ + _POLL_STATE_ZYNC, + _POLL_NUM_STATES }; @@ -5367,11 +5488,13 @@ enum _poll_states_bits { /* public - values for k_poll_event.type bitfield */ #define K_POLL_TYPE_IGNORE 0 #define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL) -#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE) #define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE) #define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE #define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE) #define K_POLL_TYPE_PIPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_PIPE_DATA_AVAILABLE) +#define K_POLL_TYPE_ZYNC Z_POLL_TYPE_BIT(_POLL_TYPE_ZYNC) + +#define K_POLL_TYPE_SEM_AVAILABLE K_POLL_TYPE_ZYNC /* public - polling modes */ enum k_poll_modes { @@ -5384,12 +5507,14 @@ enum k_poll_modes { /* public - values for k_poll_event.state bitfield */ #define K_POLL_STATE_NOT_READY 0 #define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED) -#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE) #define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE) #define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE #define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE) #define K_POLL_STATE_PIPE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_PIPE_DATA_AVAILABLE) #define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED) +#define K_POLL_STATE_ZYNC Z_POLL_STATE_BIT(_POLL_STATE_ZYNC) + +#define K_POLL_STATE_SEM_AVAILABLE K_POLL_STATE_ZYNC /* public - poll signal object */ struct k_poll_signal { @@ -5449,6 +5574,7 @@ struct k_poll_event { #ifdef CONFIG_PIPES struct k_pipe *pipe; #endif + struct k_zync *zync; }; }; diff --git a/include/zephyr/kernel_includes.h b/include/zephyr/kernel_includes.h index 4e19fa1bd962a..fd4e003df652e 100644 --- a/include/zephyr/kernel_includes.h +++ b/include/zephyr/kernel_includes.h @@ -39,6 +39,7 @@ #include #include #include +#include #include #endif /* ZEPHYR_INCLUDE_KERNEL_INCLUDES_H_ */ diff --git a/include/zephyr/linker/common-ram.ld b/include/zephyr/linker/common-ram.ld index 553879ab7d9e0..24bcf39f670d5 100644 --- a/include/zephyr/linker/common-ram.ld +++ 
b/include/zephyr/linker/common-ram.ld @@ -99,6 +99,8 @@ ITERABLE_SECTION_RAM_GC_ALLOWED(k_event, 4) ITERABLE_SECTION_RAM_GC_ALLOWED(k_queue, 4) ITERABLE_SECTION_RAM_GC_ALLOWED(k_condvar, 4) + ITERABLE_SECTION_RAM_GC_ALLOWED(k_zync, 4) + ITERABLE_SECTION_RAM_GC_ALLOWED(z_zync_pair, 4) SECTION_DATA_PROLOGUE(_net_buf_pool_area,,SUBALIGN(4)) { diff --git a/include/zephyr/rtio/rtio.h b/include/zephyr/rtio/rtio.h index abbd1f4794733..695fc884c3cf2 100644 --- a/include/zephyr/rtio/rtio.h +++ b/include/zephyr/rtio/rtio.h @@ -425,9 +425,9 @@ static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe, */ #define RTIO_DEFINE(name, exec, sq_sz, cq_sz) \ IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \ - (static K_SEM_DEFINE(_submit_sem_##name, 0, K_SEM_MAX_LIMIT))) \ + (K_SEM_STATIC_DEFINE(_submit_sem_##name, 0, K_SEM_MAX_LIMIT))) \ IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \ - (static K_SEM_DEFINE(_consume_sem_##name, 0, 1))) \ + (K_SEM_STATIC_DEFINE(_consume_sem_##name, 0, 1))) \ static RTIO_SQ_DEFINE(_sq_##name, sq_sz); \ static RTIO_CQ_DEFINE(_cq_##name, cq_sz); \ STRUCT_SECTION_ITERABLE(rtio, name) = { \ diff --git a/include/zephyr/sys/zync.h b/include/zephyr/sys/zync.h new file mode 100644 index 0000000000000..2bc42ae840aac --- /dev/null +++ b/include/zephyr/sys/zync.h @@ -0,0 +1,390 @@ +/* Copyright (c) 2022 Google LLC. + * SPDX-License-Identifier: Apache-2.0 + */ +#ifndef ZEPHYR_SYS_ZYNC_H +#define ZEPHYR_SYS_ZYNC_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define K_ZYNC_ATOM_VAL_BITS 24 +#define K_ZYNC_ATOM_VAL_MAX ((int32_t)(BIT(K_ZYNC_ATOM_VAL_BITS) - 1)) + +/** @brief Zephyr atomic synchronization primitive + * + * The zync atom stores the counted state variable for a struct + * k_zync, in such a way that it can be atomically modified. + */ +typedef union { + atomic_t atomic; + struct { + uint32_t val : K_ZYNC_ATOM_VAL_BITS; /* Value of the lock/counter */ + bool waiters : 1; /* Threads waiting? */ + }; +} k_zync_atom_t; + +/* True if zyncs must track their "owner" */ +#if defined(CONFIG_ZYNC_PRIO_BOOST) || defined(CONFIG_ZYNC_RECURSIVE) \ + || defined(CONFIG_ZYNC_VALIDATE) +#define Z_ZYNC_OWNER 1 +#endif + +/* True if all zync calls must go through the full kernel call + * (i.e. 
the atomic shortcut can't be used)
+ */
+#if defined(CONFIG_ZYNC_RECURSIVE) || defined(CONFIG_ZYNC_MAX_VAL) \
+	|| defined(CONFIG_ZYNC_PRIO_BOOST) \
+	|| (defined(CONFIG_ZYNC_USERSPACE_COMPAT) && defined(CONFIG_USERSPACE))
+#define Z_ZYNC_ALWAYS_KERNEL 1
+#endif
+
+/* True if every k_zync struct includes its own atom (it's not in the
+ * zync_pair to make all the vrfy boilerplate simpler)
+ */
+#if defined(Z_ZYNC_ALWAYS_KERNEL) || !defined(CONFIG_USERSPACE)
+#define Z_ZYNC_INTERNAL_ATOM 1
+#endif
+
+struct k_zync_cfg {
+	uint32_t atom_init : K_ZYNC_ATOM_VAL_BITS;
+	bool fair : 1;
+
+	IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, (bool prio_boost : 1;))
+	IF_ENABLED(CONFIG_ZYNC_RECURSIVE,  (bool recursive : 1;))
+	IF_ENABLED(CONFIG_ZYNC_MAX_VAL,    (uint32_t max_val;))
+};
+
+/** @brief Fundamental Zephyr thread synchronization primitive
+ *
+ * @see `k_zync()`
+ */
+struct k_zync {
+	_wait_q_t waiters;
+
+	IF_ENABLED(Z_ZYNC_OWNER, (struct k_thread *owner;))
+	IF_ENABLED(CONFIG_POLL, (sys_dlist_t poll_events;))
+	struct k_spinlock lock;
+	struct k_zync_cfg cfg;
+
+	IF_ENABLED(CONFIG_ZYNC_RECURSIVE, (uint32_t rec_count;))
+	IF_ENABLED(Z_ZYNC_INTERNAL_ATOM, (k_zync_atom_t atom;))
+	IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, (int8_t orig_prio;))
+	IF_ENABLED(CONFIG_POLL, (bool pollable;))
+};
+
+#define Z_ZYNC_MVCLAMP(v) ((v) == 0 ? K_ZYNC_ATOM_VAL_MAX \
+			   : CLAMP((v), 0, K_ZYNC_ATOM_VAL_MAX))
+
+#define K_ZYNC_INITIALIZER(init, isfair, rec, prioboost, maxval) { \
+		.cfg.atom_init = (init), \
+		IF_ENABLED(CONFIG_ZYNC_MAX_VAL, \
+			   (.cfg.max_val = Z_ZYNC_MVCLAMP(maxval),)) \
+		IF_ENABLED(CONFIG_ZYNC_RECURSIVE, \
+			   (.cfg.recursive = (rec),)) \
+		IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, \
+			   (.cfg.prio_boost = (prioboost),)) \
+		IF_ENABLED(CONFIG_POLL, \
+			   (.pollable = (init != 0),)) \
+		.cfg.fair = (isfair) }
+
+#define K_ZYNC_DEFINE(name, init, isfair, rec, prioboost, maxval) \
+	STRUCT_SECTION_ITERABLE(k_zync, name) = \
+		K_ZYNC_INITIALIZER((init), (isfair), (rec), \
+				   (prioboost), (maxval));
+
+/** @brief Atomically modify a k_zync_atom
+ *
+ * This macro heads a code block which is responsible for assigning
+ * fields of "new_atom" in terms of the (repeatedly-re-read)
+ * "old_atom", attempting to set the value with `atomic_cas()`, and
+ * repeating the process if the value was changed from another
+ * context.  Code can exit the loop without setting any value by using
+ * a break statement.
+ *
+ * No other modification of the values inside a k_zync_atom from
+ * potentially simultaneous contexts is permitted.
+ *
+ * @note The atom argument is expanded multiple times in the macro
+ * body and must not have side effects.
+ *
+ * @param atom Pointer to a k_zync_atom to modify
+ */
+#define K_ZYNC_ATOM_SET(atom) \
+for (k_zync_atom_t old_atom = { .atomic = atomic_get(&(atom)->atomic) }, \
+     new_atom = old_atom, done = {}; \
+     !done.atomic; \
+     done.atomic = atomic_cas(&(atom)->atomic, old_atom.atomic, new_atom.atomic)\
+	, old_atom.atomic = done.atomic ? \
+		old_atom.atomic : atomic_get(&(atom)->atomic) \
+	, new_atom = done.atomic ? new_atom : old_atom)

+/** @brief Try a zync atom modification
+ *
+ * Attempts an atomic mod operation on the value field of a zync atom,
+ * as specified for k_zync() (but without clamping to a max_val other
+ * than the static field maximum).  Returns true if the modification
+ * was made completely, without saturation, and with no other threads
+ * waiting.  Will not otherwise modify the atom state, and no
+ * intermediate states will be visible to other zync code.
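+ *
+ * Illustrative fast-path sketch (this mirrors what z_pzyncmod() below
+ * does; it is not a normative recipe):
+ *
+ * @code{.c}
+ * if (k_zync_try_mod(atom, -1)) {
+ *         return 0;  // uncontended "take", no syscall needed
+ * }
+ * ret = k_zync(zync, atom, false, -1, timeout);  // contended slow path
+ * @endcode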
+ *
+ * @param atom A pointer to a zync atom to be modified
+ * @param mod A count to add to the atom value
+ * @return True if the atom was successfully modified, otherwise false
+ */
+static inline bool k_zync_try_mod(k_zync_atom_t *atom, int32_t mod)
+{
+	k_zync_atom_t modded, old = { .atomic = atom->atomic };
+
+	if (mod > 0 && (old.waiters || (mod > (K_ZYNC_ATOM_VAL_MAX - old.val)))) {
+		return false;
+	}
+	if (mod < 0 && (-mod > old.val)) {
+		return false;
+	}
+
+	modded = old;
+	modded.val = old.val + mod;
+	return atomic_cas(&atom->atomic, old.atomic, modded.atomic);
+}
+
+/** @brief Initialize a Zync
+ *
+ * Initializes and configures a struct k_zync.  Resets all internal
+ * state, but does not wake waiting threads as for `k_zync_reset()`.
+ *
+ * @see k_zync_reset()
+ * @see k_zync_set_config()
+ *
+ * @param zync The object to configure
+ * @param atom The zync's atom, whose value is set to the configured
+ *             initial count
+ * @param cfg Initial configuration for the zync object
+ */
+__syscall void k_zync_init(struct k_zync *zync, k_zync_atom_t *atom,
+			   struct k_zync_cfg *cfg);
+
+/** @brief Set zync configuration parameters
+ *
+ * Reconfigures an already-initialized k_zync object.  This only
+ * changes parameters, it does not "reset" the object by waking up
+ * waiters or modifying atom state.
+ *
+ * @see k_zync_get_config()
+ * @see k_zync_init()
+ * @see k_zync_reset()
+ *
+ * @param zync A struct k_zync
+ * @param cfg Configuration values to set
+ */
+__syscall void k_zync_set_config(struct k_zync *zync, const struct k_zync_cfg *cfg);
+
+/** @brief Get zync configuration parameters
+ *
+ * Returns the current configuration parameters for a k_zync object in
+ * a caller-provided struct.  Does not modify zync state.
+ *
+ * @see k_zync_set_config()
+ *
+ * @param zync A struct k_zync
+ * @param cfg Storage for returned configuration values
+ */
+__syscall void k_zync_get_config(struct k_zync *zync, struct k_zync_cfg *cfg);
+
+/** @brief Reset k_zync object
+ *
+ * This "resets" a zync object by atomically setting the atom value to
+ * its initial value and waking up any waiters, who will return with a
+ * -EAGAIN result code.
+ *
+ * @param zync A struct k_zync to reset
+ * @param atom The zync's atom
+ */
+__syscall void k_zync_reset(struct k_zync *zync, k_zync_atom_t *atom);
+
+/** @brief Zephyr universal synchronization primitive
+ *
+ * A k_zync object represents the core kernel synchronization
+ * mechanism in the Zephyr RTOS.  In general it will be used via
+ * wrapper APIs implementing more commonly-understood idioms
+ * (e.g. k_mutex, k_sem, k_condvar).  The zync object is always used
+ * in tandem with one or more k_zync_atom structs, which store a
+ * guaranteed-atomic value.  The expectation is that those values will
+ * be used by an outer layer to implement lockless behavior for
+ * simple/"uncontended" special cases, falling through into a k_zync()
+ * call only when threads need to be suspended or awoken.
+ *
+ * On entry to k_zync, the kernel will:
+ *
+ * 1. Atomically add the signed integer value "mod" to the ``val``
+ *    field of the "mod_atom" argument.  The math is saturated,
+ *    clamping to the inclusive range between zero and the object's
+ *    maximum (which is always K_ZYNC_ATOM_VAL_MAX unless
+ *    CONFIG_ZYNC_MAX_VAL is set, allowing it to be configured
+ *    per-object)
+ *
+ * 2. Wake up one thread from the zync wait queue (if any exist) for
+ *    each unit of increase of the ``val`` field of "mod_atom".
+ *
+ * 3. If the "reset_atom" argument is true, atomically set the "val"
+ *    field of "mod_atom" to zero.
+ *    The atom value will never be seen
+ *    to change by external code in other ways, regardless of the
+ *    value of "mod".  Effectively this causes the zync to act as a
+ *    wakeup source (as for e.g. condition variables), but without
+ *    maintaining a "semaphore count".
+ *
+ * 4. If the "mod" step above would have caused the "mod_atom" value
+ *    to be negative before clamping, the current thread will pend on
+ *    the zync object's wait queue using the provided timeout (which
+ *    may be K_NO_WAIT).  Upon waking up, it will repeat the mod step
+ *    again to "consume" any value that had been added to "mod_atom"
+ *    (but it will not pend again).
+ *
+ * 5. If one or more threads were awoken, and the zync object is
+ *    configured to do so, the k_zync() call will invoke the scheduler
+ *    to select a new thread.  It does not otherwise act as a
+ *    scheduling point.
+ *
+ * The description above may seem obtuse, but effectively the zync
+ * object is implemented as a counting semaphore with the added
+ * "reset" behavior that allows it to emulate "wait unconditionally"
+ * and "atomically release lock" semantics needed by condition
+ * variables and similar constructs.
+ *
+ * Zync objects also optionally implement a "priority boost" feature,
+ * where the priority of the last thread that exited k_zync() having
+ * reduced the mod_atom value to zero is maintained at the maximum of
+ * its own priority and that of all waiters.  This feature is only
+ * available when CONFIG_ZYNC_PRIO_BOOST is enabled at build time.
+ *
+ * @note When userspace is enabled, the k_zync object is a kernel
+ * object.  But the k_zync_atom values are not, and in general are
+ * expected to be writable user memory used to store the lockless half
+ * of the API state.
+ *
+ * @note The only field of the k_zync_atom that should be used by
+ * caller code is "val".  The other fields are intended for the API
+ * layer.  They are not considered kernel/supervisor memory, however,
+ * and any corruption at the caller side is limited to causing threads
+ * to suspend for too long and/or wake up too early (something that
+ * normal misuse of synchronization primitives can do anyway).
+ *
+ * @param zync Kernel zync object
+ * @param mod_atom Atom to modify
+ * @param reset_atom If true, "mod_atom" is atomically reset to zero
+ *                   after the modification (see step 3 above)
+ * @param mod Value to attempt to add to mod_atom
+ * @param timeout Maximum time to wait, or K_NO_WAIT
+ * @return The absolute value of the change in mod_atom, or a negative
+ *         number indicating an error code (e.g. -EAGAIN returned
+ *         from the pend operation).
+ */
+__syscall int32_t k_zync(struct k_zync *zync, k_zync_atom_t *mod_atom,
+			 bool reset_atom, int32_t mod, k_timeout_t timeout);
+
+/* In practice, zyncs and atoms are always used together; z_zync_pair
+ * is an internal utility to manage this arrangement for the benefit
+ * of higher level APIs like k_sem/k_mutex.
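+ *
+ * For example (illustrative sketch; these are exactly the calls the
+ * k_sem wrappers above compile down to):
+ *
+ *     k_sem_take:  z_pzyncmod(&sem->zp, -1, timeout);
+ *     k_sem_give:  z_pzyncmod(&sem->zp,  1, K_NO_WAIT);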
+ */ + +#ifdef Z_ZYNC_INTERNAL_ATOM + +struct z_zync_pair { + struct k_zync zync; +}; + +__syscall uint32_t z_zync_atom_val(struct k_zync *zync); +__syscall int32_t z_zync_unlock_ok(struct k_zync *zync); + +#define Z_PAIR_ZYNC(zp) (&(zp)->zync) +#define Z_PAIR_ATOM(zp) (&(zp)->zync.atom) + +#define Z_ZYNCP_INITIALIZER(initv, fair, rec, pboost, maxv) { \ + .zync = K_ZYNC_INITIALIZER(initv, fair, rec, pboost, maxv), \ + .zync.atom = { .val = (initv) }} \ + +#define Z_ZYNCP_DEFINE(name, initv, fair, rec, prio_boost, maxv) \ + static STRUCT_SECTION_ITERABLE(z_zync_pair, name) = \ + Z_ZYNCP_INITIALIZER((initv), (fair), (rec), (prio_boost), (maxv)) + +#define Z_ZYNCP_USER_DEFINE(name, part, initv, fair, rec, pboost, maxv) \ + Z_ZYNCP_DEFINE(name, initv, fair, rec, pboost, maxv) \ + +#else /* !INTERNAL_ATOM */ + +struct z_zync_pair { + struct k_zync *zync; + k_zync_atom_t atom; +}; + +#define Z_PAIR_ZYNC(zp) ((zp)->zync) +#define Z_PAIR_ATOM(zp) (&(zp)->atom) + +#define Z_ZYNCP_PDEF(name, part, initv, fair, rec, pboost, maxv) \ + static struct k_zync _zn_##name = \ + K_ZYNC_INITIALIZER((initv), (fair), (rec), \ + (pboost), (maxv)); \ + static struct z_zync_pair name part = \ + { .zync = &_zn_##name, .atom = { .val = (initv) } }; + +#define Z_ZYNCP_USER_DEFINE(name, part, initv, fair, rec, pboost, maxv) \ + Z_ZYNCP_PDEF(name, K_APP_DMEM(part), initv, fair, rec, pboost, maxv) + +#define Z_ZYNCP_DEFINE(name, initv, fair, rec, pboost, maxv) \ + Z_ZYNCP_PDEF(name, /*no partition*/, initv, fair, rec, pboost, maxv) + +#endif + +__syscall int32_t z_pzync(struct k_zync *zync, int32_t mod, k_timeout_t timeout); +__syscall void z_pzync_init(struct z_zync_pair *zp, struct k_zync_cfg *cfg); + +static inline int32_t z_pzyncmod(struct z_zync_pair *zp, int32_t mod, + k_timeout_t timeout) +{ + int32_t ret; + + do { + if (IS_ENABLED(Z_ZYNC_ALWAYS_KERNEL)) { + ret = z_pzync(Z_PAIR_ZYNC(zp), mod, timeout); + } else if (k_zync_try_mod(Z_PAIR_ATOM(zp), mod)) { + return 0; + } else { + ret = k_zync(Z_PAIR_ZYNC(zp), Z_PAIR_ATOM(zp), + false, mod, timeout); + } + } while (mod < 0 && K_TIMEOUT_EQ(timeout, Z_FOREVER) && ret == 0); + + /* Infuriating historical API requirements in test suite */ + if (ret == 0) { + ret = -EAGAIN; + } + if (ret == -EAGAIN && K_TIMEOUT_EQ(timeout, Z_TIMEOUT_NO_WAIT)) { + ret = -EBUSY; + } + return ret < 0 ? ret : 0; +} + +/* Low level "wait on condition variable" utility. Atomically: sets + * the "mut" zync to 1, wakes up a waiting thread if there is one, and + * pends on the "cv" zync. Unlike k_condvar_wait() it does not + * reacquire the mutex on exit. The return value is as per k_zync. 
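+ *
+ * Illustrative caller pattern (this is what k_condvar_wait() does,
+ * which then re-locks the mutex itself):
+ *
+ *     int ret = z_pzync_condwait(&condvar->zp, &mutex->zp, timeout);
+ *     (void) k_mutex_lock(mutex, K_FOREVER);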
+ */
+__syscall int z_pzync_condwait(struct z_zync_pair *cv, struct z_zync_pair *mut,
+			       k_timeout_t timeout);
+
+bool z_vrfy_zync(void *p, bool init);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#ifndef CONFIG_BOARD_UNIT_TESTING
+#include
+#endif
+
+#endif /* ZEPHYR_SYS_ZYNC_H */
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 9c648bf8c214f..a345500fbc0a1 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -35,14 +35,12 @@ list(APPEND kernel_files
   idle.c
   mailbox.c
   msg_q.c
-  mutex.c
   queue.c
-  sem.c
   stack.c
   system_work_q.c
   work.c
   sched.c
-  condvar.c
+  zync.c
   )

 if(CONFIG_SMP)
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 908ced7cf0e04..afc58980d305d 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -311,6 +311,98 @@ config WAITQ_DUMB

 endchoice # WAITQ_ALGORITHM

+config ZYNC_VALIDATE
+	bool "k_zync validation layer"
+	default n if PICOLIBC
+	default y if ASSERT && !USERSPACE
+	help
+	  The zync facility includes a simple validation layer to
+	  detect circumstances like freeing locks from the wrong
+	  thread, recursively locking (non-recursive) zyncs, failing
+	  to hold the wrapper lock in condition variables, etc...
+	  This has significant performance impact when USERSPACE is
+	  enabled, so in that configuration it needs to be enabled
+	  manually.  Disabled when PICOLIBC=y to work around #51827
+
+config ZYNC_USERSPACE_COMPAT
+	bool "Legacy userspace API for defining zyncs"
+	help
+	  When true, k_sem/mutex/condvar objects defined with
+	  K_xxx_DEFINE() macros will be usable as kernel objects from
+	  any userspace thread with access.  This was the traditional
+	  API, but as it requires the atom be stored in kernel space
+	  it has a severe performance penalty vs. "natural" zync
+	  usage.  All operations on all zyncs need to be system calls!
+	  Only select this as a migration assistant; code should use
+	  xxx_USER_DEFINE() (a trivial API change) to realize the
+	  performance benefits of zync.
+
+config MAX_DYN_ZYNCS
+	int
+	default 0 if DYNAMIC_OBJECTS
+	default 32
+	help
+	  Zync "pairs" used to implement k_sem/mutex/condvar are
+	  userspace structs, and (if not defined using a
+	  xxx_USER_DEFINE() macro) associate themselves with kernel
+	  objects dynamically at initialization time.  This is done
+	  with dynamic kernel objects when enabled, otherwise with a
+	  simple allocate-only pool.
+
+config ZYNC_PRIO_BOOST
+	bool "Zync priority inheritance algorithm"
+	help
+	  When selected, k_zync() will be able to boost the priority
+	  of the "owner" of a Zync to the priority of any of its
+	  waiters.  This has significant performance impact on k_mutex
+	  (especially with userspace, where it forces operations to be
+	  syscalls).  Most apps that don't need PI semantics should
+	  leave it off.
+
+config ZYNC_MAX_VAL
+	bool "Zync maximum value clamping"
+	default y if NETWORKING
+	default y if CMSIS_RTOS_V1
+	default y if CMSIS_RTOS_V2
+	default y if LOG_BACKEND_UART
+	help
+	  When selected, the maximum atom value for a Zync may be
+	  controlled per-object by application code.  Very few apps
+	  need this (historically, it was used to make "semaphores"
+	  behave like "locks"), and it incurs significant performance
+	  overhead with k_sem.  Leave it off.  This is likely to be
+	  deprecated in the future.
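+
+# Illustrative configuration sketch (assumption, not part of the API
+# contract): a performance-sensitive app enables only the semantics it
+# needs in prj.conf, e.g. CONFIG_ZYNC_RECURSIVE=y for code relying on
+# recursive k_mutex locking, leaving ZYNC_MAX_VAL and ZYNC_PRIO_BOOST unset.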
+ +config ZYNC_RECURSIVE + bool "recursive Zync locks" + default y if NETWORKING + default y if DISK_ACCESS + default y if GPIO + default y if ADC + default y if NEWLIB_LIBC + default y if CMSIS_RTOS_V1 + default y if CMSIS_RTOS_V2 + help + When selected, a k_zync can be configured as a recursive + lock, allowing a single thread to take the lock multiple + times. As with PRIO_BOOST and MAX_VAL, this defeats the + lockless operation of the zync facility and apps should + consider carefully whether to enable it (especially with + userspace enabled, where it forces a syscall). + +config ZYNC_LEGACY + bool "Force zync to honor all legacy semantics" + select ZYNC_USERSPACE_COMPAT + select ZYNC_PRIO_BOOST + select ZYNC_MAX_VAL + select ZYNC_RECURSIVE + help + When true, the zync facility in Zephyr will honor all of the + older semantic rules, keeping full compatibility with older + versions of Zephyr but gaining little performance benefit. + Very few applications depend on this older behavior and most + should select the various features individually. + menu "Kernel Debugging and Metrics" config INIT_STACKS diff --git a/kernel/condvar.c b/kernel/condvar.c deleted file mode 100644 index 87cb36db316bb..0000000000000 --- a/kernel/condvar.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2020 Intel Corporation. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include -#include -#include -#include -#include -#include - -static struct k_spinlock lock; - -int z_impl_k_condvar_init(struct k_condvar *condvar) -{ - z_waitq_init(&condvar->wait_q); - z_object_init(condvar); - - SYS_PORT_TRACING_OBJ_INIT(k_condvar, condvar, 0); - - return 0; -} - -#ifdef CONFIG_USERSPACE -int z_vrfy_k_condvar_init(struct k_condvar *condvar) -{ - Z_OOPS(Z_SYSCALL_OBJ_INIT(condvar, K_OBJ_CONDVAR)); - return z_impl_k_condvar_init(condvar); -} -#include -#endif - -int z_impl_k_condvar_signal(struct k_condvar *condvar) -{ - k_spinlock_key_t key = k_spin_lock(&lock); - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_condvar, signal, condvar); - - struct k_thread *thread = z_unpend_first_thread(&condvar->wait_q); - - if (thread != NULL) { - SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_condvar, signal, condvar, K_FOREVER); - - arch_thread_return_value_set(thread, 0); - z_ready_thread(thread); - z_reschedule(&lock, key); - } else { - k_spin_unlock(&lock, key); - } - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_condvar, signal, condvar, 0); - - return 0; -} - -#ifdef CONFIG_USERSPACE -int z_vrfy_k_condvar_signal(struct k_condvar *condvar) -{ - Z_OOPS(Z_SYSCALL_OBJ(condvar, K_OBJ_CONDVAR)); - return z_impl_k_condvar_signal(condvar); -} -#include -#endif - -int z_impl_k_condvar_broadcast(struct k_condvar *condvar) -{ - struct k_thread *pending_thread; - k_spinlock_key_t key; - int woken = 0; - - key = k_spin_lock(&lock); - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_condvar, broadcast, condvar); - - /* wake up any threads that are waiting to write */ - while ((pending_thread = z_unpend_first_thread(&condvar->wait_q)) != - NULL) { - woken++; - arch_thread_return_value_set(pending_thread, 0); - z_ready_thread(pending_thread); - } - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_condvar, broadcast, condvar, woken); - - z_reschedule(&lock, key); - - return woken; -} -#ifdef CONFIG_USERSPACE -int z_vrfy_k_condvar_broadcast(struct k_condvar *condvar) -{ - Z_OOPS(Z_SYSCALL_OBJ(condvar, K_OBJ_CONDVAR)); - return z_impl_k_condvar_broadcast(condvar); -} -#include -#endif - -int z_impl_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex, - k_timeout_t timeout) -{ - 
k_spinlock_key_t key; - int ret; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_condvar, wait, condvar); - - key = k_spin_lock(&lock); - k_mutex_unlock(mutex); - - ret = z_pend_curr(&lock, key, &condvar->wait_q, timeout); - k_mutex_lock(mutex, K_FOREVER); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_condvar, wait, condvar, ret); - - return ret; -} -#ifdef CONFIG_USERSPACE -int z_vrfy_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex, - k_timeout_t timeout) -{ - Z_OOPS(Z_SYSCALL_OBJ(condvar, K_OBJ_CONDVAR)); - Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX)); - return z_impl_k_condvar_wait(condvar, mutex, timeout); -} -#include -#endif diff --git a/kernel/mutex.c b/kernel/mutex.c deleted file mode 100644 index 32c889bb2374e..0000000000000 --- a/kernel/mutex.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright (c) 2016 Wind River Systems, Inc. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -/** - * @file @brief mutex kernel services - * - * This module contains routines for handling mutex locking and unlocking. - * - * Mutexes implement a priority inheritance algorithm that boosts the priority - * level of the owning thread to match the priority level of the highest - * priority thread waiting on the mutex. - * - * Each mutex that contributes to priority inheritance must be released in the - * reverse order in which it was acquired. Furthermore each subsequent mutex - * that contributes to raising the owning thread's priority level must be - * acquired at a point after the most recent "bumping" of the priority level. - * - * For example, if thread A has two mutexes contributing to the raising of its - * priority level, the second mutex M2 must be acquired by thread A after - * thread A's priority level was bumped due to owning the first mutex M1. - * When releasing the mutex, thread A must release M2 before it releases M1. - * Failure to follow this nested model may result in threads running at - * unexpected priority levels (too high, or too low). - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); - -/* We use a global spinlock here because some of the synchronization - * is protecting things like owner thread priorities which aren't - * "part of" a single k_mutex. Should move those bits of the API - * under the scheduler lock so we can break this up. - */ -static struct k_spinlock lock; - -int z_impl_k_mutex_init(struct k_mutex *mutex) -{ - mutex->owner = NULL; - mutex->lock_count = 0U; - - z_waitq_init(&mutex->wait_q); - - z_object_init(mutex); - - SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0); - - return 0; -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex) -{ - Z_OOPS(Z_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX)); - return z_impl_k_mutex_init(mutex); -} -#include -#endif - -static int32_t new_prio_for_inheritance(int32_t target, int32_t limit) -{ - int new_prio = z_is_prio_higher(target, limit) ? target : limit; - - new_prio = z_get_new_prio_with_ceiling(new_prio); - - return new_prio; -} - -static bool adjust_owner_prio(struct k_mutex *mutex, int32_t new_prio) -{ - if (mutex->owner->base.prio != new_prio) { - - LOG_DBG("%p (ready (y/n): %c) prio changed to %d (was %d)", - mutex->owner, z_is_thread_ready(mutex->owner) ? 
- 'y' : 'n', - new_prio, mutex->owner->base.prio); - - return z_set_prio(mutex->owner, new_prio); - } - return false; -} - -int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) -{ - int new_prio; - k_spinlock_key_t key; - bool resched = false; - - __ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs"); - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, lock, mutex, timeout); - - key = k_spin_lock(&lock); - - if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) { - - mutex->owner_orig_prio = (mutex->lock_count == 0U) ? - _current->base.prio : - mutex->owner_orig_prio; - - mutex->lock_count++; - mutex->owner = _current; - - LOG_DBG("%p took mutex %p, count: %d, orig prio: %d", - _current, mutex, mutex->lock_count, - mutex->owner_orig_prio); - - k_spin_unlock(&lock, key); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0); - - return 0; - } - - if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) { - k_spin_unlock(&lock, key); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EBUSY); - - return -EBUSY; - } - - SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout); - - new_prio = new_prio_for_inheritance(_current->base.prio, - mutex->owner->base.prio); - - LOG_DBG("adjusting prio up on mutex %p", mutex); - - if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) { - resched = adjust_owner_prio(mutex, new_prio); - } - - int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout); - - LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex); - - LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex, - got_mutex ? 'y' : 'n'); - - if (got_mutex == 0) { - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, 0); - return 0; - } - - /* timed out */ - - LOG_DBG("%p timeout on mutex %p", _current, mutex); - - key = k_spin_lock(&lock); - - /* - * Check if mutex was unlocked after this thread was unpended. - * If so, skip adjusting owner's priority down. - */ - if (likely(mutex->owner != NULL)) { - struct k_thread *waiter = z_waitq_head(&mutex->wait_q); - - new_prio = (waiter != NULL) ? - new_prio_for_inheritance(waiter->base.prio, mutex->owner_orig_prio) : - mutex->owner_orig_prio; - - LOG_DBG("adjusting prio down on mutex %p", mutex); - - resched = adjust_owner_prio(mutex, new_prio) || resched; - } - - if (resched) { - z_reschedule(&lock, key); - } else { - k_spin_unlock(&lock, key); - } - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, lock, mutex, timeout, -EAGAIN); - - return -EAGAIN; -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, - k_timeout_t timeout) -{ - Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX)); - return z_impl_k_mutex_lock(mutex, timeout); -} -#include -#endif - -int z_impl_k_mutex_unlock(struct k_mutex *mutex) -{ - struct k_thread *new_owner; - - __ASSERT(!arch_is_in_isr(), "mutexes cannot be used inside ISRs"); - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mutex, unlock, mutex); - - CHECKIF(mutex->owner == NULL) { - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EINVAL); - - return -EINVAL; - } - /* - * The current thread does not own the mutex. - */ - CHECKIF(mutex->owner != _current) { - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM); - - return -EPERM; - } - - /* - * Attempt to unlock a mutex which is unlocked. mutex->lock_count - * cannot be zero if the current thread is equal to mutex->owner, - * therefore no underflow check is required. Use assert to catch - * undefined behavior. 
- */ - __ASSERT_NO_MSG(mutex->lock_count > 0U); - - LOG_DBG("mutex %p lock_count: %d", mutex, mutex->lock_count); - - /* - * If we are the owner and count is greater than 1, then decrement - * the count and return and keep current thread as the owner. - */ - if (mutex->lock_count > 1U) { - mutex->lock_count--; - goto k_mutex_unlock_return; - } - - k_spinlock_key_t key = k_spin_lock(&lock); - - adjust_owner_prio(mutex, mutex->owner_orig_prio); - - /* Get the new owner, if any */ - new_owner = z_unpend_first_thread(&mutex->wait_q); - - mutex->owner = new_owner; - - LOG_DBG("new owner of mutex %p: %p (prio: %d)", - mutex, new_owner, new_owner ? new_owner->base.prio : -1000); - - if (new_owner != NULL) { - /* - * new owner is already of higher or equal prio than first - * waiter since the wait queue is priority-based: no need to - * adjust its priority - */ - mutex->owner_orig_prio = new_owner->base.prio; - arch_thread_return_value_set(new_owner, 0); - z_ready_thread(new_owner); - z_reschedule(&lock, key); - } else { - mutex->lock_count = 0U; - k_spin_unlock(&lock, key); - } - - -k_mutex_unlock_return: - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, 0); - - return 0; -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex) -{ - Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX)); - return z_impl_k_mutex_unlock(mutex); -} -#include -#endif diff --git a/kernel/poll.c b/kernel/poll.c index d3b313f04bd8d..292c5ba85d262 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -62,12 +62,6 @@ void k_poll_event_init(struct k_poll_event *event, uint32_t type, static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state) { switch (event->type) { - case K_POLL_TYPE_SEM_AVAILABLE: - if (k_sem_count_get(event->sem) > 0U) { - *state = K_POLL_STATE_SEM_AVAILABLE; - return true; - } - break; case K_POLL_TYPE_DATA_AVAILABLE: if (!k_queue_is_empty(event->queue)) { *state = K_POLL_STATE_FIFO_DATA_AVAILABLE; @@ -93,6 +87,14 @@ static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state) return true; } #endif + case K_POLL_TYPE_ZYNC: + if (event->zync->poll_events.head == NULL) { + sys_dlist_init(&event->zync->poll_events); + } + if (event->zync->pollable) { + *state = K_POLL_STATE_ZYNC; + return true; + } case K_POLL_TYPE_IGNORE: break; default: @@ -137,10 +139,6 @@ static inline void register_event(struct k_poll_event *event, struct z_poller *poller) { switch (event->type) { - case K_POLL_TYPE_SEM_AVAILABLE: - __ASSERT(event->sem != NULL, "invalid semaphore\n"); - add_event(&event->sem->poll_events, event, poller); - break; case K_POLL_TYPE_DATA_AVAILABLE: __ASSERT(event->queue != NULL, "invalid queue\n"); add_event(&event->queue->poll_events, event, poller); @@ -159,6 +157,9 @@ static inline void register_event(struct k_poll_event *event, add_event(&event->pipe->poll_events, event, poller); break; #endif + case K_POLL_TYPE_ZYNC: + __ASSERT(event->zync != NULL, "invalid zync\n"); + add_event(&event->zync->poll_events, event, poller); case K_POLL_TYPE_IGNORE: /* nothing to do */ break; @@ -178,10 +179,6 @@ static inline void clear_event_registration(struct k_poll_event *event) event->poller = NULL; switch (event->type) { - case K_POLL_TYPE_SEM_AVAILABLE: - __ASSERT(event->sem != NULL, "invalid semaphore\n"); - remove_event = true; - break; case K_POLL_TYPE_DATA_AVAILABLE: __ASSERT(event->queue != NULL, "invalid queue\n"); remove_event = true; @@ -200,6 +197,10 @@ static inline void clear_event_registration(struct k_poll_event *event) 
		remove_event = true;
 		break;
 #endif
+	case K_POLL_TYPE_ZYNC:
+		__ASSERT(event->zync != NULL, "invalid zync\n");
+		remove_event = true;
+		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
 		break;
@@ -407,9 +408,6 @@ static inline int z_vrfy_k_poll(struct k_poll_event *events,
 		case K_POLL_TYPE_SIGNAL:
 			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
 			break;
-		case K_POLL_TYPE_SEM_AVAILABLE:
-			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
-			break;
 		case K_POLL_TYPE_DATA_AVAILABLE:
 			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
 			break;
@@ -421,6 +419,9 @@ static inline int z_vrfy_k_poll(struct k_poll_event *events,
 			Z_OOPS(Z_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
 			break;
 #endif
+		case K_POLL_TYPE_ZYNC:
+			Z_OOPS(Z_SYSCALL_OBJ_INIT(e->zync, K_OBJ_ZYNC));
+			break;
 		default:
 			ret = -EINVAL;
 			goto out_free;
@@ -471,6 +472,10 @@ void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
 {
 	struct k_poll_event *poll_event;

+	if (events->head == NULL) {
+		sys_dlist_init(events);
+	}
+
 	poll_event = (struct k_poll_event *)sys_dlist_get(events);
 	if (poll_event != NULL) {
 		(void) signal_poll_event(poll_event, state);
diff --git a/kernel/sem.c b/kernel/sem.c
deleted file mode 100644
index 95abad9837b90..0000000000000
--- a/kernel/sem.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (c) 2010-2016 Wind River Systems, Inc.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-/**
- * @file
- *
- * @brief Kernel semaphore object.
- *
- * The semaphores are of the 'counting' type, i.e. each 'give' operation will
- * increment the internal count by 1, if no thread is pending on it. The 'init'
- * call initializes the count to 'initial_count'. Following multiple 'give'
- * operations, the same number of 'take' operations can be performed without
- * the calling thread having to pend on the semaphore, or the calling task
- * having to poll.
- */
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/* We use a system-wide lock to synchronize semaphores, which has
- * unfortunate performance impact vs. using a per-object lock
- * (semaphores are *very* widely used).  But per-object locks require
- * significant extra RAM.  A properly spin-aware semaphore
- * implementation would spin on atomic access to the count variable,
- * and not a spinlock per se.  Useful optimization for the future...
- */ -static struct k_spinlock lock; - -int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count, - unsigned int limit) -{ - /* - * Limit cannot be zero and count cannot be greater than limit - */ - CHECKIF(limit == 0U || limit > K_SEM_MAX_LIMIT || initial_count > limit) { - SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, -EINVAL); - - return -EINVAL; - } - - sem->count = initial_count; - sem->limit = limit; - - SYS_PORT_TRACING_OBJ_FUNC(k_sem, init, sem, 0); - - z_waitq_init(&sem->wait_q); -#if defined(CONFIG_POLL) - sys_dlist_init(&sem->poll_events); -#endif - z_object_init(sem); - - return 0; -} - -#ifdef CONFIG_USERSPACE -int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count, - unsigned int limit) -{ - Z_OOPS(Z_SYSCALL_OBJ_INIT(sem, K_OBJ_SEM)); - return z_impl_k_sem_init(sem, initial_count, limit); -} -#include -#endif - -static inline void handle_poll_events(struct k_sem *sem) -{ -#ifdef CONFIG_POLL - z_handle_obj_poll_events(&sem->poll_events, K_POLL_STATE_SEM_AVAILABLE); -#else - ARG_UNUSED(sem); -#endif -} - -void z_impl_k_sem_give(struct k_sem *sem) -{ - k_spinlock_key_t key = k_spin_lock(&lock); - struct k_thread *thread; - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, give, sem); - - thread = z_unpend_first_thread(&sem->wait_q); - - if (thread != NULL) { - arch_thread_return_value_set(thread, 0); - z_ready_thread(thread); - } else { - sem->count += (sem->count != sem->limit) ? 1U : 0U; - handle_poll_events(sem); - } - - z_reschedule(&lock, key); - - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, give, sem); -} - -#ifdef CONFIG_USERSPACE -static inline void z_vrfy_k_sem_give(struct k_sem *sem) -{ - Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM)); - z_impl_k_sem_give(sem); -} -#include -#endif - -int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout) -{ - int ret = 0; - - __ASSERT(((arch_is_in_isr() == false) || - K_TIMEOUT_EQ(timeout, K_NO_WAIT)), ""); - - k_spinlock_key_t key = k_spin_lock(&lock); - - SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_sem, take, sem, timeout); - - if (likely(sem->count > 0U)) { - sem->count--; - k_spin_unlock(&lock, key); - ret = 0; - goto out; - } - - if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - k_spin_unlock(&lock, key); - ret = -EBUSY; - goto out; - } - - SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_sem, take, sem, timeout); - - ret = z_pend_curr(&lock, key, &sem->wait_q, timeout); - -out: - SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_sem, take, sem, timeout, ret); - - return ret; -} - -void z_impl_k_sem_reset(struct k_sem *sem) -{ - struct k_thread *thread; - k_spinlock_key_t key = k_spin_lock(&lock); - - while (true) { - thread = z_unpend_first_thread(&sem->wait_q); - if (thread == NULL) { - break; - } - arch_thread_return_value_set(thread, -EAGAIN); - z_ready_thread(thread); - } - sem->count = 0; - - SYS_PORT_TRACING_OBJ_FUNC(k_sem, reset, sem); - - handle_poll_events(sem); - - z_reschedule(&lock, key); -} - -#ifdef CONFIG_USERSPACE -static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout) -{ - Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM)); - return z_impl_k_sem_take((struct k_sem *)sem, timeout); -} -#include - -static inline void z_vrfy_k_sem_reset(struct k_sem *sem) -{ - Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM)); - z_impl_k_sem_reset(sem); -} -#include - -static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem) -{ - Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM)); - return z_impl_k_sem_count_get(sem); -} -#include - -#endif diff --git a/kernel/zync.c b/kernel/zync.c new file mode 100644 index 0000000000000..c57e9d335f491 --- /dev/null +++ b/kernel/zync.c @@ 
-0,0 +1,447 @@
+/* Copyright (c) 2022 Google LLC.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#include
+#include
+#include
+#include
+#include
+
+#if !defined(Z_ZYNC_INTERNAL_ATOM) && !defined(CONFIG_DYNAMIC_OBJECTS)
+static struct k_zync zync_pool[CONFIG_MAX_DYN_ZYNCS];
+static uint32_t num_pool_zyncs;
+#endif
+
+/* Sets the priority of the zync owner (if it exists) to the highest
+ * logical priority among the pri argument, the owner's original
+ * priority, and the priority of the highest priority waiting thread
+ */
+static void prio_boost(struct k_zync *zync, int pri)
+{
+#ifdef CONFIG_ZYNC_PRIO_BOOST
+	if (zync->cfg.prio_boost && zync->owner != NULL) {
+		struct k_thread *th = z_waitq_head(&zync->waiters);
+
+		pri = MIN(pri, zync->orig_prio);
+		if (th != NULL) {
+			pri = MIN(pri, th->base.prio);
+		}
+		z_set_prio(zync->owner, pri);
+	}
+#endif
+}
+
+static void prio_boost_reset(struct k_zync *zync)
+{
+#ifdef CONFIG_ZYNC_PRIO_BOOST
+	if (zync->cfg.prio_boost) {
+		z_set_prio(_current, zync->orig_prio);
+	}
+#endif
+}
+
+static void set_owner(struct k_zync *zync, struct k_thread *val)
+{
+	IF_ENABLED(Z_ZYNC_OWNER, (zync->owner = val));
+}
+
+static void take_ownership(struct k_zync *zync)
+{
+#ifdef Z_ZYNC_OWNER
+# ifdef CONFIG_ZYNC_PRIO_BOOST
+	if (zync->cfg.prio_boost) {
+		if (zync->owner == NULL) {
+			zync->orig_prio = _current->base.prio;
+		}
+	}
+# endif
+	zync->owner = _current;
+#endif
+}
+
+static inline int32_t modclamp(struct k_zync *zync, int32_t mod)
+{
+	int32_t max = K_ZYNC_ATOM_VAL_MAX;
+
+#ifdef CONFIG_ZYNC_MAX_VAL
+	if (zync->cfg.max_val != 0) {
+		max = MIN(max, zync->cfg.max_val);
+	}
+#endif
+	return CLAMP(mod, 0, max);
+}
+
+void z_impl_k_zync_set_config(struct k_zync *zync,
+			      const struct k_zync_cfg *cfg)
+{
+	k_spinlock_key_t key = k_spin_lock(&zync->lock);
+
+	zync->cfg = *cfg;
+	IF_ENABLED(CONFIG_ZYNC_MAX_VAL,
+		   (zync->cfg.max_val = Z_ZYNC_MVCLAMP(zync->cfg.max_val)));
+	k_spin_unlock(&zync->lock, key);
+}
+
+void z_impl_k_zync_get_config(struct k_zync *zync,
+			      struct k_zync_cfg *cfg)
+{
+	k_spinlock_key_t key = k_spin_lock(&zync->lock);
+
+	*cfg = zync->cfg;
+	k_spin_unlock(&zync->lock, key);
+}
+
+void z_impl_k_zync_init(struct k_zync *zync, k_zync_atom_t *atom,
+			struct k_zync_cfg *cfg)
+{
+	memset(zync, 0, sizeof(*zync));
+	k_zync_set_config(zync, cfg);
+	atom->val = cfg->atom_init;
+#ifdef CONFIG_POLL
+	zync->pollable = (cfg->atom_init != 0);
+#endif
+	z_object_init(zync);
+}
+
+#ifndef Z_ZYNC_INTERNAL_ATOM
+/* When zyncs and atoms are stored separately (this is the
+ * default/preferred mode) the kernel-side k_zync gets "dynamically"
+ * allocated at initialization time (thus allowing zero-filled structs
+ * to be initialized).  That's done with the existing object allocator
+ * if it's configured, otherwise with a simple allocate-once pool.
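+ *
+ * Sizing sketch (illustrative; "sems" is a hypothetical array): without
+ * CONFIG_DYNAMIC_OBJECTS each runtime-initialized pair permanently
+ * consumes one pool slot, so with the default CONFIG_MAX_DYN_ZYNCS=32:
+ *
+ *     static struct k_sem sems[33];  // one more than the pool holds
+ *
+ *     for (int i = 0; i < 33; i++) {
+ *             k_sem_init(&sems[i], 0, 1);  // 33rd call gets a NULL zync
+ *     }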
+ */ +static struct k_zync *alloc_zync(void) +{ +#ifdef CONFIG_DYNAMIC_OBJECTS + return k_object_alloc(K_OBJ_ZYNC); +#else + if (num_pool_zyncs < ARRAY_SIZE(zync_pool)) { + return &zync_pool[num_pool_zyncs++]; + } + return NULL; +#endif +} +#endif + +void z_impl_z_pzync_init(struct z_zync_pair *zp, struct k_zync_cfg *cfg) +{ +#ifndef Z_ZYNC_INTERNAL_ATOM + if (!k_is_user_context() && zp->zync == NULL) { + zp->zync = alloc_zync(); + } +#endif + k_zync_init(Z_PAIR_ZYNC(zp), Z_PAIR_ATOM(zp), cfg); +} + +static bool try_recursion(struct k_zync *zync, int32_t mod) +{ +#ifdef CONFIG_ZYNC_RECURSIVE + if (zync->cfg.recursive) { + __ASSERT(abs(mod) == 1, "recursive locks aren't semaphores"); + if (mod > 0 && zync->rec_count > 0) { + zync->rec_count--; + return true; + } else if (mod < 0 && _current == zync->owner) { + zync->rec_count++; + return true; + } + } +#endif + return false; +} + +static bool handle_poll(struct k_zync *zync, int32_t val0, int32_t val1) +{ + bool resched = false; + +#ifdef CONFIG_POLL + if (val1 > 0 && val0 == 0) { + z_handle_obj_poll_events(&zync->poll_events, K_POLL_STATE_ZYNC); + resched = true; + } + zync->pollable = (val1 != 0); +#endif + return resched; +} + +static int32_t zync_locked(struct k_zync *zync, k_zync_atom_t *mod_atom, + bool reset_atom, int32_t mod, k_timeout_t timeout, + k_spinlock_key_t key) +{ + bool resched, must_pend, nowait = K_TIMEOUT_EQ(timeout, Z_TIMEOUT_NO_WAIT); + int32_t delta = 0, delta2 = 0, val0 = 0, val1 = 0, pendret = 0, woken; + + if (try_recursion(zync, mod)) { + k_spin_unlock(&zync->lock, key); + return 1; + } + + K_ZYNC_ATOM_SET(mod_atom) { + val0 = old_atom.val; + val1 = modclamp(zync, val0 + mod); + delta = val1 - val0; + new_atom.val = reset_atom ? 0 : val1; + new_atom.waiters = mod < 0 && delta != mod; + } + + must_pend = mod < 0 && mod != delta; + + if (delta > 0) { + if (val0 == 0) { + prio_boost_reset(zync); + } + set_owner(zync, NULL); + } + + resched = handle_poll(zync, val0, val1); + + Z_WAIT_Q_LAZY_INIT(&zync->waiters); + for (woken = 0; woken < delta; woken++) { + if (!z_sched_wake(&zync->waiters, 0, NULL)) { + break; + } + resched = true; + } + + /* Old condvar API wants the count of threads woken as the return value */ + if (delta >= 0 && reset_atom) { + delta = woken; + } + + if (resched) { + K_ZYNC_ATOM_SET(mod_atom) { + new_atom.waiters = z_waitq_head(&zync->waiters) != NULL; + } + } + + if (must_pend) { + pendret = -EAGAIN; + if (!nowait) { + prio_boost(zync, _current->base.prio); + pendret = z_pend_curr(&zync->lock, key, &zync->waiters, timeout); + key = k_spin_lock(&zync->lock); + prio_boost(zync, K_LOWEST_THREAD_PRIO); + + mod -= delta; + K_ZYNC_ATOM_SET(mod_atom) { + new_atom.val = modclamp(zync, old_atom.val + mod); + delta2 = new_atom.val - old_atom.val; + } + delta += delta2; + } + } + + if (delta < 0) { + take_ownership(zync); + } + + if (resched && zync->cfg.fair) { + z_reschedule(&zync->lock, key); + } else { + k_spin_unlock(&zync->lock, key); + } + return pendret < 0 ? 
pendret : abs(delta); +} + +int32_t z_impl_k_zync(struct k_zync *zync, k_zync_atom_t *mod_atom, + bool reset_atom, int32_t mod, k_timeout_t timeout) +{ + k_spinlock_key_t key = k_spin_lock(&zync->lock); + + return zync_locked(zync, mod_atom, reset_atom, mod, timeout, key); +} + +void z_impl_k_zync_reset(struct k_zync *zync, k_zync_atom_t *atom) +{ + k_spinlock_key_t key = k_spin_lock(&zync->lock); + + atom->val = zync->cfg.atom_init; + + while (z_waitq_head(&zync->waiters)) { + z_sched_wake(&zync->waiters, -EAGAIN, NULL); + } + + IF_ENABLED(CONFIG_ZYNC_RECURSIVE, (zync->rec_count = 0)); + set_owner(zync, NULL); + + k_spin_unlock(&zync->lock, key); +} + +#ifdef Z_ZYNC_ALWAYS_KERNEL +int32_t z_impl_z_pzync(struct k_zync *zync, int32_t mod, k_timeout_t timeout) +{ + return k_zync(zync, &zync->atom, false, mod, timeout); +} +#endif + +#ifdef Z_ZYNC_INTERNAL_ATOM +uint32_t z_impl_z_zync_atom_val(struct k_zync *zync) +{ + return zync->atom.val; +} + +int32_t z_impl_z_zync_unlock_ok(struct k_zync *zync) +{ + if (zync->atom.val != 0) { + return -EINVAL; + } +#ifdef Z_ZYNC_OWNER + if (zync->owner != _current) { + return -EPERM; + } +#endif + return 0; +} +#endif + +int z_impl_z_pzync_condwait(struct z_zync_pair *cv, struct z_zync_pair *mut, + k_timeout_t timeout) +{ + k_spinlock_key_t cvkey = k_spin_lock(&Z_PAIR_ZYNC(cv)->lock); + k_spinlock_key_t mkey = k_spin_lock(&Z_PAIR_ZYNC(mut)->lock); + +#ifdef CONFIG_ZYNC_VALIDATE + __ASSERT_NO_MSG(Z_PAIR_ATOM(mut)->val == 0); +#ifdef CONFIG_ZYNC_RECURSIVE + /* This never worked, and is incredibly dangerous to support, + * it would mean that an outer context, which may have no idea + * a condition variable is in use, would have its lock broken + * and then be put to sleep by the code it called! + */ + __ASSERT(Z_PAIR_ZYNC(mut)->rec_count == 0, "never condwait on recursive locks"); +#endif +#endif + Z_PAIR_ATOM(mut)->val = 1; + set_owner(Z_PAIR_ZYNC(mut), NULL); + if (Z_PAIR_ATOM(mut)->waiters) { + z_sched_wake(&Z_PAIR_ZYNC(mut)->waiters, 0, NULL); + Z_PAIR_ATOM(mut)->waiters = false; + } + k_spin_unlock(&Z_PAIR_ZYNC(mut)->lock, mkey); + + return zync_locked(Z_PAIR_ZYNC(cv), Z_PAIR_ATOM(cv), + NULL, -1, timeout, cvkey); +} + +#ifdef CONFIG_USERSPACE + +void z_vrfy_k_zync_set_config(struct k_zync *zync, const struct k_zync_cfg *cfg) +{ + Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC)); + Z_OOPS(Z_SYSCALL_MEMORY_READ(cfg, sizeof(*cfg))); + z_impl_k_zync_set_config(zync, cfg); +} +#include + +void z_vrfy_k_zync_get_config(struct k_zync *zync, struct k_zync_cfg *cfg) +{ + Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC)); + Z_OOPS(Z_SYSCALL_MEMORY_WRITE(cfg, sizeof(*cfg))); + z_impl_k_zync_get_config(zync, cfg); +} +#include + +static void chk_atom(struct k_zync *zync, k_zync_atom_t *atom) +{ +#ifdef Z_ZYNC_INTERNAL_ATOM + if (atom == &zync->atom) { + return; + } +#endif + Z_OOPS(Z_SYSCALL_MEMORY_WRITE(atom, sizeof(*atom))); +} + +void z_vrfy_k_zync_init(struct k_zync *zync, k_zync_atom_t *atom, + struct k_zync_cfg *cfg) +{ + Z_OOPS(Z_SYSCALL_OBJ_INIT(zync, K_OBJ_ZYNC)); + chk_atom(zync, atom); + Z_OOPS(Z_SYSCALL_MEMORY_READ(cfg, sizeof(*cfg))); + z_impl_k_zync_init(zync, atom, cfg); +} +#include + +int32_t z_vrfy_k_zync(struct k_zync *zync, k_zync_atom_t *mod_atom, + bool reset_atom, int32_t mod, k_timeout_t timeout) +{ + Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC)); + chk_atom(zync, mod_atom); + return z_impl_k_zync(zync, mod_atom, reset_atom, mod, timeout); +} +#include + +static void chk_pair(struct z_zync_pair *p) +{ +#ifdef Z_ZYNC_INTERNAL_ATOM + Z_OOPS(Z_SYSCALL_OBJ(p, 
+
+static void chk_pair(struct z_zync_pair *p)
+{
+#ifdef Z_ZYNC_INTERNAL_ATOM
+	Z_OOPS(Z_SYSCALL_OBJ(p, K_OBJ_ZYNC));
+#else
+	struct k_zync *zptr;
+
+	Z_OOPS(z_user_from_copy(&zptr, &p->zync, sizeof(*zptr)));
+	Z_OOPS(Z_SYSCALL_OBJ(zptr, K_OBJ_ZYNC));
+	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(&p->atom, sizeof(p->atom)));
+#endif
+}
+
+int z_vrfy_z_pzync_condwait(struct z_zync_pair *cv, struct z_zync_pair *mut,
+			    k_timeout_t timeout)
+{
+	chk_pair(cv);
+	chk_pair(mut);
+	return z_impl_z_pzync_condwait(cv, mut, timeout);
+}
+#include <syscalls/z_pzync_condwait_mrsh.c>
+
+void z_vrfy_k_zync_reset(struct k_zync *zync, k_zync_atom_t *atom)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC));
+	chk_atom(zync, atom);
+	z_impl_k_zync_reset(zync, atom);
+}
+#include <syscalls/k_zync_reset_mrsh.c>
+
+void z_vrfy_z_pzync_init(struct z_zync_pair *zp, struct k_zync_cfg *cfg)
+{
+#ifdef Z_ZYNC_INTERNAL_ATOM
+	z_vrfy_k_zync_init(Z_PAIR_ZYNC(zp), Z_PAIR_ATOM(zp), cfg);
+#else
+	struct z_zync_pair kzp;
+
+	Z_OOPS(z_user_from_copy(&kzp, zp, sizeof(kzp)));
+	if (kzp.zync == NULL) {
+		kzp.zync = alloc_zync();
+		Z_OOPS(kzp.zync == NULL);
+		k_object_access_grant(kzp.zync, _current);
+		Z_OOPS(z_user_to_copy(zp, &kzp, sizeof(kzp)));
+	}
+	Z_OOPS(Z_SYSCALL_OBJ_INIT(kzp.zync, K_OBJ_ZYNC));
+
+	z_impl_z_pzync_init(zp, cfg);
+#endif
+}
+#include <syscalls/z_pzync_init_mrsh.c>
+
+#ifdef Z_ZYNC_ALWAYS_KERNEL
+int32_t z_vrfy_z_pzync(struct k_zync *zync, int32_t mod, k_timeout_t timeout)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC));
+	return z_impl_z_pzync(zync, mod, timeout);
+}
+#include <syscalls/z_pzync_mrsh.c>
+
+uint32_t z_vrfy_z_zync_atom_val(struct k_zync *zync)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC));
+	return z_impl_z_zync_atom_val(zync);
+}
+#include <syscalls/z_zync_atom_val_mrsh.c>
+
+int32_t z_vrfy_z_zync_unlock_ok(struct k_zync *zync)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(zync, K_OBJ_ZYNC));
+	return z_impl_z_zync_unlock_ok(zync);
+}
+#include <syscalls/z_zync_unlock_ok_mrsh.c>
+#endif /* ALWAYS_KERNEL */
+
+#endif /* CONFIG_USERSPACE */
diff --git a/lib/libc/picolibc/libc-hooks.c b/lib/libc/picolibc/libc-hooks.c
index f97e18fc63042..725d79806cc55 100644
--- a/lib/libc/picolibc/libc-hooks.c
+++ b/lib/libc/picolibc/libc-hooks.c
@@ -352,7 +352,7 @@ void __retarget_lock_init(_LOCK_T *lock)
 #ifndef CONFIG_USERSPACE
 	*lock = malloc(sizeof(struct k_sem));
 #else
-	*lock = k_object_alloc(K_OBJ_SEM);
+	*lock = k_object_alloc(K_OBJ_ZYNC);
 #endif /* !CONFIG_USERSPACE */
 
 	__ASSERT(*lock != NULL, "non-recursive lock allocation failed");
@@ -368,7 +368,7 @@ void __retarget_lock_init_recursive(_LOCK_T *lock)
 #ifndef CONFIG_USERSPACE
 	*lock = malloc(sizeof(struct k_mutex));
 #else
-	*lock = k_object_alloc(K_OBJ_MUTEX);
+	*lock = k_object_alloc(K_OBJ_ZYNC);
 #endif /* !CONFIG_USERSPACE */
 
 	__ASSERT(*lock != NULL, "recursive lock allocation failed");
diff --git a/lib/os/fdtable.c b/lib/os/fdtable.c
index 99cc680a71e71..329f27a8f0130 100644
--- a/lib/os/fdtable.c
+++ b/lib/os/fdtable.c
@@ -56,7 +56,7 @@ static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = {
 #endif
 };
 
-static K_MUTEX_DEFINE(fdtable_lock);
+K_MUTEX_DEFINE(fdtable_lock);
 
 static int z_fd_ref(int fd)
 {
diff --git a/lib/os/mutex.c b/lib/os/mutex.c
index 3cd84612a228d..32ebfada14e6b 100644
--- a/lib/os/mutex.c
+++ b/lib/os/mutex.c
@@ -21,15 +21,6 @@ static struct k_mutex *get_k_mutex(struct sys_mutex *mutex)
 	return obj->data.mutex;
 }
 
-static bool check_sys_mutex_addr(struct sys_mutex *addr)
-{
-	/* sys_mutex memory is never touched, just used to lookup the
-	 * underlying k_mutex, but we don't want threads using mutexes
-	 * that are outside their memory domain
-	 */
-	return Z_SYSCALL_MEMORY_WRITE(addr, sizeof(struct sys_mutex));
-}
-
 int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, k_timeout_t timeout)
 {
 	struct k_mutex *kernel_mutex = get_k_mutex(mutex);
@@ -44,10 +35,6 @@ int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, k_timeout_t timeout)
 static inline int z_vrfy_z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
						 k_timeout_t timeout)
 {
-	if (check_sys_mutex_addr(mutex)) {
-		return -EACCES;
-	}
-
 	return z_impl_z_sys_mutex_kernel_lock(mutex, timeout);
 }
 #include <syscalls/z_sys_mutex_kernel_lock_mrsh.c>
@@ -56,7 +43,7 @@ int z_impl_z_sys_mutex_kernel_unlock(struct sys_mutex *mutex)
 {
 	struct k_mutex *kernel_mutex = get_k_mutex(mutex);
 
-	if (kernel_mutex == NULL || kernel_mutex->lock_count == 0) {
+	if (kernel_mutex == NULL) {
 		return -EINVAL;
 	}
 
@@ -65,10 +52,6 @@ int z_impl_z_sys_mutex_kernel_unlock(struct sys_mutex *mutex)
 
 static inline int z_vrfy_z_sys_mutex_kernel_unlock(struct sys_mutex *mutex)
 {
-	if (check_sys_mutex_addr(mutex)) {
-		return -EACCES;
-	}
-
 	return z_impl_z_sys_mutex_kernel_unlock(mutex);
 }
 #include <syscalls/z_sys_mutex_kernel_unlock_mrsh.c>
diff --git a/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_backend_ipc.c b/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_backend_ipc.c
index 06ad1f003e6bf..4404cd35f550c 100644
--- a/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_backend_ipc.c
+++ b/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_backend_ipc.c
@@ -20,7 +20,7 @@ LOG_MODULE_REGISTER(LOG_MODULE_NAME);
 
 #define IPC_BOUND_TIMEOUT_IN_MS K_MSEC(1000)
 
-static K_SEM_DEFINE(edp_bound_sem, 0, 1);
+K_SEM_STATIC_DEFINE(edp_bound_sem, 0, 1);
 static struct ipc_ept ept;
 
 static void endpoint_bound(void *priv)
@@ -74,7 +74,7 @@ nrf_802154_ser_err_t nrf_802154_backend_init(void)
 #define RING_BUFFER_LEN 16
 #define SEND_THREAD_STACK_SIZE 1024
 
-static K_SEM_DEFINE(send_sem, 0, RING_BUFFER_LEN);
+K_SEM_STATIC_DEFINE(send_sem, 0, RING_BUFFER_LEN);
 K_THREAD_STACK_DEFINE(send_thread_stack, SEND_THREAD_STACK_SIZE);
 struct k_thread send_thread_data;
diff --git a/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_response_notifier.c b/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_response_notifier.c
index bd1c3e0c4c0c0..bbd8f124b86df 100644
--- a/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_response_notifier.c
+++ b/modules/hal_nordic/nrf_802154/serialization/platform/nrf_802154_spinel_response_notifier.c
@@ -33,7 +33,7 @@ struct spinel_notify_buff_internal {
 	bool free;
 };
 
-static K_SEM_DEFINE(notify_sem, 0, 1);
+K_SEM_STATIC_DEFINE(notify_sem, 0, 1);
 static struct k_mutex await_mutex;
 
 static struct spinel_notify_buff_internal notify_buff;
diff --git a/modules/trusted-firmware-m/interface/interface.c b/modules/trusted-firmware-m/interface/interface.c
index 9852fafe612a4..27cb69b5cd0cc 100644
--- a/modules/trusted-firmware-m/interface/interface.c
+++ b/modules/trusted-firmware-m/interface/interface.c
@@ -71,7 +71,7 @@ int32_t tfm_ns_interface_dispatch(veneer_fn fn,
 enum tfm_status_e tfm_ns_interface_init(void)
 {
 	/*
-	 * The static K_MUTEX_DEFINE handles mutex initialization,
+	 * The K_MUTEX_STATIC_DEFINE handles mutex initialization,
	 * so this function may be implemented as no-op.
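+	 * For illustration (the name here is hypothetical), the
+	 * pattern assumed is that the definition itself performs the
+	 * initialization:
+	 *
+	 *	K_MUTEX_STATIC_DEFINE(tfm_ns_lock);
+	 *
+	 * after which no runtime k_mutex_init() call is needed.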
*/ return TFM_SUCCESS; diff --git a/samples/bluetooth/broadcast_audio_sink/src/main.c b/samples/bluetooth/broadcast_audio_sink/src/main.c index 66b41933136d1..1230fcf4584c5 100644 --- a/samples/bluetooth/broadcast_audio_sink/src/main.c +++ b/samples/bluetooth/broadcast_audio_sink/src/main.c @@ -10,11 +10,11 @@ #define SEM_TIMEOUT K_SECONDS(10) -static K_SEM_DEFINE(sem_broadcaster_found, 0U, 1U); -static K_SEM_DEFINE(sem_pa_synced, 0U, 1U); -static K_SEM_DEFINE(sem_base_received, 0U, 1U); -static K_SEM_DEFINE(sem_syncable, 0U, 1U); -static K_SEM_DEFINE(sem_pa_sync_lost, 0U, 1U); +K_SEM_STATIC_DEFINE(sem_broadcaster_found, 0U, 1U); +K_SEM_STATIC_DEFINE(sem_pa_synced, 0U, 1U); +K_SEM_STATIC_DEFINE(sem_base_received, 0U, 1U); +K_SEM_STATIC_DEFINE(sem_syncable, 0U, 1U); +K_SEM_STATIC_DEFINE(sem_pa_sync_lost, 0U, 1U); static struct bt_audio_broadcast_sink *broadcast_sink; static struct bt_audio_stream streams[CONFIG_BT_AUDIO_BROADCAST_SNK_STREAM_COUNT]; diff --git a/samples/bluetooth/broadcast_audio_source/src/main.c b/samples/bluetooth/broadcast_audio_source/src/main.c index b5529b494b4d4..ed7f08bdc0f0c 100644 --- a/samples/bluetooth/broadcast_audio_source/src/main.c +++ b/samples/bluetooth/broadcast_audio_source/src/main.c @@ -30,8 +30,8 @@ static uint8_t mock_data[CONFIG_BT_ISO_TX_MTU]; static uint16_t seq_num; static bool stopping; -static K_SEM_DEFINE(sem_started, 0U, ARRAY_SIZE(streams)); -static K_SEM_DEFINE(sem_stopped, 0U, ARRAY_SIZE(streams)); +K_SEM_STATIC_DEFINE(sem_started, 0U, ARRAY_SIZE(streams)); +K_SEM_STATIC_DEFINE(sem_stopped, 0U, ARRAY_SIZE(streams)); #define BROADCAST_SOURCE_LIFETIME 30U /* seconds */ diff --git a/samples/bluetooth/central_past/src/main.c b/samples/bluetooth/central_past/src/main.c index 0b0121983b4de..8002acfd88016 100644 --- a/samples/bluetooth/central_past/src/main.c +++ b/samples/bluetooth/central_past/src/main.c @@ -17,10 +17,10 @@ static bt_addr_le_t per_addr; static uint8_t per_sid; static struct bt_conn *default_conn; -static K_SEM_DEFINE(sem_conn, 0, 1); -static K_SEM_DEFINE(sem_conn_lost, 0, 1); -static K_SEM_DEFINE(sem_per_adv, 0, 1); -static K_SEM_DEFINE(sem_per_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_conn, 0, 1); +K_SEM_STATIC_DEFINE(sem_conn_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_adv, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync, 0, 1); static bool data_cb(struct bt_data *data, void *user_data) { diff --git a/samples/bluetooth/direction_finding_connectionless_rx/src/main.c b/samples/bluetooth/direction_finding_connectionless_rx/src/main.c index d3a413ef4ab00..49cb108061d01 100644 --- a/samples/bluetooth/direction_finding_connectionless_rx/src/main.c +++ b/samples/bluetooth/direction_finding_connectionless_rx/src/main.c @@ -40,9 +40,9 @@ static bool sync_terminated; static uint8_t per_sid; static uint32_t sync_create_timeout_ms; -static K_SEM_DEFINE(sem_per_adv, 0, 1); -static K_SEM_DEFINE(sem_per_sync, 0, 1); -static K_SEM_DEFINE(sem_per_sync_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_adv, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync_lost, 0, 1); #if defined(CONFIG_BT_DF_CTE_RX_AOA) const static uint8_t ant_patterns[] = { 0x1, 0x2, 0x3, 0x4, 0x5, diff --git a/samples/bluetooth/hci_rpmsg/src/main.c b/samples/bluetooth/hci_rpmsg/src/main.c index 4da4822de3871..a738020541047 100644 --- a/samples/bluetooth/hci_rpmsg/src/main.c +++ b/samples/bluetooth/hci_rpmsg/src/main.c @@ -37,7 +37,7 @@ static struct ipc_ept hci_ept; static K_THREAD_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE); static struct 
k_thread tx_thread_data; static K_FIFO_DEFINE(tx_queue); -static K_SEM_DEFINE(ipc_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc_bound_sem, 0, 1); #if defined(CONFIG_BT_CTLR_ASSERT_HANDLER) || defined(CONFIG_BT_HCI_VS_FATAL_ERROR) /* A flag used to store information if the IPC endpoint has already been bound. The end point can't * be used before that happens. diff --git a/samples/bluetooth/hci_spi/src/main.c b/samples/bluetooth/hci_spi/src/main.c index 77c97053a54a4..1f5777e0e70d5 100644 --- a/samples/bluetooth/hci_spi/src/main.c +++ b/samples/bluetooth/hci_spi/src/main.c @@ -97,8 +97,8 @@ static const struct gpio_dt_spec irq = GPIO_DT_SPEC_GET(HCI_SPI_NODE, irq_gpios) static K_THREAD_STACK_DEFINE(bt_tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE); static struct k_thread bt_tx_thread_data; -static K_SEM_DEFINE(sem_spi_rx, 0, 1); -static K_SEM_DEFINE(sem_spi_tx, 0, 1); +K_SEM_STATIC_DEFINE(sem_spi_rx, 0, 1); +K_SEM_STATIC_DEFINE(sem_spi_tx, 0, 1); static inline int spi_send(struct net_buf *buf) { diff --git a/samples/bluetooth/iso_broadcast/src/main.c b/samples/bluetooth/iso_broadcast/src/main.c index 2e177b8099cda..315502310c5ab 100644 --- a/samples/bluetooth/iso_broadcast/src/main.c +++ b/samples/bluetooth/iso_broadcast/src/main.c @@ -16,8 +16,8 @@ NET_BUF_POOL_FIXED_DEFINE(bis_tx_pool, BIS_ISO_CHAN_COUNT, BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU), 8, NULL); -static K_SEM_DEFINE(sem_big_cmplt, 0, BIS_ISO_CHAN_COUNT); -static K_SEM_DEFINE(sem_big_term, 0, BIS_ISO_CHAN_COUNT); +K_SEM_STATIC_DEFINE(sem_big_cmplt, 0, BIS_ISO_CHAN_COUNT); +K_SEM_STATIC_DEFINE(sem_big_term, 0, BIS_ISO_CHAN_COUNT); #define INITIAL_TIMEOUT_COUNTER (BIG_TERMINATE_TIMEOUT_US / BIG_SDU_INTERVAL_US) diff --git a/samples/bluetooth/iso_broadcast_benchmark/src/broadcaster.c b/samples/bluetooth/iso_broadcast_benchmark/src/broadcaster.c index cf35d78e30b56..567ed0811481a 100644 --- a/samples/bluetooth/iso_broadcast_benchmark/src/broadcaster.c +++ b/samples/bluetooth/iso_broadcast_benchmark/src/broadcaster.c @@ -26,8 +26,8 @@ LOG_MODULE_REGISTER(iso_broadcast_broadcaster, LOG_LEVEL_DBG); NET_BUF_POOL_FIXED_DEFINE(bis_tx_pool, CONFIG_BT_ISO_TX_BUF_COUNT, BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU), 8, NULL); -static K_SEM_DEFINE(sem_big_complete, 0, 1); -static K_SEM_DEFINE(sem_big_term, 0, 1); +K_SEM_STATIC_DEFINE(sem_big_complete, 0, 1); +K_SEM_STATIC_DEFINE(sem_big_term, 0, 1); static struct k_work_delayable iso_send_work; static uint32_t iso_send_count; static uint8_t iso_data[CONFIG_BT_ISO_TX_MTU]; diff --git a/samples/bluetooth/iso_broadcast_benchmark/src/receiver.c b/samples/bluetooth/iso_broadcast_benchmark/src/receiver.c index 65c6a3c3ddc9a..2de77686fccf2 100644 --- a/samples/bluetooth/iso_broadcast_benchmark/src/receiver.c +++ b/samples/bluetooth/iso_broadcast_benchmark/src/receiver.c @@ -41,12 +41,12 @@ static size_t big_sync_count; static struct iso_recv_stats stats_current_sync; static struct iso_recv_stats stats_overall; -static K_SEM_DEFINE(sem_per_adv, 0, 1); -static K_SEM_DEFINE(sem_per_sync, 0, 1); -static K_SEM_DEFINE(sem_per_sync_lost, 0, 1); -static K_SEM_DEFINE(sem_per_big_info, 0, 1); -static K_SEM_DEFINE(sem_big_sync, 0, 1); -static K_SEM_DEFINE(sem_big_sync_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_adv, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_big_info, 0, 1); +K_SEM_STATIC_DEFINE(sem_big_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_big_sync_lost, 0, 1); static const char *phy2str(uint8_t phy) { diff --git 
a/samples/bluetooth/iso_connected_benchmark/src/main.c b/samples/bluetooth/iso_connected_benchmark/src/main.c index a3382efbe6f57..887f8290209eb 100644 --- a/samples/bluetooth/iso_connected_benchmark/src/main.c +++ b/samples/bluetooth/iso_connected_benchmark/src/main.c @@ -72,12 +72,12 @@ NET_BUF_POOL_FIXED_DEFINE(tx_pool, 1, BT_ISO_SDU_BUF_SIZE(CONFIG_BT_ISO_TX_MTU), 8, NULL); static uint8_t iso_data[CONFIG_BT_ISO_TX_MTU]; -static K_SEM_DEFINE(sem_adv, 0, 1); -static K_SEM_DEFINE(sem_iso_accept, 0, 1); -static K_SEM_DEFINE(sem_iso_connected, 0, CONFIG_BT_ISO_MAX_CHAN); -static K_SEM_DEFINE(sem_iso_disconnected, 0, CONFIG_BT_ISO_MAX_CHAN); -static K_SEM_DEFINE(sem_connected, 0, 1); -static K_SEM_DEFINE(sem_disconnected, 0, 1); +K_SEM_STATIC_DEFINE(sem_adv, 0, 1); +K_SEM_STATIC_DEFINE(sem_iso_accept, 0, 1); +K_SEM_STATIC_DEFINE(sem_iso_connected, 0, CONFIG_BT_ISO_MAX_CHAN); +K_SEM_STATIC_DEFINE(sem_iso_disconnected, 0, CONFIG_BT_ISO_MAX_CHAN); +K_SEM_STATIC_DEFINE(sem_connected, 0, 1); +K_SEM_STATIC_DEFINE(sem_disconnected, 0, 1); static struct bt_iso_chan_io_qos iso_tx_qos = { .sdu = DEFAULT_CIS_SDU_SIZE, /* bytes */ diff --git a/samples/bluetooth/iso_receive/src/main.c b/samples/bluetooth/iso_receive/src/main.c index 8078fc25fa5a0..a020f74e517b1 100644 --- a/samples/bluetooth/iso_receive/src/main.c +++ b/samples/bluetooth/iso_receive/src/main.c @@ -30,12 +30,12 @@ static bt_addr_le_t per_addr; static uint8_t per_sid; static uint16_t per_interval_ms; -static K_SEM_DEFINE(sem_per_adv, 0, 1); -static K_SEM_DEFINE(sem_per_sync, 0, 1); -static K_SEM_DEFINE(sem_per_sync_lost, 0, 1); -static K_SEM_DEFINE(sem_per_big_info, 0, 1); -static K_SEM_DEFINE(sem_big_sync, 0, BIS_ISO_CHAN_COUNT); -static K_SEM_DEFINE(sem_big_sync_lost, 0, BIS_ISO_CHAN_COUNT); +K_SEM_STATIC_DEFINE(sem_per_adv, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_big_info, 0, 1); +K_SEM_STATIC_DEFINE(sem_big_sync, 0, BIS_ISO_CHAN_COUNT); +K_SEM_STATIC_DEFINE(sem_big_sync_lost, 0, BIS_ISO_CHAN_COUNT); /* The devicetree node identifier for the "led0" alias. */ #define LED0_NODE DT_ALIAS(led0) diff --git a/samples/bluetooth/mesh_demo/src/main.c b/samples/bluetooth/mesh_demo/src/main.c index e08cad2d5ff53..83aa8140ea663 100644 --- a/samples/bluetooth/mesh_demo/src/main.c +++ b/samples/bluetooth/mesh_demo/src/main.c @@ -254,7 +254,7 @@ uint16_t board_set_target(void) return target; } -static K_SEM_DEFINE(tune_sem, 0, 1); +K_SEM_STATIC_DEFINE(tune_sem, 0, 1); static const char *tune_str; void board_play(const char *str) diff --git a/samples/bluetooth/periodic_sync/src/main.c b/samples/bluetooth/periodic_sync/src/main.c index 335b6741403af..7a6f67c226f5a 100644 --- a/samples/bluetooth/periodic_sync/src/main.c +++ b/samples/bluetooth/periodic_sync/src/main.c @@ -16,9 +16,9 @@ static bool per_adv_found; static bt_addr_le_t per_addr; static uint8_t per_sid; -static K_SEM_DEFINE(sem_per_adv, 0, 1); -static K_SEM_DEFINE(sem_per_sync, 0, 1); -static K_SEM_DEFINE(sem_per_sync_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_adv, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync_lost, 0, 1); /* The devicetree node identifier for the "led0" alias. 
*/ #define LED0_NODE DT_ALIAS(led0) diff --git a/samples/bluetooth/peripheral_past/src/main.c b/samples/bluetooth/peripheral_past/src/main.c index 9df890bb16109..ac9ee03c2ed2d 100644 --- a/samples/bluetooth/peripheral_past/src/main.c +++ b/samples/bluetooth/peripheral_past/src/main.c @@ -12,8 +12,8 @@ static struct bt_conn *default_conn; -static K_SEM_DEFINE(sem_per_sync, 0, 1); -static K_SEM_DEFINE(sem_per_sync_lost, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync, 0, 1); +K_SEM_STATIC_DEFINE(sem_per_sync_lost, 0, 1); static void sync_cb(struct bt_le_per_adv_sync *sync, struct bt_le_per_adv_sync_synced_info *info) diff --git a/samples/bluetooth/unicast_audio_client/src/main.c b/samples/bluetooth/unicast_audio_client/src/main.c index 00c91ef5b1c36..c76fa591a84e7 100644 --- a/samples/bluetooth/unicast_audio_client/src/main.c +++ b/samples/bluetooth/unicast_audio_client/src/main.c @@ -44,16 +44,16 @@ static struct bt_audio_lc3_preset codec_configuration = BT_AUDIO_LC3_UNICAST_PRESET_16_2_1(BT_AUDIO_LOCATION_FRONT_LEFT, BT_AUDIO_CONTEXT_TYPE_UNSPECIFIED); -static K_SEM_DEFINE(sem_connected, 0, 1); -static K_SEM_DEFINE(sem_disconnected, 0, 1); -static K_SEM_DEFINE(sem_mtu_exchanged, 0, 1); -static K_SEM_DEFINE(sem_security_updated, 0, 1); -static K_SEM_DEFINE(sem_sinks_discovered, 0, 1); -static K_SEM_DEFINE(sem_sources_discovered, 0, 1); -static K_SEM_DEFINE(sem_stream_configured, 0, 1); -static K_SEM_DEFINE(sem_stream_qos, 0, 1); -static K_SEM_DEFINE(sem_stream_enabled, 0, 1); -static K_SEM_DEFINE(sem_stream_started, 0, 1); +K_SEM_STATIC_DEFINE(sem_connected, 0, 1); +K_SEM_STATIC_DEFINE(sem_disconnected, 0, 1); +K_SEM_STATIC_DEFINE(sem_mtu_exchanged, 0, 1); +K_SEM_STATIC_DEFINE(sem_security_updated, 0, 1); +K_SEM_STATIC_DEFINE(sem_sinks_discovered, 0, 1); +K_SEM_STATIC_DEFINE(sem_sources_discovered, 0, 1); +K_SEM_STATIC_DEFINE(sem_stream_configured, 0, 1); +K_SEM_STATIC_DEFINE(sem_stream_qos, 0, 1); +K_SEM_STATIC_DEFINE(sem_stream_enabled, 0, 1); +K_SEM_STATIC_DEFINE(sem_stream_started, 0, 1); static uint16_t get_and_incr_seq_num(const struct bt_audio_stream *stream) { diff --git a/samples/bluetooth/unicast_audio_server/src/main.c b/samples/bluetooth/unicast_audio_server/src/main.c index fefc6a50ce67f..95e8ee553ed73 100644 --- a/samples/bluetooth/unicast_audio_server/src/main.c +++ b/samples/bluetooth/unicast_audio_server/src/main.c @@ -50,7 +50,7 @@ static size_t configured_source_stream_count; static const struct bt_codec_qos_pref qos_pref = BT_CODEC_QOS_PREF(true, BT_GAP_LE_PHY_2M, 0x02, 10, 40000, 40000, 40000, 40000); -static K_SEM_DEFINE(sem_disconnected, 0, 1); +K_SEM_STATIC_DEFINE(sem_disconnected, 0, 1); static uint8_t unicast_server_addata[] = { BT_UUID_16_ENCODE(BT_UUID_ASCS_VAL), /* ASCS UUID */ diff --git a/samples/boards/bbc_microbit/pong/src/main.c b/samples/boards/bbc_microbit/pong/src/main.c index 78af921ea4ff5..05b806098b279 100644 --- a/samples/boards/bbc_microbit/pong/src/main.c +++ b/samples/boards/bbc_microbit/pong/src/main.c @@ -92,7 +92,7 @@ static int64_t ended; static struct k_work_delayable refresh; /* Semaphore to indicate that there was an update to the display */ -static K_SEM_DEFINE(disp_update, 0, 1); +K_SEM_STATIC_DEFINE(disp_update, 0, 1); /* X coordinate of the left corner of the paddle */ static volatile int paddle_x = PADDLE_MIN; diff --git a/samples/boards/nrf/nrfx_prs/src/main.c b/samples/boards/nrf/nrfx_prs/src/main.c index c827833d3b06b..4e1b259aeeb98 100644 --- a/samples/boards/nrf/nrfx_prs/src/main.c +++ b/samples/boards/nrf/nrfx_prs/src/main.c @@ -32,13 
+32,13 @@ static nrfx_uarte_t uarte = NRFX_UARTE_INSTANCE(2); static bool spim_initialized; static bool uarte_initialized; static volatile size_t received; -static K_SEM_DEFINE(transfer_finished, 0, 1); +K_SEM_STATIC_DEFINE(transfer_finished, 0, 1); static enum { PERFORM_TRANSFER, SWITCH_PERIPHERAL } user_request; -static K_SEM_DEFINE(button_pressed, 0, 1); +K_SEM_STATIC_DEFINE(button_pressed, 0, 1); static void sw0_handler(const struct device *dev, struct gpio_callback *cb, uint32_t pins) diff --git a/samples/drivers/i2s/echo/src/main.c b/samples/drivers/i2s/echo/src/main.c index c642b331963cf..f93c1e72c6435 100644 --- a/samples/drivers/i2s/echo/src/main.c +++ b/samples/drivers/i2s/echo/src/main.c @@ -45,7 +45,7 @@ K_MEM_SLAB_DEFINE_STATIC(mem_slab, BLOCK_SIZE, BLOCK_COUNT, 4); static int16_t echo_block[SAMPLES_PER_BLOCK]; static volatile bool echo_enabled = true; -static K_SEM_DEFINE(toggle_transfer, 1, 1); +K_SEM_STATIC_DEFINE(toggle_transfer, 1, 1); #if DT_NODE_HAS_STATUS(SW0_NODE, okay) static void sw0_handler(const struct device *dev, struct gpio_callback *cb, diff --git a/samples/kernel/condition_variables/simple/src/main.c b/samples/kernel/condition_variables/simple/src/main.c index e25deac58b533..dfe9efb72d398 100644 --- a/samples/kernel/condition_variables/simple/src/main.c +++ b/samples/kernel/condition_variables/simple/src/main.c @@ -18,7 +18,7 @@ static struct k_thread t[NUM_THREADS]; K_MUTEX_DEFINE(mutex); K_CONDVAR_DEFINE(condvar); -static int done; +static volatile int done; void worker_thread(void *p1, void *p2, void *p3) { diff --git a/samples/net/cloud/google_iot_mqtt/src/dhcp.c b/samples/net/cloud/google_iot_mqtt/src/dhcp.c index bc06cb424b293..57e66a1dc3c7d 100644 --- a/samples/net/cloud/google_iot_mqtt/src/dhcp.c +++ b/samples/net/cloud/google_iot_mqtt/src/dhcp.c @@ -20,7 +20,7 @@ LOG_MODULE_DECLARE(net_google_iot_mqtt, LOG_LEVEL_DBG); static struct net_mgmt_event_callback mgmt_cb; /* Semaphore to indicate a lease has been acquired. 
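 * (K_SEM_STATIC_DEFINE, used below and throughout this series,
 * evidently folds the "static" storage class into the macro itself,
 * which is why the explicit "static" qualifier is dropped from each
 * converted definition.)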
*/ -static K_SEM_DEFINE(got_address, 0, 1); +K_SEM_STATIC_DEFINE(got_address, 0, 1); static void handler(struct net_mgmt_event_callback *cb, uint32_t mgmt_event, diff --git a/samples/net/cloud/mqtt_azure/src/main.c b/samples/net/cloud/mqtt_azure/src/main.c index 73d54b960c43f..d037e8c2bb93e 100644 --- a/samples/net/cloud/mqtt_azure/src/main.c +++ b/samples/net/cloud/mqtt_azure/src/main.c @@ -54,7 +54,7 @@ static struct zsock_addrinfo hints; static struct zsock_addrinfo *haddr; #endif -static K_SEM_DEFINE(mqtt_start, 0, 1); +K_SEM_STATIC_DEFINE(mqtt_start, 0, 1); /* Application TLS configuration details */ #define TLS_SNI_HOSTNAME CONFIG_SAMPLE_CLOUD_AZURE_HOSTNAME diff --git a/samples/net/sockets/packet/src/packet.c b/samples/net/sockets/packet/src/packet.c index 48e189f9e3e1c..c6bdd5d1d56b5 100644 --- a/samples/net/sockets/packet/src/packet.c +++ b/samples/net/sockets/packet/src/packet.c @@ -36,7 +36,7 @@ struct packet_data { static struct packet_data packet; static bool finish; -static K_SEM_DEFINE(iface_up, 0, 1); +K_SEM_STATIC_DEFINE(iface_up, 0, 1); static void recv_packet(void); static void send_packet(void); diff --git a/samples/sensor/fxos8700-hid/src/main.c b/samples/sensor/fxos8700-hid/src/main.c index f433ad90285ba..7880193a71cc9 100644 --- a/samples/sensor/fxos8700-hid/src/main.c +++ b/samples/sensor/fxos8700-hid/src/main.c @@ -35,7 +35,7 @@ static const uint8_t hid_report_desc[] = HID_MOUSE_REPORT_DESC(2); static uint32_t def_val[4]; static volatile uint8_t status[4]; -static K_SEM_DEFINE(sem, 0, 1); /* starts off "not available" */ +K_SEM_STATIC_DEFINE(sem, 0, 1); /* starts off "not available" */ static struct gpio_callback callback[4]; #define MOUSE_BTN_REPORT_POS 0 diff --git a/samples/subsys/ipc/ipc_service/static_vrings/remote/src/main.c b/samples/subsys/ipc/ipc_service/static_vrings/remote/src/main.c index 80fbdf5fcbd06..f5bf179858ac3 100644 --- a/samples/subsys/ipc/ipc_service/static_vrings/remote/src/main.c +++ b/samples/subsys/ipc/ipc_service/static_vrings/remote/src/main.c @@ -20,13 +20,13 @@ static volatile uint8_t ipc0A_received_data; static volatile uint8_t ipc0B_received_data; static volatile uint8_t ipc1_received_data; -static K_SEM_DEFINE(ipc0A_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc0B_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc1_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0A_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0B_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc0A_data_sem, 0, 1); -static K_SEM_DEFINE(ipc0B_data_sem, 0, 1); -static K_SEM_DEFINE(ipc1_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0A_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0B_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_data_sem, 0, 1); /* * ==> THREAD 0A (IPC instance 0 - endpoint A) <== diff --git a/samples/subsys/ipc/ipc_service/static_vrings/src/main.c b/samples/subsys/ipc/ipc_service/static_vrings/src/main.c index 9f26ad54854a3..a1f2539a5dd23 100644 --- a/samples/subsys/ipc/ipc_service/static_vrings/src/main.c +++ b/samples/subsys/ipc/ipc_service/static_vrings/src/main.c @@ -20,13 +20,13 @@ static volatile uint8_t ipc0A_received_data; static volatile uint8_t ipc0B_received_data; static volatile uint8_t ipc1_received_data; -static K_SEM_DEFINE(ipc0A_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc0B_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc1_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0A_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0B_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc0A_data_sem, 0, 1); -static 
K_SEM_DEFINE(ipc0B_data_sem, 0, 1); -static K_SEM_DEFINE(ipc1_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0A_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc0B_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_data_sem, 0, 1); /* * ==> THREAD 0A (IPC instance 0 - endpoint A) <== diff --git a/samples/subsys/ipc/openamp/remote/src/main.c b/samples/subsys/ipc/openamp/remote/src/main.c index 06124069f49c0..9fafbb630cc5e 100644 --- a/samples/subsys/ipc/openamp/remote/src/main.c +++ b/samples/subsys/ipc/openamp/remote/src/main.c @@ -92,8 +92,8 @@ struct virtio_dispatch dispatch = { .notify = virtio_notify, }; -static K_SEM_DEFINE(data_sem, 0, 1); -static K_SEM_DEFINE(data_rx_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_rx_sem, 0, 1); static void platform_ipm_callback(const struct device *dev, void *context, uint32_t id, volatile void *data) @@ -111,7 +111,7 @@ int endpoint_cb(struct rpmsg_endpoint *ept, void *data, return RPMSG_SUCCESS; } -static K_SEM_DEFINE(ept_sem, 0, 1); +K_SEM_STATIC_DEFINE(ept_sem, 0, 1); struct rpmsg_endpoint my_ept; struct rpmsg_endpoint *ep = &my_ept; diff --git a/samples/subsys/ipc/openamp/src/main.c b/samples/subsys/ipc/openamp/src/main.c index bdbfc17bb4394..71fd345fb334d 100644 --- a/samples/subsys/ipc/openamp/src/main.c +++ b/samples/subsys/ipc/openamp/src/main.c @@ -105,8 +105,8 @@ struct virtio_dispatch dispatch = { .notify = virtio_notify, }; -static K_SEM_DEFINE(data_sem, 0, 1); -static K_SEM_DEFINE(data_rx_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_rx_sem, 0, 1); static void platform_ipm_callback(const struct device *dev, void *context, uint32_t id, volatile void *data) @@ -124,7 +124,7 @@ int endpoint_cb(struct rpmsg_endpoint *ept, void *data, return RPMSG_SUCCESS; } -static K_SEM_DEFINE(ept_sem, 0, 1); +K_SEM_STATIC_DEFINE(ept_sem, 0, 1); struct rpmsg_endpoint my_ept; struct rpmsg_endpoint *ep = &my_ept; diff --git a/samples/subsys/ipc/openamp_rsc_table/src/main_remote.c b/samples/subsys/ipc/openamp_rsc_table/src/main_remote.c index b1cddc19de469..11a9e4caaa6ac 100644 --- a/samples/subsys/ipc/openamp_rsc_table/src/main_remote.c +++ b/samples/subsys/ipc/openamp_rsc_table/src/main_remote.c @@ -82,9 +82,9 @@ static struct rpmsg_rcv_msg sc_msg = {.data = rx_sc_msg}; static struct rpmsg_endpoint tty_ept; static struct rpmsg_rcv_msg tty_msg; -static K_SEM_DEFINE(data_sem, 0, 1); -static K_SEM_DEFINE(data_sc_sem, 0, 1); -static K_SEM_DEFINE(data_tty_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_sc_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_tty_sem, 0, 1); static void platform_ipm_callback(const struct device *dev, void *context, uint32_t id, volatile void *data) diff --git a/samples/subsys/ipc/rpmsg_service/remote/src/main.c b/samples/subsys/ipc/rpmsg_service/remote/src/main.c index 5b804387e5ed1..17489aee9d055 100644 --- a/samples/subsys/ipc/rpmsg_service/remote/src/main.c +++ b/samples/subsys/ipc/rpmsg_service/remote/src/main.c @@ -22,8 +22,8 @@ static struct k_thread thread_data; static volatile unsigned int received_data; -static K_SEM_DEFINE(data_sem, 0, 1); -static K_SEM_DEFINE(data_rx_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_rx_sem, 0, 1); int endpoint_cb(struct rpmsg_endpoint *ept, void *data, size_t len, uint32_t src, void *priv) diff --git a/samples/subsys/ipc/rpmsg_service/src/main.c b/samples/subsys/ipc/rpmsg_service/src/main.c index cd0cd4e949a7b..1e439f5bfa76c 100644 --- a/samples/subsys/ipc/rpmsg_service/src/main.c +++ 
b/samples/subsys/ipc/rpmsg_service/src/main.c @@ -23,7 +23,7 @@ static struct k_thread thread_data; static volatile unsigned int received_data; -static K_SEM_DEFINE(data_rx_sem, 0, 1); +K_SEM_STATIC_DEFINE(data_rx_sem, 0, 1); int endpoint_cb(struct rpmsg_endpoint *ept, void *data, size_t len, uint32_t src, void *priv) diff --git a/samples/subsys/logging/multidomain/remote/src/ipc_service.c b/samples/subsys/logging/multidomain/remote/src/ipc_service.c index 5b776bdbd809d..5d4fab1eaf522 100644 --- a/samples/subsys/logging/multidomain/remote/src/ipc_service.c +++ b/samples/subsys/logging/multidomain/remote/src/ipc_service.c @@ -14,8 +14,8 @@ LOG_MODULE_DECLARE(app); K_THREAD_STACK_DEFINE(ipc1_stack, STACKSIZE); static volatile uint8_t ipc1_received_data; -static K_SEM_DEFINE(ipc1_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc1_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_data_sem, 0, 1); /* * ==> THREAD 1 (IPC instance 1) <== diff --git a/samples/subsys/logging/multidomain/src/ipc_service.c b/samples/subsys/logging/multidomain/src/ipc_service.c index a837e7bdc33de..2f0d01b41a5cb 100644 --- a/samples/subsys/logging/multidomain/src/ipc_service.c +++ b/samples/subsys/logging/multidomain/src/ipc_service.c @@ -15,8 +15,8 @@ LOG_MODULE_DECLARE(app); K_THREAD_STACK_DEFINE(ipc1_stack, STACKSIZE); static volatile uint8_t ipc1_received_data; -static K_SEM_DEFINE(ipc1_bound_sem, 0, 1); -static K_SEM_DEFINE(ipc1_data_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_bound_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipc1_data_sem, 0, 1); /* * ==> THREAD 1 (IPC instance 1) <== diff --git a/samples/subsys/mgmt/hawkbit/src/dhcp.c b/samples/subsys/mgmt/hawkbit/src/dhcp.c index 3f72a03f3f388..3c5455a6cf18f 100644 --- a/samples/subsys/mgmt/hawkbit/src/dhcp.c +++ b/samples/subsys/mgmt/hawkbit/src/dhcp.c @@ -16,7 +16,7 @@ static struct net_mgmt_event_callback mgmt_cb; /* Semaphore to indicate a lease has been acquired */ -static K_SEM_DEFINE(got_address, 0, 1); +K_SEM_STATIC_DEFINE(got_address, 0, 1); static void handler(struct net_mgmt_event_callback *cb, uint32_t mgmt_event, diff --git a/samples/subsys/usb/hid-cdc/src/main.c b/samples/subsys/usb/hid-cdc/src/main.c index e257676ec3f39..6170836436192 100644 --- a/samples/subsys/usb/hid-cdc/src/main.c +++ b/samples/subsys/usb/hid-cdc/src/main.c @@ -130,8 +130,8 @@ static inline struct app_evt_t *app_evt_alloc(void) static const uint8_t hid_mouse_report_desc[] = HID_MOUSE_REPORT_DESC(2); static const uint8_t hid_kbd_report_desc[] = HID_KEYBOARD_REPORT_DESC(); -static K_SEM_DEFINE(evt_sem, 0, 1); /* starts off "not available" */ -static K_SEM_DEFINE(usb_sem, 1, 1); /* starts off "available" */ +K_SEM_STATIC_DEFINE(evt_sem, 0, 1); /* starts off "not available" */ +K_SEM_STATIC_DEFINE(usb_sem, 1, 1); /* starts off "available" */ static struct gpio_callback callback[4]; static char data_buf_mouse[64], data_buf_kbd[64]; diff --git a/samples/subsys/usb/hid-mouse/src/main.c b/samples/subsys/usb/hid-mouse/src/main.c index e04b05b04c1f1..01385e07d8102 100644 --- a/samples/subsys/usb/hid-mouse/src/main.c +++ b/samples/subsys/usb/hid-mouse/src/main.c @@ -54,7 +54,7 @@ static const uint8_t hid_report_desc[] = HID_MOUSE_REPORT_DESC(2); static uint8_t def_val[4]; static volatile uint8_t status[4]; -static K_SEM_DEFINE(sem, 0, 1); /* starts off "not available" */ +K_SEM_STATIC_DEFINE(sem, 0, 1); /* starts off "not available" */ static struct gpio_callback callback[4]; static enum usb_dc_status_code usb_status; diff --git a/samples/userspace/shared_mem/prj.conf 
b/samples/userspace/shared_mem/prj.conf index b582b76a869cd..109c49fe466c5 100644 --- a/samples/userspace/shared_mem/prj.conf +++ b/samples/userspace/shared_mem/prj.conf @@ -1,2 +1,3 @@ CONFIG_TEST=y CONFIG_USERSPACE=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/scripts/build/gen_kobject_list.py b/scripts/build/gen_kobject_list.py index 728a0eda94a33..e910157d8a499 100755 --- a/scripts/build/gen_kobject_list.py +++ b/scripts/build/gen_kobject_list.py @@ -92,7 +92,6 @@ kobjects = OrderedDict([ ("k_mem_slab", (None, False, True)), ("k_msgq", (None, False, True)), - ("k_mutex", (None, False, True)), ("k_pipe", (None, False, True)), ("k_queue", (None, False, True)), ("k_poll_signal", (None, False, True)), @@ -107,6 +106,8 @@ ("sys_mutex", (None, True, False)), ("k_futex", (None, True, False)), ("k_condvar", (None, False, True)), + ("k_zync", (None, False, True)), + ("k_mutex", (None, False, False)), ("k_event", ("CONFIG_EVENTS", False, True)), ("ztest_suite_node", ("CONFIG_ZTEST", True, False)), ("ztest_suite_stats", ("CONFIG_ZTEST", True, False)), @@ -116,7 +117,20 @@ ("rtio_iodev", ("CONFIG_RTIO", False, False)) ]) +# Some types are handled as "aliases" at this level, so they can be +# represented identically at the syscall handler layer but still look +# like distinct struct types in C code (for the benefit of +# compiler-provided typesafety) +kobj_aliases = { + "k_condvar" : "k_zync", + "k_mutex" : "k_zync", + "k_sem" : "k_zync", +} + def kobject_to_enum(kobj): + if kobj in kobj_aliases: + kobj = kobj_aliases[kobj] + if kobj.startswith("k_") or kobj.startswith("z_"): name = kobj[2:] else: @@ -913,6 +927,9 @@ def write_kobj_types_output(fp): if kobj == "device": continue + if kobj in kobj_aliases: + continue + if dep: fp.write("#ifdef %s\n" % dep) @@ -934,6 +951,9 @@ def write_kobj_otype_output(fp): if kobj == "device": continue + if kobj in kobj_aliases: + continue + if dep: fp.write("#ifdef %s\n" % dep) @@ -959,6 +979,9 @@ def write_kobj_size_output(fp): if not alloc: continue + if kobj in kobj_aliases: + continue + if dep: fp.write("#ifdef %s\n" % dep) diff --git a/subsys/bluetooth/host/smp.c b/subsys/bluetooth/host/smp.c index 9d689475bc3e8..c47175e172627 100644 --- a/subsys/bluetooth/host/smp.c +++ b/subsys/bluetooth/host/smp.c @@ -280,7 +280,7 @@ static bool bondable = IS_ENABLED(CONFIG_BT_BONDABLE); static bool oobd_present; static bool sc_supported; static const uint8_t *sc_public_key; -static K_SEM_DEFINE(sc_local_pkey_ready, 0, 1); +K_SEM_STATIC_DEFINE(sc_local_pkey_ready, 0, 1); /* Pointer to internal data is used to mark that callbacks of given SMP channel are not initialized. * Value of NULL represents no authenticaiton capabilities and cannot be used for that purpose. 
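
(Note on the kobject aliases added to gen_kobject_list.py above: since
k_sem, k_mutex and k_condvar all resolve to k_zync at the syscall
handler layer, dynamic allocation uses the single K_OBJ_ZYNC type, as
in the picolibc hooks earlier in this series. A minimal sketch,
assuming CONFIG_DYNAMIC_OBJECTS=y:

	struct k_sem *sem = k_object_alloc(K_OBJ_ZYNC);
	struct k_mutex *mtx = k_object_alloc(K_OBJ_ZYNC);

Compile-time type safety is preserved because the C struct types stay
distinct; only the kernel-side validation type is shared.)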
diff --git a/subsys/net/ip/dhcpv4.c b/subsys/net/ip/dhcpv4.c index b22c7bedc2a99..6ebd3972e5467 100644 --- a/subsys/net/ip/dhcpv4.c +++ b/subsys/net/ip/dhcpv4.c @@ -32,7 +32,7 @@ LOG_MODULE_REGISTER(net_dhcpv4, CONFIG_NET_DHCPV4_LOG_LEVEL); #define PKT_WAIT_TIME K_SECONDS(1) -static K_MUTEX_DEFINE(lock); +K_MUTEX_STATIC_DEFINE(lock); static sys_slist_t dhcpv4_ifaces; static struct k_work_delayable timeout_work; diff --git a/subsys/net/ip/net_if.c b/subsys/net/ip/net_if.c index f8a4d2ebc595e..2f0f5e9de24c6 100644 --- a/subsys/net/ip/net_if.c +++ b/subsys/net/ip/net_if.c @@ -39,7 +39,7 @@ LOG_MODULE_REGISTER(net_if, CONFIG_NET_IF_LOG_LEVEL); #define MAX_RANDOM_NUMER (3) #define MAX_RANDOM_DENOM (2) -static K_MUTEX_DEFINE(lock); +K_MUTEX_STATIC_DEFINE(lock); /* net_if dedicated section limiters */ extern struct net_if _net_if_list_start[]; diff --git a/subsys/net/ip/net_mgmt.c b/subsys/net/ip/net_mgmt.c index 956ecf27064b6..3253044963663 100644 --- a/subsys/net/ip/net_mgmt.c +++ b/subsys/net/ip/net_mgmt.c @@ -33,8 +33,8 @@ struct mgmt_event_wait { struct net_if *iface; }; -static K_SEM_DEFINE(network_event, 0, K_SEM_MAX_LIMIT); -static K_MUTEX_DEFINE(net_mgmt_lock); +K_SEM_STATIC_DEFINE(network_event, 0, K_SEM_MAX_LIMIT); +K_MUTEX_STATIC_DEFINE(net_mgmt_lock); K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE); static struct k_thread mgmt_thread_data; diff --git a/subsys/net/ip/route.c b/subsys/net/ip/route.c index 0f2921feb6955..cf060a15dec4b 100644 --- a/subsys/net/ip/route.c +++ b/subsys/net/ip/route.c @@ -44,7 +44,7 @@ static sys_slist_t active_route_lifetime_timers; /* Timer that manages expired route entries. */ static struct k_work_delayable route_lifetime_timer; -static K_MUTEX_DEFINE(lock); +K_MUTEX_STATIC_DEFINE(lock); static void net_route_nexthop_remove(struct net_nbr *nbr) { diff --git a/subsys/net/ip/tcp.c b/subsys/net/ip/tcp.c index 73356d5e49b12..a046e2a540d84 100644 --- a/subsys/net/ip/tcp.c +++ b/subsys/net/ip/tcp.c @@ -50,7 +50,7 @@ static int tcp_window = static sys_slist_t tcp_conns = SYS_SLIST_STATIC_INIT(&tcp_conns); -static K_MUTEX_DEFINE(tcp_lock); +K_MUTEX_STATIC_DEFINE(tcp_lock); K_MEM_SLAB_DEFINE_STATIC(tcp_conns_slab, sizeof(struct tcp), CONFIG_NET_MAX_CONTEXTS, 4); diff --git a/subsys/net/lib/capture/capture.c b/subsys/net/lib/capture/capture.c index 91ed775ad4588..03ba2af6196a8 100644 --- a/subsys/net/lib/capture/capture.c +++ b/subsys/net/lib/capture/capture.c @@ -33,7 +33,7 @@ LOG_MODULE_REGISTER(net_capture, CONFIG_NET_CAPTURE_LOG_LEVEL); #define DEBUG_TX 0 #endif -static K_MUTEX_DEFINE(lock); +K_MUTEX_STATIC_DEFINE(lock); NET_PKT_SLAB_DEFINE(capture_pkts, CONFIG_NET_CAPTURE_PKT_COUNT); diff --git a/subsys/net/lib/config/init.c b/subsys/net/lib/config/init.c index e0489ab8b266c..79e9bbc686ce3 100644 --- a/subsys/net/lib/config/init.c +++ b/subsys/net/lib/config/init.c @@ -31,8 +31,8 @@ LOG_MODULE_REGISTER(net_config, CONFIG_NET_CONFIG_LOG_LEVEL); extern const struct log_backend *log_backend_net_get(void); extern int net_init_clock_via_sntp(void); -static K_SEM_DEFINE(waiter, 0, 1); -static K_SEM_DEFINE(counter, 0, UINT_MAX); +K_SEM_STATIC_DEFINE(waiter, 0, 1); +K_SEM_STATIC_DEFINE(counter, 0, UINT_MAX); static atomic_t services_flags; #if defined(CONFIG_NET_NATIVE) diff --git a/subsys/net/lib/lwm2m/lwm2m_pull_context.c b/subsys/net/lib/lwm2m/lwm2m_pull_context.c index e1eac08554ff5..43c02ed6f37c9 100644 --- a/subsys/net/lib/lwm2m/lwm2m_pull_context.c +++ b/subsys/net/lib/lwm2m/lwm2m_pull_context.c @@ -21,7 +21,7 @@ 
LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include "lwm2m_pull_context.h" #include "lwm2m_engine.h" -static K_SEM_DEFINE(lwm2m_pull_sem, 1, 1); +K_SEM_STATIC_DEFINE(lwm2m_pull_sem, 1, 1); #if defined(CONFIG_LWM2M_FIRMWARE_UPDATE_PULL_COAP_PROXY_SUPPORT) #define COAP2COAP_PROXY_URI_PATH "coap2coap" diff --git a/subsys/net/lib/lwm2m/lwm2m_registry.c b/subsys/net/lib/lwm2m/lwm2m_registry.c index 13ab78e8963fc..05ef14b235b29 100644 --- a/subsys/net/lib/lwm2m/lwm2m_registry.c +++ b/subsys/net/lib/lwm2m/lwm2m_registry.c @@ -43,7 +43,7 @@ LOG_MODULE_REGISTER(LOG_MODULE_NAME); #define QUEUE_OPT_MAX_LEN 2 /* "Q" */ /* Thread safety */ -static K_MUTEX_DEFINE(registry_lock); +K_MUTEX_STATIC_DEFINE(registry_lock); void lwm2m_registry_lock(void) { diff --git a/subsys/net/lib/sockets/socket_dispatcher.c b/subsys/net/lib/sockets/socket_dispatcher.c index df30defd8d2fc..a5c8e54ac1059 100644 --- a/subsys/net/lib/sockets/socket_dispatcher.c +++ b/subsys/net/lib/sockets/socket_dispatcher.c @@ -22,7 +22,7 @@ __net_socket struct dispatcher_context { static struct dispatcher_context dispatcher_context[CONFIG_NET_SOCKETS_OFFLOAD_DISPATCHER_CONTEXT_MAX]; -static K_MUTEX_DEFINE(dispatcher_lock); +K_MUTEX_STATIC_DEFINE(dispatcher_lock); static int sock_dispatch_create(int family, int type, int proto); diff --git a/subsys/portability/cmsis_rtos_v1/cmsis_semaphore.c b/subsys/portability/cmsis_rtos_v1/cmsis_semaphore.c index dbf21f61acbf9..16011befd7959 100644 --- a/subsys/portability/cmsis_rtos_v1/cmsis_semaphore.c +++ b/subsys/portability/cmsis_rtos_v1/cmsis_semaphore.c @@ -88,7 +88,7 @@ osStatus osSemaphoreRelease(osSemaphoreId semaphore_id) } /* All tokens have already been released */ - if (k_sem_count_get(semaphore) == semaphore->limit) { + if (k_sem_count_get(semaphore) == Z_PAIR_ZYNC(&semaphore->zp)->cfg.max_val) { return osErrorResource; } diff --git a/subsys/portability/cmsis_rtos_v2/mutex.c b/subsys/portability/cmsis_rtos_v2/mutex.c index d29b4c7fdc223..36d69de99caf3 100644 --- a/subsys/portability/cmsis_rtos_v2/mutex.c +++ b/subsys/portability/cmsis_rtos_v2/mutex.c @@ -147,12 +147,7 @@ osThreadId_t osMutexGetOwner(osMutexId_t mutex_id) return NULL; } - /* Mutex was not obtained before */ - if (mutex->z_mutex.lock_count == 0U) { - return NULL; - } - - return get_cmsis_thread_id(mutex->z_mutex.owner); + return get_cmsis_thread_id(Z_PAIR_ZYNC(&mutex->z_mutex.zp)->owner); } const char *osMutexGetName(osMutexId_t mutex_id) diff --git a/subsys/portability/cmsis_rtos_v2/semaphore.c b/subsys/portability/cmsis_rtos_v2/semaphore.c index a4271f0527ccc..a6ad23ba5973e 100644 --- a/subsys/portability/cmsis_rtos_v2/semaphore.c +++ b/subsys/portability/cmsis_rtos_v2/semaphore.c @@ -113,7 +113,7 @@ osStatus_t osSemaphoreRelease(osSemaphoreId_t semaphore_id) /* All tokens have already been released */ if (k_sem_count_get(&semaphore->z_semaphore) == - semaphore->z_semaphore.limit) { + Z_PAIR_ZYNC(&semaphore->z_semaphore.zp)->cfg.max_val) { return osErrorResource; } diff --git a/subsys/random/rand32_ctr_drbg.c b/subsys/random/rand32_ctr_drbg.c index 7d4b858a0686d..0dfe55a6a2ebd 100644 --- a/subsys/random/rand32_ctr_drbg.c +++ b/subsys/random/rand32_ctr_drbg.c @@ -26,7 +26,7 @@ #endif /* CONFIG_MBEDTLS */ -static K_SEM_DEFINE(state_sem, 1, 1); +K_SEM_STATIC_DEFINE(state_sem, 1, 1); /* * entropy_dev is initialized at runtime to allow first time initialization diff --git a/subsys/testsuite/ztest/src/ztest_new.c b/subsys/testsuite/ztest/src/ztest_new.c index 4d0821111937a..b956be02ca208 100644 --- 
a/subsys/testsuite/ztest/src/ztest_new.c +++ b/subsys/testsuite/ztest/src/ztest_new.c @@ -110,8 +110,7 @@ static int cleanup_test(struct ztest_unit_test *test) #define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE) static struct k_thread cpuhold_threads[MAX_NUM_CPUHOLD]; K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD, CPUHOLD_STACK_SZ); - -static struct k_sem cpuhold_sem; +K_SEM_DEFINE(cpuhold_sem, 0, K_SEM_MAX_LIMIT); volatile int cpuhold_active; /* "Holds" a CPU for use with the "1cpu" test cases. Note that we diff --git a/subsys/tracing/tracing_core.c b/subsys/tracing/tracing_core.c index 62112ab714506..6727d49e84fe9 100644 --- a/subsys/tracing/tracing_core.c +++ b/subsys/tracing/tracing_core.c @@ -48,7 +48,7 @@ static struct tracing_backend *working_backend; static k_tid_t tracing_thread_tid; static struct k_thread tracing_thread; static struct k_timer tracing_thread_timer; -static K_SEM_DEFINE(tracing_thread_sem, 0, 1); +K_SEM_STATIC_DEFINE(tracing_thread_sem, 0, 1); static K_THREAD_STACK_DEFINE(tracing_thread_stack, CONFIG_TRACING_THREAD_STACK_SIZE); diff --git a/tests/bluetooth/bsim_bt/bsim_test_adv_chain/src/main.c b/tests/bluetooth/bsim_bt/bsim_test_adv_chain/src/main.c index feb3c255d5ecc..c49086647eed7 100644 --- a/tests/bluetooth/bsim_bt/bsim_test_adv_chain/src/main.c +++ b/tests/bluetooth/bsim_bt/bsim_test_adv_chain/src/main.c @@ -38,7 +38,7 @@ BT_AD_DATA_MFG_DATA_SIZE), \ CONFIG_BT_CTLR_ADV_DATA_LEN_MAX) -static K_SEM_DEFINE(sem_recv, 0, 1); +K_SEM_STATIC_DEFINE(sem_recv, 0, 1); extern enum bst_result_t bst_result; diff --git a/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_sink_test.c b/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_sink_test.c index 12837f994e09a..df956976fb687 100644 --- a/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_sink_test.c +++ b/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_sink_test.c @@ -27,8 +27,8 @@ static struct bt_audio_lc3_preset preset_16_2_1 = BT_AUDIO_LC3_BROADCAST_PRESET_16_2_1(BT_AUDIO_LOCATION_FRONT_LEFT, BT_AUDIO_CONTEXT_TYPE_UNSPECIFIED); -static K_SEM_DEFINE(sem_started, 0U, ARRAY_SIZE(streams)); -static K_SEM_DEFINE(sem_stopped, 0U, ARRAY_SIZE(streams)); +K_SEM_STATIC_DEFINE(sem_started, 0U, ARRAY_SIZE(streams)); +K_SEM_STATIC_DEFINE(sem_stopped, 0U, ARRAY_SIZE(streams)); /* Create a mask for the maximum BIS we can sync to using the number of streams * we have. 
We add an additional 1 since the bis indexes start from 1 and not diff --git a/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_source_test.c b/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_source_test.c index 6dfc5abe9d665..0171f5ef8e7f0 100644 --- a/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_source_test.c +++ b/tests/bluetooth/bsim_bt/bsim_test_audio/src/broadcast_source_test.c @@ -35,8 +35,8 @@ static struct bt_audio_lc3_preset preset_16_2_2 = BT_AUDIO_CONTEXT_TYPE_UNSPECIFIED); CREATE_FLAG(flag_stopping); -static K_SEM_DEFINE(sem_started, 0U, ARRAY_SIZE(streams)); -static K_SEM_DEFINE(sem_stopped, 0U, ARRAY_SIZE(streams)); +K_SEM_STATIC_DEFINE(sem_started, 0U, ARRAY_SIZE(streams)); +K_SEM_STATIC_DEFINE(sem_stopped, 0U, ARRAY_SIZE(streams)); static void started_cb(struct bt_audio_stream *stream) { diff --git a/tests/bluetooth/bsim_bt/bsim_test_l2cap/src/main_l2cap_ecred.c b/tests/bluetooth/bsim_bt/bsim_test_l2cap/src/main_l2cap_ecred.c index 19a18712508bd..79e9d2a701aa5 100644 --- a/tests/bluetooth/bsim_bt/bsim_test_l2cap/src/main_l2cap_ecred.c +++ b/tests/bluetooth/bsim_bt/bsim_test_l2cap/src/main_l2cap_ecred.c @@ -59,10 +59,10 @@ CREATE_FLAG(unsequenced_data); #define T_PRIORITY 5 static K_THREAD_STACK_ARRAY_DEFINE(stack_area, L2CAP_CHANNELS, T_STACK_SIZE); -static K_SEM_DEFINE(chan_conn_sem, 0, L2CAP_CHANNELS); -static K_SEM_DEFINE(all_chan_conn_sem, 0, 1); -static K_SEM_DEFINE(all_chan_disconn_sem, 0, 1); -static K_SEM_DEFINE(sent_sem, 0, L2CAP_CHANNELS); +K_SEM_STATIC_DEFINE(chan_conn_sem, 0, L2CAP_CHANNELS); +K_SEM_STATIC_DEFINE(all_chan_conn_sem, 0, 1); +K_SEM_STATIC_DEFINE(all_chan_disconn_sem, 0, 1); +K_SEM_STATIC_DEFINE(sent_sem, 0, L2CAP_CHANNELS); static void init_workqs(void) { diff --git a/tests/bluetooth/bsim_bt/bsim_test_mesh/src/test_advertiser.c b/tests/bluetooth/bsim_bt/bsim_test_mesh/src/test_advertiser.c index ec6b13eb08688..f942fe727a358 100644 --- a/tests/bluetooth/bsim_bt/bsim_test_mesh/src/test_advertiser.c +++ b/tests/bluetooth/bsim_bt/bsim_test_mesh/src/test_advertiser.c @@ -54,7 +54,7 @@ static struct bt_mesh_test_gatt gatt_param; static int num_adv_sent; static uint8_t previous_checker = 0xff; -static K_SEM_DEFINE(observer_sem, 0, 1); +K_SEM_STATIC_DEFINE(observer_sem, 0, 1); static void test_tx_init(void) { diff --git a/tests/bluetooth/hci_prop_evt/src/main.c b/tests/bluetooth/hci_prop_evt/src/main.c index 978d3f95d911b..55e28860913dd 100644 --- a/tests/bluetooth/hci_prop_evt/src/main.c +++ b/tests/bluetooth/hci_prop_evt/src/main.c @@ -275,7 +275,7 @@ static void bt_recv_job_submit(struct net_buf *buf) } /* Semaphore to test if the prop callback was called. */ -static K_SEM_DEFINE(prop_cb_sem, 0, 1); +K_SEM_STATIC_DEFINE(prop_cb_sem, 0, 1); /* Used to verify prop event data. */ static uint8_t *prop_cb_data; diff --git a/tests/bluetooth/host_long_adv_recv/src/main.c b/tests/bluetooth/host_long_adv_recv/src/main.c index 2e1b2b7a7f218..431acba44e7ef 100644 --- a/tests/bluetooth/host_long_adv_recv/src/main.c +++ b/tests/bluetooth/host_long_adv_recv/src/main.c @@ -285,7 +285,7 @@ static void bt_recv_job_submit(struct net_buf *buf) } /* Semaphore to test if the prop callback was called. 
*/ -static K_SEM_DEFINE(prop_cb_sem, 0, 1); +K_SEM_STATIC_DEFINE(prop_cb_sem, 0, 1); static void *adv_report_evt(struct net_buf *buf, uint8_t data_len, uint16_t evt_type, const bt_addr_le_t *const addr) diff --git a/tests/boards/intel_adsp/smoke/src/ipm.c b/tests/boards/intel_adsp/smoke/src/ipm.c index f16cab35fedbc..aa828fa2d2cc1 100644 --- a/tests/boards/intel_adsp/smoke/src/ipm.c +++ b/tests/boards/intel_adsp/smoke/src/ipm.c @@ -11,7 +11,7 @@ #define ID_INBOUND 0xfffffff0 #define ID_INVALID 0xffffffff -static K_SEM_DEFINE(ipm_sem, 0, 1); +K_SEM_STATIC_DEFINE(ipm_sem, 0, 1); static const uint32_t msg[] = { 29, 15, 58, 71, 99 }; diff --git a/tests/drivers/spi/spi_loopback/src/spi.c b/tests/drivers/spi/spi_loopback/src/spi.c index 5390f9a69bdb8..775dc45300039 100644 --- a/tests/drivers/spi/spi_loopback/src/spi.c +++ b/tests/drivers/spi/spi_loopback/src/spi.c @@ -429,7 +429,7 @@ static struct k_poll_event async_evt = K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &async_sig); -static K_SEM_DEFINE(caller, 0, 1); +K_SEM_STATIC_DEFINE(caller, 0, 1); K_THREAD_STACK_DEFINE(spi_async_stack, STACK_SIZE); static int result = 1; diff --git a/tests/kernel/condvar/condvar_api/prj.conf b/tests/kernel/condvar/condvar_api/prj.conf index 3fe44daafe588..494f00d915868 100644 --- a/tests/kernel/condvar/condvar_api/prj.conf +++ b/tests/kernel/condvar/condvar_api/prj.conf @@ -4,3 +4,4 @@ CONFIG_TEST_USERSPACE=y CONFIG_ZTEST_FATAL_HOOK=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_NEW_API=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/device/prj.conf b/tests/kernel/device/prj.conf index bf4d69ece2085..0de7a57b2a190 100644 --- a/tests/kernel/device/prj.conf +++ b/tests/kernel/device/prj.conf @@ -3,3 +3,4 @@ CONFIG_TEST_USERSPACE=y CONFIG_PM_DEVICE=y CONFIG_PM_DEVICE_RUNTIME=y CONFIG_ZTEST_NEW_API=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/events/event_api/src/test_event_apis.c b/tests/kernel/events/event_api/src/test_event_apis.c index f4eb0f80fc2e9..3094f862a113d 100644 --- a/tests/kernel/events/event_api/src/test_event_apis.c +++ b/tests/kernel/events/event_api/src/test_event_apis.c @@ -24,8 +24,8 @@ static K_THREAD_STACK_DEFINE(sextra2, STACK_SIZE); static K_EVENT_DEFINE(test_event); static K_EVENT_DEFINE(sync_event); -static K_SEM_DEFINE(receiver_sem, 0, 1); -static K_SEM_DEFINE(sync_sem, 0, 1); +K_SEM_STATIC_DEFINE(receiver_sem, 0, 1); +K_SEM_STATIC_DEFINE(sync_sem, 0, 1); volatile static uint32_t test_events; diff --git a/tests/kernel/events/sys_event/src/main.c b/tests/kernel/events/sys_event/src/main.c index 0dc34fb6fc1e8..d46c7c429eacb 100644 --- a/tests/kernel/events/sys_event/src/main.c +++ b/tests/kernel/events/sys_event/src/main.c @@ -28,8 +28,8 @@ static K_EVENT_DEFINE(sync_event); static struct k_event init_event; static struct k_event deliver_event; -static K_SEM_DEFINE(receiver_sem, 0, 1); -static K_SEM_DEFINE(sync_sem, 0, 1); +K_SEM_STATIC_DEFINE(receiver_sem, 0, 1); +K_SEM_STATIC_DEFINE(sync_sem, 0, 1); volatile static uint32_t test_events; diff --git a/tests/kernel/fpu_sharing/generic/src/load_store.c b/tests/kernel/fpu_sharing/generic/src/load_store.c index d1ff353f46cdc..5214053bf2315 100644 --- a/tests/kernel/fpu_sharing/generic/src/load_store.c +++ b/tests/kernel/fpu_sharing/generic/src/load_store.c @@ -93,7 +93,7 @@ static volatile unsigned int load_store_high_count; static volatile bool test_exited; /* Semaphore for signaling end of test */ -static K_SEM_DEFINE(test_exit_sem, 0, 1); +K_SEM_STATIC_DEFINE(test_exit_sem, 0, 1); /** * @brief 
Low priority FPU load/store thread diff --git a/tests/kernel/fpu_sharing/generic/src/pi.c b/tests/kernel/fpu_sharing/generic/src/pi.c index 48491cd465171..417f83ea4cbfe 100644 --- a/tests/kernel/fpu_sharing/generic/src/pi.c +++ b/tests/kernel/fpu_sharing/generic/src/pi.c @@ -55,7 +55,7 @@ static volatile unsigned int calc_pi_high_count; static volatile bool test_exited; /* Semaphore for signaling end of test */ -static K_SEM_DEFINE(test_exit_sem, 0, 1); +K_SEM_STATIC_DEFINE(test_exit_sem, 0, 1); /** * @brief Entry point for the low priority pi compute task diff --git a/tests/kernel/mem_protect/futex/prj.conf b/tests/kernel/mem_protect/futex/prj.conf index d8e436e42a277..96ae8ce058836 100644 --- a/tests/kernel/mem_protect/futex/prj.conf +++ b/tests/kernel/mem_protect/futex/prj.conf @@ -3,3 +3,4 @@ CONFIG_ZTEST_NEW_API=y CONFIG_IRQ_OFFLOAD=y CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/mem_protect/mem_protect/prj.conf b/tests/kernel/mem_protect/mem_protect/prj.conf index 26e3e5c04df69..87e0c932be8f8 100644 --- a/tests/kernel/mem_protect/mem_protect/prj.conf +++ b/tests/kernel/mem_protect/mem_protect/prj.conf @@ -6,3 +6,5 @@ CONFIG_MAX_THREAD_BYTES=4 CONFIG_TEST_USERSPACE=y CONFIG_APPLICATION_DEFINED_SYSCALL=y CONFIG_PIPES=y +CONFIG_ZYNC_MAX_VAL=y +CONFIG_ZYNC_PRIO_BOOST=y diff --git a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c index 57a25f00ff440..d7fd657c03e06 100644 --- a/tests/kernel/mem_protect/mem_protect/src/mem_domain.c +++ b/tests/kernel/mem_protect/mem_protect/src/mem_domain.c @@ -294,7 +294,7 @@ ZTEST(mem_protect_domain, test_mem_domain_boot_threads) } static ZTEST_BMEM volatile bool spin_done; -static K_SEM_DEFINE(spin_sem, 0, 1); +K_SEM_STATIC_DEFINE(spin_sem, 0, 1); static void spin_entry(void *p1, void *p2, void *p3) { diff --git a/tests/kernel/mem_protect/obj_validation/prj.conf b/tests/kernel/mem_protect/obj_validation/prj.conf index 99a7ff95e16b2..2bf7d5eb5c320 100644 --- a/tests/kernel/mem_protect/obj_validation/prj.conf +++ b/tests/kernel/mem_protect/obj_validation/prj.conf @@ -3,3 +3,4 @@ CONFIG_ZTEST_NEW_API=y CONFIG_TEST_USERSPACE=y CONFIG_DYNAMIC_OBJECTS=y CONFIG_HEAP_MEM_POOL_SIZE=8192 +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/mem_protect/sys_sem/prj.conf b/tests/kernel/mem_protect/sys_sem/prj.conf index 57b97c472ee4d..f6cb427c77e42 100644 --- a/tests/kernel/mem_protect/sys_sem/prj.conf +++ b/tests/kernel/mem_protect/sys_sem/prj.conf @@ -2,3 +2,4 @@ CONFIG_ZTEST=y CONFIG_ZTEST_NEW_API=y CONFIG_IRQ_OFFLOAD=y CONFIG_TEST_USERSPACE=y +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/mem_protect/userspace/prj.conf b/tests/kernel/mem_protect/userspace/prj.conf index 471a4af8f4153..1d002f3c16570 100644 --- a/tests/kernel/mem_protect/userspace/prj.conf +++ b/tests/kernel/mem_protect/userspace/prj.conf @@ -5,3 +5,4 @@ CONFIG_INIT_STACKS=y CONFIG_APPLICATION_DEFINED_SYSCALL=y CONFIG_TEST_USERSPACE=y CONFIG_PIPES=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/msgq/msgq_api/prj.conf b/tests/kernel/msgq/msgq_api/prj.conf index 57b97c472ee4d..f6cb427c77e42 100644 --- a/tests/kernel/msgq/msgq_api/prj.conf +++ b/tests/kernel/msgq/msgq_api/prj.conf @@ -2,3 +2,4 @@ CONFIG_ZTEST=y CONFIG_ZTEST_NEW_API=y CONFIG_IRQ_OFFLOAD=y CONFIG_TEST_USERSPACE=y +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/mutex/mutex_api/prj.conf b/tests/kernel/mutex/mutex_api/prj.conf index d8e436e42a277..e9c38872657c2 100644 --- 
a/tests/kernel/mutex/mutex_api/prj.conf +++ b/tests/kernel/mutex/mutex_api/prj.conf @@ -3,3 +3,5 @@ CONFIG_ZTEST_NEW_API=y CONFIG_IRQ_OFFLOAD=y CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 +CONFIG_ZYNC_RECURSIVE=y +CONFIG_ZYNC_PRIO_BOOST=y diff --git a/tests/kernel/mutex/mutex_api/src/test_mutex_apis.c b/tests/kernel/mutex/mutex_api/src/test_mutex_apis.c index 171bac0fcf765..32abd5b4237b4 100644 --- a/tests/kernel/mutex/mutex_api/src/test_mutex_apis.c +++ b/tests/kernel/mutex/mutex_api/src/test_mutex_apis.c @@ -248,8 +248,11 @@ ZTEST_USER(mutex_api, test_mutex_recursive) k_mutex_init(&mutex); /**TESTPOINT: when mutex has no owner, we cannot unlock it */ +#ifndef CONFIG_ZYNC_VALIDATE + /* This is now an assertion w/zync that will blow up the test */ zassert_true(k_mutex_unlock(&mutex) == -EINVAL, "fail: mutex has no owner"); +#endif zassert_true(k_mutex_lock(&mutex, K_NO_WAIT) == 0, "Failed to lock mutex"); @@ -313,9 +316,11 @@ ZTEST_USER(mutex_api_1cpu, test_mutex_priority_inheritance) /* wait for spawn thread t1 to take action */ k_msleep(TIMEOUT); +#ifndef CONFIG_ZYNC_VALIDATE /**TESTPOINT: The current thread does not own the mutex.*/ zassert_true(k_mutex_unlock(&mutex) == -EPERM, "fail: current thread does not own the mutex"); +#endif /* spawn a higher priority thread t2 for holding the mutex */ k_thread_create(&tdata2, tstack2, STACK_SIZE, diff --git a/tests/kernel/mutex/mutex_error_case/prj.conf b/tests/kernel/mutex/mutex_error_case/prj.conf index e9ab90559cad2..0ae3f3e64053b 100644 --- a/tests/kernel/mutex/mutex_error_case/prj.conf +++ b/tests/kernel/mutex/mutex_error_case/prj.conf @@ -5,3 +5,4 @@ CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y CONFIG_PIPES=y +CONFIG_ZYNC_PRIO_BOOST=y diff --git a/tests/kernel/mutex/sys_mutex/prj.conf b/tests/kernel/mutex/sys_mutex/prj.conf index 19669cdffb6e5..35ab65a499ec6 100644 --- a/tests/kernel/mutex/sys_mutex/prj.conf +++ b/tests/kernel/mutex/sys_mutex/prj.conf @@ -2,3 +2,7 @@ CONFIG_MAIN_THREAD_PRIORITY=10 CONFIG_ZTEST=y CONFIG_ZTEST_NEW_API=y CONFIG_TEST_USERSPACE=y +CONFIG_ZYNC_USERSPACE_COMPAT=y +CONFIG_ZYNC_PRIO_BOOST=y +CONFIG_ZYNC_RECURSIVE=y +CONFIG_ZYNC_VALIDATE=n diff --git a/tests/kernel/pipe/pipe/prj.conf b/tests/kernel/pipe/pipe/prj.conf index 36ceb4e45010a..2c25131c4d6d7 100644 --- a/tests/kernel/pipe/pipe/prj.conf +++ b/tests/kernel/pipe/pipe/prj.conf @@ -3,3 +3,4 @@ CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_NEW_API=y CONFIG_PIPES=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/pipe/pipe_api/prj.conf b/tests/kernel/pipe/pipe_api/prj.conf index 8afa8604acad6..304459689726d 100644 --- a/tests/kernel/pipe/pipe_api/prj.conf +++ b/tests/kernel/pipe/pipe_api/prj.conf @@ -6,3 +6,4 @@ CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y CONFIG_ZTEST_NEW_API=y CONFIG_PIPES=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/poll/prj.conf b/tests/kernel/poll/prj.conf index 43621e0d7eb52..9f8cd4c9cb8ae 100644 --- a/tests/kernel/poll/prj.conf +++ b/tests/kernel/poll/prj.conf @@ -7,3 +7,5 @@ CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y CONFIG_ZTEST_ASSERT_HOOK=y CONFIG_SYS_CLOCK_EXISTS=y +CONFIG_ZYNC_USERSPACE_COMPAT=y +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/poll/src/test_poll.c b/tests/kernel/poll/src/test_poll.c index 3faa591826a1e..4e3e4345bb2b7 100644 --- a/tests/kernel/poll/src/test_poll.c +++ b/tests/kernel/poll/src/test_poll.c @@ -178,7 +178,7 @@ ZTEST_USER(poll_api_1cpu, test_poll_no_wait) static struct k_msgq wait_msgq; static struct k_msgq 
*wait_msgq_ptr; -static K_SEM_DEFINE(wait_sem, 0, 1); +K_SEM_STATIC_DEFINE(wait_sem, 0, 1); static K_FIFO_DEFINE(wait_fifo); static struct k_poll_signal wait_signal = K_POLL_SIGNAL_INITIALIZER(wait_signal); @@ -599,7 +599,7 @@ ZTEST(poll_api_1cpu, test_poll_cancel_main_high_prio) } /* verify multiple pollers */ -static K_SEM_DEFINE(multi_sem, 0, 1); +K_SEM_STATIC_DEFINE(multi_sem, 0, 1); static void multi_lowprio(void *p1, void *p2, void *p3) { @@ -616,7 +616,7 @@ static void multi_lowprio(void *p1, void *p2, void *p3) zassert_equal(rc, 0, ""); } -static K_SEM_DEFINE(multi_reply, 0, 1); +K_SEM_STATIC_DEFINE(multi_reply, 0, 1); static void multi(void *p1, void *p2, void *p3) { @@ -632,7 +632,7 @@ static void multi(void *p1, void *p2, void *p3) k_sem_give(&multi_reply); } -static K_SEM_DEFINE(multi_ready_sem, 1, 1); +K_SEM_STATIC_DEFINE(multi_ready_sem, 1, 1); /** * @brief Test polling of multiple events diff --git a/tests/kernel/queue/prj.conf b/tests/kernel/queue/prj.conf index 44334ab367d36..51a82bfef92e4 100644 --- a/tests/kernel/queue/prj.conf +++ b/tests/kernel/queue/prj.conf @@ -3,3 +3,4 @@ CONFIG_ZTEST_NEW_API=y CONFIG_IRQ_OFFLOAD=y CONFIG_TEST_USERSPACE=y CONFIG_ZTEST_FATAL_HOOK=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/sched/schedule_api/prj.conf b/tests/kernel/sched/schedule_api/prj.conf index b24271c4232c4..c918611bde6b6 100644 --- a/tests/kernel/sched/schedule_api/prj.conf +++ b/tests/kernel/sched/schedule_api/prj.conf @@ -8,3 +8,4 @@ CONFIG_MAX_THREAD_BYTES=5 CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/sched/schedule_api/prj_dumb.conf b/tests/kernel/sched/schedule_api/prj_dumb.conf index 9d996d2c9fb24..48f6e6fdf422f 100644 --- a/tests/kernel/sched/schedule_api/prj_dumb.conf +++ b/tests/kernel/sched/schedule_api/prj_dumb.conf @@ -6,3 +6,4 @@ CONFIG_SCHED_DUMB=y CONFIG_MAX_THREAD_BYTES=5 CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/sched/schedule_api/prj_multiq.conf b/tests/kernel/sched/schedule_api/prj_multiq.conf index d1074c0758574..afa821f82f7eb 100644 --- a/tests/kernel/sched/schedule_api/prj_multiq.conf +++ b/tests/kernel/sched/schedule_api/prj_multiq.conf @@ -6,3 +6,4 @@ CONFIG_SCHED_MULTIQ=y CONFIG_MAX_THREAD_BYTES=5 CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c b/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c index a851544107377..7772ce983d60e 100644 --- a/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c +++ b/tests/kernel/sched/schedule_api/src/test_priority_scheduling.c @@ -22,13 +22,13 @@ BUILD_ASSERT(NUM_THREAD <= MAX_NUM_THREAD); /* Semaphore on which Ztest thread wait */ -static K_SEM_DEFINE(sema2, 0, NUM_THREAD); +K_SEM_STATIC_DEFINE(sema2, 0, NUM_THREAD); /* Semaphore on which application threads wait */ -static K_SEM_DEFINE(sema3, 0, NUM_THREAD); +K_SEM_STATIC_DEFINE(sema3, 0, NUM_THREAD); /* Semaphore to flag the next iteration */ -static K_SEM_DEFINE(sema4, 0, NUM_THREAD); +K_SEM_STATIC_DEFINE(sema4, 0, NUM_THREAD); static int thread_idx; static struct k_thread t[NUM_THREAD]; diff --git a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c index af2d1ff9884e7..461549e896fbb 100644 --- a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c +++ 
b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c @@ -30,7 +30,7 @@ BUILD_ASSERT(NUM_THREAD <= MAX_NUM_THREAD); #define BUSY_MS (SLICE_SIZE + 20) static struct k_thread t[NUM_THREAD]; -static K_SEM_DEFINE(sema1, 0, NUM_THREAD); +K_SEM_STATIC_DEFINE(sema1, 0, NUM_THREAD); /* elapsed_slice taken by last thread */ static int64_t elapsed_slice; @@ -144,7 +144,7 @@ ZTEST(threads_scheduling, test_slice_scheduling) static volatile int32_t perthread_count; static volatile uint32_t last_cyc; static volatile bool perthread_running; -static K_SEM_DEFINE(perthread_sem, 0, 1); +K_SEM_STATIC_DEFINE(perthread_sem, 0, 1); static void slice_expired(struct k_thread *thread, void *data) { diff --git a/tests/kernel/semaphore/semaphore/prj.conf b/tests/kernel/semaphore/semaphore/prj.conf index c882831edb0e3..6ea632d004db2 100644 --- a/tests/kernel/semaphore/semaphore/prj.conf +++ b/tests/kernel/semaphore/semaphore/prj.conf @@ -4,3 +4,4 @@ CONFIG_IRQ_OFFLOAD=y CONFIG_TEST_USERSPACE=y CONFIG_ZTEST_FATAL_HOOK=y CONFIG_PIPES=y +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/semaphore/sys_sem/prj.conf b/tests/kernel/semaphore/sys_sem/prj.conf index 2abfa9b67e93d..744dbc7c41402 100644 --- a/tests/kernel/semaphore/sys_sem/prj.conf +++ b/tests/kernel/semaphore/sys_sem/prj.conf @@ -2,3 +2,4 @@ CONFIG_ZTEST=y CONFIG_ZTEST_NEW_API=y CONFIG_TEST_USERSPACE=y CONFIG_ZTEST_FATAL_HOOK=y +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/sleep/prj.conf b/tests/kernel/sleep/prj.conf index d658dc251226b..fabd1f5f5a341 100644 --- a/tests/kernel/sleep/prj.conf +++ b/tests/kernel/sleep/prj.conf @@ -3,3 +3,4 @@ CONFIG_ZTEST=y CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_NEW_API=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/stack/stack/prj.conf b/tests/kernel/stack/stack/prj.conf index 9baa4dcc23c6d..67e3108e77b7d 100644 --- a/tests/kernel/stack/stack/prj.conf +++ b/tests/kernel/stack/stack/prj.conf @@ -4,3 +4,4 @@ CONFIG_TEST_USERSPACE=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_FATAL_HOOK=y CONFIG_ZTEST_NEW_API=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/threads/dynamic_thread/prj.conf b/tests/kernel/threads/dynamic_thread/prj.conf index 8174ea885293a..818d7a5ca5f76 100644 --- a/tests/kernel/threads/dynamic_thread/prj.conf +++ b/tests/kernel/threads/dynamic_thread/prj.conf @@ -2,3 +2,4 @@ CONFIG_ZTEST=y CONFIG_ZTEST_NEW_API=y CONFIG_TEST_USERSPACE=y CONFIG_HEAP_MEM_POOL_SIZE=20000 +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/threads/dynamic_thread/src/main.c b/tests/kernel/threads/dynamic_thread/src/main.c index 84b9805e76bb5..27652dc0c5743 100644 --- a/tests/kernel/threads/dynamic_thread/src/main.c +++ b/tests/kernel/threads/dynamic_thread/src/main.c @@ -11,8 +11,8 @@ #define STACKSIZE (256 + CONFIG_TEST_EXTRA_STACK_SIZE) static K_THREAD_STACK_DEFINE(dyn_thread_stack, STACKSIZE); -static K_SEM_DEFINE(start_sem, 0, 1); -static K_SEM_DEFINE(end_sem, 0, 1); +K_SEM_STATIC_DEFINE(start_sem, 0, 1); +K_SEM_STATIC_DEFINE(end_sem, 0, 1); static ZTEST_BMEM struct k_thread *dyn_thread; static struct k_thread *dynamic_threads[CONFIG_MAX_THREAD_BYTES * 8]; diff --git a/tests/kernel/threads/thread_init/prj.conf b/tests/kernel/threads/thread_init/prj.conf index e2cf6732b3f60..043b3da875d98 100644 --- a/tests/kernel/threads/thread_init/prj.conf +++ b/tests/kernel/threads/thread_init/prj.conf @@ -2,3 +2,4 @@ CONFIG_ZTEST=y CONFIG_ZTEST_NEW_API=y CONFIG_TEST_USERSPACE=y CONFIG_SMP=n +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/kernel/workq/user_work/prj.conf 
b/tests/kernel/workq/user_work/prj.conf index a93770f3515ee..cf5b0d9007042 100644 --- a/tests/kernel/workq/user_work/prj.conf +++ b/tests/kernel/workq/user_work/prj.conf @@ -7,3 +7,4 @@ CONFIG_TEST_USERSPACE=y CONFIG_POLL=y CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_MAX_THREAD_BYTES=3 +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/kernel/workq/work/prj.conf b/tests/kernel/workq/work/prj.conf index 8dda06ff724d8..8ae48463ed6c8 100644 --- a/tests/kernel/workq/work/prj.conf +++ b/tests/kernel/workq/work/prj.conf @@ -9,3 +9,4 @@ CONFIG_NUM_COOP_PRIORITIES=4 CONFIG_NUM_PREEMPT_PRIORITIES=4 CONFIG_SYSTEM_WORKQUEUE_PRIORITY=-3 CONFIG_ZTEST_THREAD_PRIORITY=-2 +CONFIG_ZYNC_MAX_VAL=y diff --git a/tests/lib/mem_alloc/prj.conf b/tests/lib/mem_alloc/prj.conf index 87969fc03ff9c..e0fe42c38b56a 100644 --- a/tests/lib/mem_alloc/prj.conf +++ b/tests/lib/mem_alloc/prj.conf @@ -1,3 +1,4 @@ CONFIG_ZTEST=y CONFIG_MINIMAL_LIBC_MALLOC_ARENA_SIZE=2048 CONFIG_TEST_USERSPACE=y +CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/net/checksum_offload/src/main.c b/tests/net/checksum_offload/src/main.c index 90beb4deb3ccb..507dc485599ae 100644 --- a/tests/net/checksum_offload/src/main.c +++ b/tests/net/checksum_offload/src/main.c @@ -80,7 +80,7 @@ static bool test_failed; static bool test_started; static bool start_receiving; -static K_SEM_DEFINE(wait_data, 0, UINT_MAX); +K_SEM_STATIC_DEFINE(wait_data, 0, UINT_MAX); #define WAIT_TIME K_SECONDS(1) diff --git a/tests/net/ptp/clock/src/main.c b/tests/net/ptp/clock/src/main.c index 34cb6e19f3c67..67e38bf806c5c 100644 --- a/tests/net/ptp/clock/src/main.c +++ b/tests/net/ptp/clock/src/main.c @@ -70,7 +70,7 @@ static int non_ptp_interface; static bool test_failed; static bool test_started; -static K_SEM_DEFINE(wait_data, 0, UINT_MAX); +K_SEM_STATIC_DEFINE(wait_data, 0, UINT_MAX); #define WAIT_TIME K_SECONDS(1) diff --git a/tests/net/socket/offload_dispatcher/src/main.c b/tests/net/socket/offload_dispatcher/src/main.c index cf0d9f2d16147..29e6f8deb4423 100644 --- a/tests/net/socket/offload_dispatcher/src/main.c +++ b/tests/net/socket/offload_dispatcher/src/main.c @@ -388,7 +388,7 @@ NET_DEVICE_OFFLOAD_INIT(offloaded_2, "offloaded_2", offloaded_2_init, NULL, static uint8_t lladdr[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static struct in_addr in4addr_my = { { { 192, 0, 2, 1 } } }; -static K_SEM_DEFINE(test_native_send_called, 0, 1); +K_SEM_STATIC_DEFINE(test_native_send_called, 0, 1); static void dummy_native_iface_init(struct net_if *iface) { diff --git a/tests/net/tcp/src/main.c b/tests/net/tcp/src/main.c index 5c2232d2fee33..b23951f1d345b 100644 --- a/tests/net/tcp/src/main.c +++ b/tests/net/tcp/src/main.c @@ -98,7 +98,7 @@ static uint8_t test_case_no; static uint32_t seq; static uint32_t ack; -static K_SEM_DEFINE(test_sem, 0, 1); +K_SEM_STATIC_DEFINE(test_sem, 0, 1); static bool sem; enum test_state { diff --git a/tests/net/tx_timestamp/src/main.c b/tests/net/tx_timestamp/src/main.c index 573dcef58efaf..a93cc7e75f525 100644 --- a/tests/net/tx_timestamp/src/main.c +++ b/tests/net/tx_timestamp/src/main.c @@ -74,7 +74,7 @@ static struct net_if_timestamp_cb timestamp_cb; static struct net_if_timestamp_cb timestamp_cb_2; static struct net_if_timestamp_cb timestamp_cb_3; -static K_SEM_DEFINE(wait_data, 0, UINT_MAX); +K_SEM_STATIC_DEFINE(wait_data, 0, UINT_MAX); #define WAIT_TIME K_SECONDS(1) diff --git a/tests/net/virtual/src/main.c b/tests/net/virtual/src/main.c index da24e1e1c4130..99999e4a1291a 100644 --- a/tests/net/virtual/src/main.c +++ b/tests/net/virtual/src/main.c @@ -84,7 
+84,7 @@ static bool test_failed; static bool test_started; static bool data_received; -static K_SEM_DEFINE(wait_data, 0, UINT_MAX); +K_SEM_STATIC_DEFINE(wait_data, 0, UINT_MAX); #define WAIT_TIME K_SECONDS(1) diff --git a/tests/net/vlan/src/main.c b/tests/net/vlan/src/main.c index 7f5781a923150..077722c71ae99 100644 --- a/tests/net/vlan/src/main.c +++ b/tests/net/vlan/src/main.c @@ -81,7 +81,7 @@ static struct net_context *udp_v6_ctx; static bool test_failed; static bool test_started; -static K_SEM_DEFINE(wait_data, 0, UINT_MAX); +K_SEM_STATIC_DEFINE(wait_data, 0, UINT_MAX); #define WAIT_TIME K_SECONDS(1) diff --git a/tests/subsys/logging/log_benchmark/testcase.yaml b/tests/subsys/logging/log_benchmark/testcase.yaml index 71fad9f34c778..6c63fa93551b4 100644 --- a/tests/subsys/logging/log_benchmark/testcase.yaml +++ b/tests/subsys/logging/log_benchmark/testcase.yaml @@ -25,3 +25,4 @@ tests: - CONFIG_LOG_MODE_DEFERRED=y - CONFIG_CBPRINTF_COMPLETE=y - CONFIG_TEST_USERSPACE=y + - CONFIG_ZYNC_USERSPACE_COMPAT=y diff --git a/tests/subsys/logging/log_core_additional/src/log_test.c b/tests/subsys/logging/log_core_additional/src/log_test.c index 90f2b1ebb8c56..2359a9be0ea93 100644 --- a/tests/subsys/logging/log_core_additional/src/log_test.c +++ b/tests/subsys/logging/log_core_additional/src/log_test.c @@ -22,7 +22,7 @@ #define LOG_MODULE_NAME log_test LOG_MODULE_REGISTER(LOG_MODULE_NAME, LOG_LEVEL_INF); -static K_SEM_DEFINE(log_sem, 0, 1); +K_SEM_STATIC_DEFINE(log_sem, 0, 1); #define TIMESTAMP_FREC (2000000) ZTEST_BMEM uint32_t source_id; diff --git a/tests/subsys/portability/cmsis_rtos_v1/prj.conf b/tests/subsys/portability/cmsis_rtos_v1/prj.conf index 4a0cb1c2a2062..df30d60acd88d 100644 --- a/tests/subsys/portability/cmsis_rtos_v1/prj.conf +++ b/tests/subsys/portability/cmsis_rtos_v1/prj.conf @@ -8,3 +8,6 @@ CONFIG_CMSIS_THREAD_MAX_STACK_SIZE=2048 CONFIG_SMP=n CONFIG_THREAD_STACK_INFO=y CONFIG_ZTEST_NEW_API=y + +# This exercises pathological k_mutex state deliberately, don't panic the kernel +CONFIG_ZYNC_VALIDATE=n diff --git a/tests/subsys/portability/cmsis_rtos_v1/src/mutex.c b/tests/subsys/portability/cmsis_rtos_v1/src/mutex.c index f64436f99389c..044adcd6b5c7f 100644 --- a/tests/subsys/portability/cmsis_rtos_v1/src/mutex.c +++ b/tests/subsys/portability/cmsis_rtos_v1/src/mutex.c @@ -105,7 +105,7 @@ void tThread_entry_lock_timeout(void const *arg) * Trying to release it here should fail. 
*/ status = osMutexRelease((osMutexId)arg); - zassert_true(status == osErrorResource, "Mutex unexpectedly released"); + zassert_true(status == osErrorResource, "Mutex unexpectedly released got %d", status); /* This delay ensures that the mutex gets released by the other * thread in the meantime diff --git a/tests/subsys/portability/cmsis_rtos_v2/prj.conf b/tests/subsys/portability/cmsis_rtos_v2/prj.conf index 8df8cf48aba43..417f39c32acec 100644 --- a/tests/subsys/portability/cmsis_rtos_v2/prj.conf +++ b/tests/subsys/portability/cmsis_rtos_v2/prj.conf @@ -18,3 +18,6 @@ CONFIG_CMSIS_V2_THREAD_MAX_STACK_SIZE=1024 CONFIG_CMSIS_V2_THREAD_DYNAMIC_STACK_SIZE=1024 CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_ZTEST_NEW_API=y + +# This exercises pathological k_mutex state deliberately, don't panic the kernel +CONFIG_ZYNC_VALIDATE=n \ No newline at end of file diff --git a/tests/subsys/rtio/rtio_api/testcase.yaml b/tests/subsys/rtio/rtio_api/testcase.yaml index 2c79f0a100446..c2d363ed9acee 100644 --- a/tests/subsys/rtio/rtio_api/testcase.yaml +++ b/tests/subsys/rtio/rtio_api/testcase.yaml @@ -17,4 +17,5 @@ tests: extra_configs: - CONFIG_USERSPACE=y - CONFIG_RTIO_SUBMIT_SEM=y + - CONFIG_ZYNC_USERSPACE_COMPAT=y tags: rtio userspace From 2ddc12ae326c6043e284abfac3ebd1c3163381a8 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sat, 9 Jul 2022 14:27:19 -0400 Subject: [PATCH 10/18] tests/kernel: Add zync unit test Simple coverage and behavior test cases for k_zync. The expectation is that we'll preserve existing synchronization tests elsewhere in the tree, and those can provide extra stress/regression/edge testing. Signed-off-by: Andy Ross --- tests/kernel/zync/CMakeLists.txt | 8 + tests/kernel/zync/prj.conf | 3 + tests/kernel/zync/src/main.c | 460 +++++++++++++++++++++++++++++++ tests/kernel/zync/testcase.yaml | 20 ++ 4 files changed, 491 insertions(+) create mode 100644 tests/kernel/zync/CMakeLists.txt create mode 100644 tests/kernel/zync/prj.conf create mode 100644 tests/kernel/zync/src/main.c create mode 100644 tests/kernel/zync/testcase.yaml diff --git a/tests/kernel/zync/CMakeLists.txt b/tests/kernel/zync/CMakeLists.txt new file mode 100644 index 0000000000000..b3abe3fe1fba6 --- /dev/null +++ b/tests/kernel/zync/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(zync) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/kernel/zync/prj.conf b/tests/kernel/zync/prj.conf new file mode 100644 index 0000000000000..3971afe4e555f --- /dev/null +++ b/tests/kernel/zync/prj.conf @@ -0,0 +1,3 @@ +CONFIG_ZTEST=y +CONFIG_ZTEST_NEW_API=y +CONFIG_TEST_USERSPACE=y diff --git a/tests/kernel/zync/src/main.c b/tests/kernel/zync/src/main.c new file mode 100644 index 0000000000000..603de7107663d --- /dev/null +++ b/tests/kernel/zync/src/main.c @@ -0,0 +1,460 @@ +/* Copyright (c) 2022 Google LLC. 
+ * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include + +#define NUM_THREADS 4 +#define STACKSZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE) +#define WAIT_THREAD_PRIO 0 + +struct k_zync zync = K_ZYNC_INITIALIZER(0, true, false, false, 0); +ZTEST_DMEM k_zync_atom_t mod_atom, reset_atom; + +struct k_thread wait_threads[NUM_THREADS]; +K_THREAD_STACK_ARRAY_DEFINE(wait_stacks, NUM_THREADS, STACKSZ); + +ZTEST_DMEM atomic_t awoken_count, awaiting_count; + +K_MUTEX_USER_DEFINE(wrapped_mutex, ztest_mem_partition); + +/* Resets the zync to a test initial-state, returns current config */ +static void reset_zync(struct k_zync_cfg *cfg) +{ + struct k_zync_cfg base_cfg = { + .fair = true, + }; + + k_zync_reset(&zync, &mod_atom); + k_zync_set_config(&zync, &base_cfg); + if (cfg) { + k_zync_get_config(&zync, cfg); + } +} + +static void wait_thread_fn(void *pa, void *pb, void *pc) +{ + int ret; + + atomic_inc(&awaiting_count); + ret = k_zync(&zync, &mod_atom, false, -1, K_FOREVER); + zassert_equal(ret, 1, "wrong return from k_zync()"); + atomic_dec(&awaiting_count); + atomic_inc(&awoken_count); +} + +static void spawn_wait_thread(int id, bool start) +{ + k_thread_create(&wait_threads[id], wait_stacks[id], + K_THREAD_STACK_SIZEOF(wait_stacks[id]), + wait_thread_fn, (void *)(long)id, NULL, NULL, + WAIT_THREAD_PRIO, K_USER | K_INHERIT_PERMS, + start ? K_NO_WAIT : K_FOREVER); +} + +ZTEST_USER(zync_tests, test_zync0_updown) +{ + reset_zync(NULL); + + zassert_true(mod_atom.val == 0, "wrong init val"); + k_zync(&zync, &mod_atom, false, 1, K_NO_WAIT); + zassert_true(mod_atom.val == 1, "val didn't increment"); + k_zync(&zync, &mod_atom, false, -1, K_NO_WAIT); + zassert_true(mod_atom.val == 0, "val didn't decrement"); +} + +ZTEST_USER(zync_tests, test_zync_downfail) +{ + int32_t t0, t1, ret; + + reset_zync(NULL); + + zassert_true(mod_atom.val == 0, "atom not zero"); + + ret = k_zync(&zync, &mod_atom, false, -1, K_NO_WAIT); + + zassert_true(ret == -EAGAIN, "wrong return value"); + zassert_true(mod_atom.val == 0, "atom changed unexpectedly"); + + k_usleep(1); /* tick align */ + t0 = (int32_t) k_uptime_ticks(); + ret = k_zync(&zync, &mod_atom, false, -1, K_TICKS(1)); + t1 = (int32_t) k_uptime_ticks(); + + zassert_true(ret == -EAGAIN, "wrong return value: %d", ret); + zassert_true(mod_atom.val == 0, "atom changed unexpectedly"); + zassert_true(t1 > t0, "timeout didn't elapse"); +} + +ZTEST_USER(zync_tests, test_zync_updown_n) +{ + const int count = 44, count2 = -14; + struct k_zync_cfg cfg; + + reset_zync(&cfg); + + k_zync(&zync, &mod_atom, false, count, K_NO_WAIT); + zassert_true(mod_atom.val == count, "wrong atom val"); + + k_zync(&zync, &mod_atom, false, count2, K_NO_WAIT); + zassert_true(mod_atom.val == count + count2, "wrong atom val"); + +#ifdef CONFIG_ZYNC_MAX_VAL + const int32_t max = 99; + + cfg.max_val = max; + k_zync_set_config(&zync, &cfg); + + k_zync(&zync, &mod_atom, false, 2 * max, K_NO_WAIT); + zassert_true(mod_atom.val == max, "wrong atom val: %d", mod_atom.val); + + cfg.max_val = 0; + k_zync_set_config(&zync, &cfg); +#endif + + k_zync_reset(&zync, &mod_atom); + zassert_true(mod_atom.val == 0, "atom did not reset"); +} + +ZTEST_USER(zync_tests, test_zync_waiters) +{ + k_zync_reset(&zync, &mod_atom); + zassert_true(mod_atom.atomic == 0, "atom did not reset"); + + awaiting_count = awoken_count = 0; + + for (int i = 0; i < NUM_THREADS; i++) { + spawn_wait_thread(i, true); + } + + k_sleep(K_TICKS(1)); + zassert_equal(awoken_count, 0, "someone woke up"); + zassert_equal(awaiting_count, 
NUM_THREADS, "wrong count of wait threads"); + + for (int i = 0; i < NUM_THREADS; i++) { + k_zync(&zync, &mod_atom, NULL, 1, K_NO_WAIT); + k_sleep(K_TICKS(1)); + zassert_equal(awoken_count, i + 1, "wrong woken count"); + zassert_equal(awaiting_count, NUM_THREADS - 1 - i, + "wrong woken count"); + } + + for (int i = 0; i < NUM_THREADS; i++) { + k_thread_join(&wait_threads[i], K_FOREVER); + } +} + +ZTEST_USER(zync_tests, test_zync_wake_all) +{ + k_zync_reset(&zync, &mod_atom); + zassert_true(mod_atom.atomic == 0, "atom did not reset"); + + awaiting_count = awoken_count = 0; + + for (int i = 0; i < NUM_THREADS; i++) { + spawn_wait_thread(i, true); + } + + k_sleep(K_TICKS(1)); + zassert_equal(awoken_count, 0, "someone woke up"); + zassert_equal(awaiting_count, NUM_THREADS, "wrong count of wait threads"); + + k_zync(&zync, &mod_atom, false, NUM_THREADS + 1, K_NO_WAIT); + k_sleep(K_TICKS(NUM_THREADS)); /* be generous, there are a lot of threads */ + zassert_equal(awoken_count, NUM_THREADS, "wrong woken count"); + zassert_equal(awaiting_count, 0, "wrong woken count"); + zassert_equal(mod_atom.val, 1, "wrong atom value"); + + for (int i = 0; i < NUM_THREADS; i++) { + k_thread_join(&wait_threads[i], K_FOREVER); + } +} + +ZTEST_USER(zync_tests, test_reset_atom) +{ + int32_t ret; + + reset_zync(NULL); + reset_atom.val = 2; + + ret = k_zync(&zync, &mod_atom, true, 1, K_NO_WAIT); + zassert_equal(ret, 0, "wrong return value"); + zassert_equal(mod_atom.val, 0, "atom value didn't remain zero"); +} + +/* Not userspace: whiteboxes zync object */ +ZTEST(zync_tests, test_zync_config) +{ + struct k_zync_cfg cfg; + + k_zync_get_config(&zync, &cfg); + k_zync_reset(&zync, &mod_atom); + + cfg.fair = false; + IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, + (cfg.prio_boost = true)); + IF_ENABLED(CONFIG_ZYNC_MAX_VAL, + (cfg.max_val = 3)); + k_zync_set_config(&zync, &cfg); + + zassert_equal(zync.cfg.fair, false, "wrong fair"); + IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, + (zassert_equal(zync.cfg.prio_boost, true, + "wrong prio_boost"))); + IF_ENABLED(CONFIG_ZYNC_MAX_VAL, + (zassert_equal(zync.cfg.max_val, 3, + "wrong max_val"))); + + cfg.fair = true; + IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, + (cfg.prio_boost = false)); + IF_ENABLED(CONFIG_ZYNC_MAX_VAL, + (cfg.max_val = 0)); + k_zync_set_config(&zync, &cfg); + + zassert_equal(zync.cfg.fair, true, "wrong fair"); + IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, + (zassert_equal(zync.cfg.prio_boost, false, + "wrong prio_boost"))); + IF_ENABLED(CONFIG_ZYNC_MAX_VAL, + (zassert_equal(zync.cfg.max_val, K_ZYNC_ATOM_VAL_MAX, + "wrong max val"))); +} + +/* To exercise "fairness", we need to test for preemption of the + * current thread, which is impossible if another cpu can pick up the + * thread that should preempt us. Ideally we want this to be 1cpu, + * but that's a problem during initial work because ztest's 1cpu + * feature uses a semaphore internally that is wrapped by a zync and + * keeps breaking on me. We can come back later to clean up. In the + * interrim there are LOTS of single core platforms to provide + * coverage here. + */ +#if !defined(CONFIG_SMP) || (CONFIG_MP_NUM_CPUS == 1) +ZTEST(zync_tests, test_fair) +{ + struct k_zync_cfg cfg; + + /* The 1cpu feature uses a semaphore internally, making this + * difficult during initial work where semaphore gets wrapped + * by a zync. We have plenty of single core platforms though, + * so no big coverage loss. 
+ */ + + /* Make sure we're lower priority and preemptible */ + k_thread_priority_set(k_current_get(), WAIT_THREAD_PRIO + 1); + __ASSERT_NO_MSG(k_thread_priority_get(k_current_get()) >= 0); + + for (int pass = 0; pass < 2; pass++) { + bool is_fair = pass == 0; + + reset_zync(&cfg); + + cfg.fair = is_fair; + k_zync_set_config(&zync, &cfg); + + awaiting_count = awoken_count = 0; + spawn_wait_thread(0, true); + + /* Make sure it blocked */ + zassert_equal(awoken_count, 0, "thread woke up"); + zassert_equal(awaiting_count, 1, "thread didn't run"); + + /* Wake it up, see if we're preempted */ + k_zync(&zync, &mod_atom, false, 1, K_NO_WAIT); + + if (is_fair) { + zassert_equal(awoken_count, 1, "thread didn't run"); + } else { + zassert_equal(awoken_count, 0, "thread ran unexpectedly"); + } + + k_sleep(K_TICKS(1)); /* let thread terminate */ + + zassert_equal(awoken_count, 1, "thread didn't resume"); + + k_thread_join(&wait_threads[0], K_FOREVER); + } +} +#endif + +/* Not userspace: increases wait_threads[0] priority */ +ZTEST(zync_tests, test_prio_boost) +{ + struct k_zync_cfg cfg; + + reset_zync(&cfg); + + if (!IS_ENABLED(CONFIG_ZYNC_PRIO_BOOST)) { + ztest_test_skip(); + } + + IF_ENABLED(CONFIG_ZYNC_PRIO_BOOST, + (cfg.prio_boost = true)); + k_zync_set_config(&zync, &cfg); + + int curr_prio = k_thread_priority_get(k_current_get()); + int thread_prio = curr_prio - 1; + + /* "Take the lock" */ + mod_atom.val = 1; + k_zync(&zync, &mod_atom, false, -1, K_NO_WAIT); + + zassert_equal(k_thread_priority_get(k_current_get()), curr_prio, + "thread priority changed unexpectedly"); + + spawn_wait_thread(0, false); + k_thread_priority_set(&wait_threads[0], thread_prio); + k_thread_start(&wait_threads[0]); + k_sleep(K_TICKS(1)); + + /* We should get its priority */ + zassert_equal(k_thread_priority_get(k_current_get()), thread_prio, + "thread priority didn't boost"); + + /* Wake it up, check our priority resets */ + k_zync(&zync, &mod_atom, false, 1, K_NO_WAIT); + + zassert_equal(k_thread_priority_get(k_current_get()), curr_prio, + "thread priority wasn't restored"); + + k_thread_join(&wait_threads[0], K_FOREVER); +} + +ZTEST_USER(zync_tests, test_recursive) +{ + const int lock_count = 16; + struct k_zync_cfg cfg; + + if (!IS_ENABLED(CONFIG_ZYNC_RECURSIVE)) { + ztest_test_skip(); + } + + reset_zync(&cfg); + IF_ENABLED(CONFIG_ZYNC_RECURSIVE, + (cfg.recursive = true)); + k_zync_set_config(&zync, &cfg); + + mod_atom.val = 1; /* start "unlocked" */ + + k_zync(&zync, &mod_atom, NULL, -1, K_NO_WAIT); + zassert_equal(mod_atom.val, 0, "recursive zync didn't lock"); + + /* Spawn a thread to try to lock it, make sure it doesn't get it */ + awaiting_count = awoken_count = 0; + spawn_wait_thread(0, true); + k_sleep(K_TICKS(1)); + zassert_equal(awaiting_count, 1, "thread not waiting"); + zassert_equal(awoken_count, 0, "thread woke up"); + + for (int i = 0; i < (lock_count - 1); i++) { + k_zync(&zync, &mod_atom, NULL, -1, K_NO_WAIT); + zassert_equal(mod_atom.val, 0, "recursive zync didn't lock"); + k_sleep(K_TICKS(1)); + zassert_equal(awaiting_count, 1, "thread not waiting"); + zassert_equal(awoken_count, 0, "thread woke up"); + } + + for (int i = 0; i < (lock_count - 1); i++) { + k_zync(&zync, &mod_atom, NULL, 1, K_NO_WAIT); + zassert_equal(mod_atom.val, 0, "recursive zync unlocked early"); + k_sleep(K_TICKS(1)); + zassert_equal(awaiting_count, 1, "thread not waiting"); + zassert_equal(awoken_count, 0, "thread woke up"); + } + + k_zync(&zync, &mod_atom, NULL, 1, K_NO_WAIT); + + /* now the thread can get it */ + 
k_sleep(K_TICKS(1)); + zassert_equal(mod_atom.val, 0, "zync not locked"); + zassert_equal(awaiting_count, 0, "thread still waiting"); + zassert_equal(awoken_count, 1, "thread didn't wake up"); + k_thread_join(&wait_threads[0], K_FOREVER); +} + +/* Not userspace, whiteboxes mutex */ +ZTEST(zync_tests, test_wrap_mutex) +{ + int ret; + + zassert_equal(Z_PAIR_ATOM(&wrapped_mutex.zp)->val, 1, + "atom doesn't show unlocked"); + + ret = k_mutex_lock(&wrapped_mutex, K_NO_WAIT); + zassert_equal(ret, 0, "mutex didn't lock"); + + zassert_equal(Z_PAIR_ATOM(&wrapped_mutex.zp)->val, 0, + "atom doesn't show locked"); + + ret = k_mutex_unlock(&wrapped_mutex); + zassert_equal(ret, 0, "mutex didn't unlock"); +} + +static void atom_set_loop(void *a, void *b, void *c) +{ + uint32_t field = (long) a; + uint16_t val = 0; + k_zync_atom_t atom = {}; + + printk("Thread %p field %d\n", k_current_get(), field); + + for (int i = 0; i < 100000; i++) { + uint16_t newval = (val + 1) & 0xfff; + + /* Increment our own field, and make sure it is not + * modified by the other thread making a nonatomic + * update + */ + K_ZYNC_ATOM_SET(&atom) { + int old = field == 0 ? (old_atom.val & 0xfff) + : (old_atom.val >> 12); + + zassert_equal(old, val, + "Wrong val, expected %d got %d\n", val, old); + + if (field == 0) { + new_atom.val &= 0xfffff000; + new_atom.val |= newval; + } else { + new_atom.val &= 0xff000fff; + new_atom.val |= (newval << 12); + } + } + + val = newval; + } +} + +/* Stress test of the K_ZYNC_ATOM_SET() utility, spins, setting + * independent fields of a single atom from two different CPUs looking + * for mixups + */ +ZTEST(zync_tests, test_atom_set) +{ + if (!IS_ENABLED(CONFIG_SMP)) { + ztest_test_skip(); + } + + k_thread_create(&wait_threads[0], wait_stacks[0], + K_THREAD_STACK_SIZEOF(wait_stacks[0]), + atom_set_loop, (void *)0, NULL, NULL, + 0, 0, K_NO_WAIT); + atom_set_loop((void *)1, NULL, NULL); + k_thread_abort(&wait_threads[0]); +} + +static void *suite_setup(void) +{ + z_object_init(&zync); + k_object_access_grant(&zync, k_current_get()); + for (int i = 0; i < NUM_THREADS; i++) { + k_object_access_grant(&wait_threads[i], k_current_get()); + k_object_access_grant(wait_stacks[i], k_current_get()); + } + + return NULL; +} + +ZTEST_SUITE(zync_tests, NULL, suite_setup, NULL, NULL, NULL); diff --git a/tests/kernel/zync/testcase.yaml b/tests/kernel/zync/testcase.yaml new file mode 100644 index 0000000000000..64dab81ec0fbf --- /dev/null +++ b/tests/kernel/zync/testcase.yaml @@ -0,0 +1,20 @@ +common: + tags: kernel userspace +tests: + kernel.zync: + tags: kernel userspace + kernel.zync.max_val: + extra_configs: + - CONFIG_ZYNC_MAX_VAL=y + kernel.zync.recursive: + extra_configs: + - CONFIG_ZYNC_RECURSIVE=y + kernel.zync.prio_boost: + extra_configs: + - CONFIG_ZYNC_PRIO_BOOST=y + kernel.zync.nouser: + extra_configs: + - CONFIG_TEST_USERSPACE=n + - CONFIG_ZYNC_RECURSIVE=y + - CONFIG_ZYNC_PRIO_BOOST=y + - CONFIG_ZYNC_MAX_VAL=y From 2218a8ce8f5aaa1f32d5a0cfdcb61b151a976d55 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Sat, 8 Oct 2022 19:47:54 -0700 Subject: [PATCH 11/18] tests/kernel/mutex_api: Adjust kobj type for zync When using the zync backend, mutexes and semaphores are in fact identical kernel objects (though they have different C types). This test expects that k_mutex_init() on a k_sem object will fail, but in fact that works now (it initializes the "semaphore" to a zero state, which is a perfectly valid semaphore). 
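As a sketch (hypothetical fragment, not part of the patch), the expectation that no longer holds:

    /* Hypothetical illustration only: with the zync backend a k_sem and a
     * k_mutex share one underlying kernel object, so this cast no longer
     * trips kobject validation. k_mutex_init() simply reinitializes the
     * object to a zero count, which is a legal semaphore state.
     */
    static struct k_sem sem;

    void old_expectation(void)
    {
            ztest_set_fault_valid(true);          /* the fault never arrives... */
            k_mutex_init((struct k_mutex *)&sem); /* ...because this now succeeds */
    }

The test therefore switches the invalid-object case to a k_pipe, which is still a distinct object type.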
Signed-off-by: Andy Ross --- tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c b/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c index 955af138b7d1b..611db56070cd9 100644 --- a/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c +++ b/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c @@ -61,7 +61,7 @@ static void tThread_entry_negative(void *p1, void *p2, void *p3) break; case MUTEX_INIT_INVALID_OBJ: ztest_set_fault_valid(true); - k_mutex_init((struct k_mutex *)&sem); + k_mutex_init((struct k_mutex *)&pipe); break; case MUTEX_LOCK_NULL: ztest_set_fault_valid(true); From 77ca333e3b7d7f883fc0e70c6ef783a27deb490a Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Tue, 1 Nov 2022 14:09:26 -0700 Subject: [PATCH 12/18] tests/benchmarks/latency_measure: Needs recursive zyncs This test tries to benchmark a k_mutex by iteratively locking it 1000 times in a row. Obviously if you try that on a non-recursive mutex, you'll deadlock on the second call. So enable recursion. But that's only half a solution, because now it means our benchmark app is benchmarking our mutex primitive in its slowest configuration! Needs rework. Signed-off-by: Andy Ross --- tests/benchmarks/latency_measure/prj.conf | 2 ++ tests/benchmarks/latency_measure/src/mutex_lock_unlock.c | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/tests/benchmarks/latency_measure/prj.conf b/tests/benchmarks/latency_measure/prj.conf index 4a786177bbc00..1315509a1f678 100644 --- a/tests/benchmarks/latency_measure/prj.conf +++ b/tests/benchmarks/latency_measure/prj.conf @@ -24,3 +24,5 @@ CONFIG_MP_MAX_NUM_CPUS=1 CONFIG_TIMING_FUNCTIONS=y CONFIG_HEAP_MEM_POOL_SIZE=2048 + +CONFIG_ZYNC_USERSPACE_COMPAT=n diff --git a/tests/benchmarks/latency_measure/src/mutex_lock_unlock.c b/tests/benchmarks/latency_measure/src/mutex_lock_unlock.c index d236f75a37f83..ad6be77509e11 100644 --- a/tests/benchmarks/latency_measure/src/mutex_lock_unlock.c +++ b/tests/benchmarks/latency_measure/src/mutex_lock_unlock.c @@ -31,6 +31,11 @@ int mutex_lock_unlock(void) timing_t timestamp_start; timing_t timestamp_end; + if (!IS_ENABLED(CONFIG_ZYNC_RECURSIVE)) { + PRINT_STATS_AVG("Average time to lock a mutex (N/A)", 0, 1); + return 0; + } + timing_start(); timestamp_start = timing_counter_get(); From 7face791bd07a140273ded37c8eaefc19cd62381 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 14 Nov 2022 08:50:59 -0800 Subject: [PATCH 13/18] kernel/sched: Refactor, unready_thread() There's an existing unready_thread() utility whose core function gets repeated in a few different variants in multiple places in the scheduler. Unify them all to a single API. Pure refactoring; no functional change.
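In sketch form (identifiers as in the diff below), the open-coded pattern repeated at the call sites is:

    if (z_is_thread_queued(thread)) {
            dequeue_thread(thread);
    }
    update_cache(preempt_ok);

which after this patch collapses at each site to a single call:

    unready_thread(thread, preempt_ok);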
Signed-off-by: Andy Ross --- kernel/sched.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 32b54948480b1..4c927401ef79a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -309,6 +309,14 @@ static inline bool is_aborting(struct k_thread *thread) } #endif +static void unready_thread(struct k_thread *thread, bool recache) +{ + if (z_is_thread_queued(thread)) { + dequeue_thread(thread); + } + update_cache(recache); +} + static ALWAYS_INLINE struct k_thread *next_up(void) { struct k_thread *thread = runq_best(); @@ -669,11 +677,8 @@ void z_impl_k_thread_suspend(struct k_thread *thread) (void)z_abort_thread_timeout(thread); LOCKED(&sched_spinlock) { - if (z_is_thread_queued(thread)) { - dequeue_thread(thread); - } + unready_thread(thread, thread == _current); z_mark_thread_as_suspended(thread); - update_cache(thread == _current); } if (thread == _current) { @@ -728,18 +733,10 @@ static _wait_q_t *pended_on_thread(struct k_thread *thread) return thread->base.pended_on; } -static void unready_thread(struct k_thread *thread) -{ - if (z_is_thread_queued(thread)) { - dequeue_thread(thread); - } - update_cache(thread == _current); -} - /* sched_spinlock must be held */ static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q) { - unready_thread(thread); + unready_thread(thread, true); z_mark_thread_as_pending(thread); SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread); @@ -1444,7 +1441,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks) #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) pending_current = _current; #endif - unready_thread(_current); + unready_thread(_current, true); z_add_thread_timeout(_current, timeout); z_mark_thread_as_suspended(_current); @@ -1700,15 +1697,12 @@ static void end_thread(struct k_thread *thread) if ((thread->base.thread_state & _THREAD_DEAD) == 0U) { thread->base.thread_state |= _THREAD_DEAD; thread->base.thread_state &= ~_THREAD_ABORTING; - if (z_is_thread_queued(thread)) { - dequeue_thread(thread); - } if (thread->base.pended_on != NULL) { unpend_thread_no_timeout(thread); } (void)z_abort_thread_timeout(thread); unpend_all(&thread->join_queue); - update_cache(1); + unready_thread(thread, true); SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); From 7956e7a9bc41415f35d24d93adb6a67cd9325f8c Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 14 Nov 2022 09:28:54 -0800 Subject: [PATCH 14/18] kernel/sched: refactor wake() Split out the unlocked core of z_sched_wake() to be used elsewhere Signed-off-by: Andy Ross --- kernel/sched.c | 61 ++++++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 4c927401ef79a..cfe405d73654a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -57,7 +57,7 @@ struct k_spinlock sched_spinlock; static void update_cache(int preempt_ok); static void end_thread(struct k_thread *thread); - +static void ready_thread(struct k_thread *thread); static inline int is_preempt(struct k_thread *thread) { @@ -309,6 +309,35 @@ static inline bool is_aborting(struct k_thread *thread) } #endif +static _wait_q_t *pended_on_thread(struct k_thread *thread) +{ + __ASSERT_NO_MSG(thread->base.pended_on); + + return thread->base.pended_on; +} + +static inline void unpend_thread_no_timeout(struct k_thread *thread) +{ + _priq_wait_remove(&pended_on_thread(thread)->waitq, thread); + z_mark_thread_as_not_pending(thread); + thread->base.pended_on = NULL; 
+} + +static struct k_thread *wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) +{ + struct k_thread *thread = _priq_wait_best(&wait_q->waitq); + + if (thread != NULL) { + z_thread_return_value_set_with_data(thread, + swap_retval, + swap_data); + unpend_thread_no_timeout(thread); + (void)z_abort_thread_timeout(thread); + ready_thread(thread); + } + return thread; +} + static void unready_thread(struct k_thread *thread, bool recache) { if (z_is_thread_queued(thread)) { @@ -726,13 +755,6 @@ static inline void z_vrfy_k_thread_resume(struct k_thread *thread) #include #endif -static _wait_q_t *pended_on_thread(struct k_thread *thread) -{ - __ASSERT_NO_MSG(thread->base.pended_on); - - return thread->base.pended_on; -} - /* sched_spinlock must be held */ static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q) { @@ -773,13 +795,6 @@ void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, } } -static inline void unpend_thread_no_timeout(struct k_thread *thread) -{ - _priq_wait_remove(&pended_on_thread(thread)->waitq, thread); - z_mark_thread_as_not_pending(thread); - thread->base.pended_on = NULL; -} - ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread) { LOCKED(&sched_spinlock) { @@ -1880,23 +1895,11 @@ static inline void z_vrfy_k_thread_abort(k_tid_t thread) */ bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) { - struct k_thread *thread; - bool ret = false; + bool ret; LOCKED(&sched_spinlock) { - thread = _priq_wait_best(&wait_q->waitq); - - if (thread != NULL) { - z_thread_return_value_set_with_data(thread, - swap_retval, - swap_data); - unpend_thread_no_timeout(thread); - (void)z_abort_thread_timeout(thread); - ready_thread(thread); - ret = true; - } + ret = wake(wait_q, swap_retval, swap_data) != NULL; } - return ret; } From d293aca3c9bba41ce73ac6dc697f2a0cf2fc5b7e Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 14 Nov 2022 10:11:11 -0800 Subject: [PATCH 15/18] kernel/zync: Handle the case of aborted just-awoken threads There is an edge condition in zync where a thread can be awoken in k_zync(), but then killed before it can decrement the atom value. This creates a situation where the zync can have a non-zero atom count but also pended threads. That may be surprising for users trying to use a zync as a semaphore (though for a mutex it's indistinguishable from just "killing the thread holding the lock", which is clearly an app bug). Fix this by holding a backpointer to the zync between the moment where a thread is unpended and when it reacquires the zync spinlock (and thus can't be externally affected anymore). Signed-off-by: Andy Ross --- include/zephyr/kernel/thread.h | 7 ++++++ kernel/include/ksched.h | 1 + kernel/sched.c | 39 ++++++++++++++++++++++++++++------ kernel/thread.c | 1 + kernel/zync.c | 3 ++- 5 files changed, 44 insertions(+), 7 deletions(-) diff --git a/include/zephyr/kernel/thread.h b/include/zephyr/kernel/thread.h index f84c02d32d4c2..893dfa35ff2e1 100644 --- a/include/zephyr/kernel/thread.h +++ b/include/zephyr/kernel/thread.h @@ -67,6 +67,13 @@ struct _thread_base { */ _wait_q_t *pended_on; + /* Zync object on which this thread was just unpended. Stays + * set until the thread runs and reacquires the zync's spinlock + * so that the kernel can wake up a replacement thread if the + * awoken thread is killed/suspended.
+ */ + struct k_zync *zync_unpended; + /* user facing 'thread options'; values defined in include/kernel.h */ uint8_t user_options; diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index b160bd2fabb8d..4be9aa0b10362 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -64,6 +64,7 @@ void z_ready_thread(struct k_thread *thread); void z_requeue_current(struct k_thread *curr); struct k_thread *z_swap_next_thread(void); void z_thread_abort(struct k_thread *thread); +bool z_zync_wake(_wait_q_t *wait_q, struct k_zync *zync); static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout) { diff --git a/kernel/sched.c b/kernel/sched.c index cfe405d73654a..6b32e9d519af9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -323,26 +323,50 @@ static inline void unpend_thread_no_timeout(struct k_thread *thread) thread->base.pended_on = NULL; } +/* Find a thread to wake up, must be followed by ready_thread() to schedule it */ static struct k_thread *wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) { struct k_thread *thread = _priq_wait_best(&wait_q->waitq); if (thread != NULL) { - z_thread_return_value_set_with_data(thread, - swap_retval, - swap_data); + z_thread_return_value_set_with_data(thread, swap_retval, swap_data); unpend_thread_no_timeout(thread); (void)z_abort_thread_timeout(thread); + } + return thread; +} + +static struct k_thread *zync_wake(_wait_q_t *wait_q, struct k_zync *zync) +{ + struct k_thread *thread = wake(&zync->waiters, 0, NULL); + + if (thread != NULL) { + thread->base.zync_unpended = zync; ready_thread(thread); } return thread; } +bool z_zync_wake(_wait_q_t *wait_q, struct k_zync *zync) +{ + bool ret = false; + + LOCKED(&sched_spinlock) { + ret = zync_wake(wait_q, zync) != NULL; + } + return ret; +} + static void unready_thread(struct k_thread *thread, bool recache) { if (z_is_thread_queued(thread)) { dequeue_thread(thread); } + if (thread->base.zync_unpended != NULL) { + zync_wake(&thread->base.zync_unpended->waiters, + thread->base.zync_unpended); + thread->base.zync_unpended = NULL; + } update_cache(recache); } @@ -1895,12 +1919,15 @@ static inline void z_vrfy_k_thread_abort(k_tid_t thread) */ bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) { - bool ret; + struct k_thread *th = NULL; LOCKED(&sched_spinlock) { - ret = wake(wait_q, swap_retval, swap_data) != NULL; + th = wake(wait_q, swap_retval, swap_data); + if (th != NULL) { + ready_thread(th); + } } - return ret; + return th != NULL; } int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, diff --git a/kernel/thread.c b/kernel/thread.c index 2c94554fbfcd2..3f65c9755f245 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -793,6 +793,7 @@ void z_init_thread_base(struct _thread_base *thread_base, int priority, { /* k_q_node is initialized upon first insertion in a list */ thread_base->pended_on = NULL; + thread_base->zync_unpended = NULL; thread_base->user_options = (uint8_t)options; thread_base->thread_state = (uint8_t)initial_state; diff --git a/kernel/zync.c b/kernel/zync.c index c57e9d335f491..c0fb032662e17 100644 --- a/kernel/zync.c +++ b/kernel/zync.c @@ -197,7 +197,7 @@ static int32_t zync_locked(struct k_zync *zync, k_zync_atom_t *mod_atom, Z_WAIT_Q_LAZY_INIT(&zync->waiters); for (woken = 0; woken < delta; woken++) { - if (!z_sched_wake(&zync->waiters, 0, NULL)) { + if (!z_zync_wake(&zync->waiters, zync)) { break; } resched = true; @@ -220,6 +220,7 @@ static int32_t zync_locked(struct k_zync *zync, k_zync_atom_t *mod_atom, 
prio_boost(zync, _current->base.prio); pendret = z_pend_curr(&zync->lock, key, &zync->waiters, timeout); key = k_spin_lock(&zync->lock); + _current->base.zync_unpended = NULL; prio_boost(zync, K_LOWEST_THREAD_PRIO); mod -= delta; From 3b715e3325f0bb81659321e50cbc47d050d2ac5d Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 14 Nov 2022 08:32:41 -0800 Subject: [PATCH 16/18] tests/kernel/zync: Add case for abort/recovery Zync has a new feature where it will wake up a replacement thread when a just-awoken zync thread is killed before it gets a chance to decrement the atom. Exercise that case. Signed-off-by: Andy Ross --- tests/kernel/zync/src/main.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/kernel/zync/src/main.c b/tests/kernel/zync/src/main.c index 603de7107663d..9837170cc4c1b 100644 --- a/tests/kernel/zync/src/main.c +++ b/tests/kernel/zync/src/main.c @@ -445,6 +445,36 @@ ZTEST(zync_tests, test_atom_set) k_thread_abort(&wait_threads[0]); } +/* Start a thread, let it pend on a zync, wake it up, but then kill it + * before it reacquires the zync spinlock and decrements the atom. + * Verify that the kernel wakes up another thread to take its place. + */ +ZTEST(zync_tests_1cpu, test_abort_recover) +{ + reset_zync(NULL); + awaiting_count = awoken_count = 0; + + spawn_wait_thread(0, true); + k_sleep(K_TICKS(1)); + spawn_wait_thread(1, true); + + k_sleep(K_TICKS(2)); + zassert_equal(awaiting_count, 2, "wrong count of wait threads"); + + k_tid_t kth = &wait_threads[0]; + + k_sched_lock(); + k_zync(&zync, &mod_atom, false, 1, K_NO_WAIT); + + zassert_true((kth->base.thread_state & _THREAD_PENDING) == 0, "still pended"); + zassert_equal(awoken_count, 0, "someone woke up?"); + k_thread_abort(kth); + k_sched_unlock(); + + k_sleep(K_TICKS(1)); + zassert_equal(awoken_count, 1, "didn't wake up"); +} + static void *suite_setup(void) { z_object_init(&zync); @@ -458,3 +488,5 @@ static void *suite_setup(void) } ZTEST_SUITE(zync_tests, NULL, suite_setup, NULL, NULL, NULL); +ZTEST_SUITE(zync_tests_1cpu, NULL, suite_setup, + ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL); From fccce4f3c1ed423f5c26440843ecb14646a78ff9 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 14 Nov 2022 10:53:09 -0800 Subject: [PATCH 17/18] kernel/zync: Augment k_zync() timeout loop to be optionally stricter There is concern about the early-return semantics of k_zync, where a thread that passed a non-FOREVER/non-NO_WAIT timeout might return before the timeout expires if the thread gets awoken for spurious reasons (e.g. if it was unpended to service a zync atom count, but a higher priority thread got there first). Add a computed retry loop to make sure the thread stays pending for at least as long as requested. This is an optional feature based on kconfig. In practice, it's preferable to handle conditions like this in application code, which can make better decisions about what to optimize. Signed-off-by: Andy Ross --- include/zephyr/kernel.h | 65 +++++++++++++++++++++++++++++++++++++++ include/zephyr/sys/zync.h | 26 ---------------- kernel/Kconfig | 13 ++++++++ 3 files changed, 78 insertions(+), 26 deletions(-) diff --git a/include/zephyr/kernel.h b/include/zephyr/kernel.h index 6e8151f424893..be7d95050e911 100644 --- a/include/zephyr/kernel.h +++ b/include/zephyr/kernel.h @@ -2707,6 +2707,71 @@ extern struct k_work_q k_sys_work_q; * INTERNAL_HIDDEN @endcond */ +/* Wrapper for the zync utilities below.
Here in kernel.h because it + * needs to appear in both kernel and non-kernel contexts (i.e. can't + * be a simple function) AND needs to call out to external syscalls. + * Our include structure isn't friendly to this kind of usage. + */ +static inline int32_t z_pzyncmod(struct z_zync_pair *zp, int32_t mod, + k_timeout_t timeout) +{ + int32_t ret; + int64_t start; + k_ticks_t t0; + bool retry, forever = K_TIMEOUT_EQ(timeout, Z_FOREVER); + bool ticking = !forever && !K_TIMEOUT_EQ(timeout, Z_TIMEOUT_NO_WAIT); + + if (!IS_ENABLED(CONFIG_ZYNC_STRICT_TIMEOUTS)) { + ticking = false; + } + + if (ticking) { + t0 = timeout.ticks; + start = k_uptime_ticks(); +#ifdef CONFIG_TIMEOUT_64BIT + if (Z_TICK_ABS(t0) < 0) { + timeout = K_TIMEOUT_ABS_TICKS(t0 + start); + } +#endif + } + + do { + if (IS_ENABLED(Z_ZYNC_ALWAYS_KERNEL)) { + ret = z_pzync(Z_PAIR_ZYNC(zp), mod, timeout); + } else if (k_zync_try_mod(Z_PAIR_ATOM(zp), mod)) { + return 0; + } else { + ret = k_zync(Z_PAIR_ZYNC(zp), Z_PAIR_ATOM(zp), + false, mod, timeout); + } + + retry = mod < 0 && ret == 0; + if (retry) { + if (ticking) { + int64_t dt = k_uptime_ticks() - start; + + if (!IS_ENABLED(CONFIG_TIMEOUT_64BIT)) { + timeout = Z_TIMEOUT_TICKS(t0 - dt); + } + if (dt <= 0) { + retry = false; + } + } else { + retry = forever; + } + } + } while (retry); + + /* Infuriating historical API requirements in test suite */ + if (ret == 0) { + ret = -EAGAIN; + } + if (ret == -EAGAIN && K_TIMEOUT_EQ(timeout, Z_TIMEOUT_NO_WAIT)) { + ret = -EBUSY; + } + return ret < 0 ? ret : 0; +} + /** * @defgroup mutex_apis Mutex APIs * @ingroup kernel_apis diff --git a/include/zephyr/sys/zync.h b/include/zephyr/sys/zync.h index 2bc42ae840aac..b8937f3096792 100644 --- a/include/zephyr/sys/zync.h +++ b/include/zephyr/sys/zync.h @@ -343,32 +343,6 @@ struct z_zync_pair { __syscall int32_t z_pzync(struct k_zync *zync, int32_t mod, k_timeout_t timeout); __syscall void z_pzync_init(struct z_zync_pair *zp, struct k_zync_cfg *cfg); -static inline int32_t z_pzyncmod(struct z_zync_pair *zp, int32_t mod, - k_timeout_t timeout) -{ - int32_t ret; - - do { - if (IS_ENABLED(Z_ZYNC_ALWAYS_KERNEL)) { - ret = z_pzync(Z_PAIR_ZYNC(zp), mod, timeout); - } else if (k_zync_try_mod(Z_PAIR_ATOM(zp), mod)) { - return 0; - } else { - ret = k_zync(Z_PAIR_ZYNC(zp), Z_PAIR_ATOM(zp), - false, mod, timeout); - } - } while (mod < 0 && K_TIMEOUT_EQ(timeout, Z_FOREVER) && ret == 0); - - /* Infuriating historical API requirements in test suite */ - if (ret == 0) { - ret = -EAGAIN; - } - if (ret == -EAGAIN && K_TIMEOUT_EQ(timeout, Z_TIMEOUT_NO_WAIT)) { - ret = -EBUSY; - } - return ret < 0 ? ret : 0; -} - /* Low level "wait on condition variable" utility. Atomically: sets * the "mut" zync to 1, wakes up a waiting thread if there is one, and * pends on the "cv" zync. Unlike k_condvar_wait() it does not diff --git a/kernel/Kconfig b/kernel/Kconfig index afc58980d305d..9ff019d6e3e56 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -403,6 +403,19 @@ config ZYNC_LEGACY Very few applications depend on this older behavior and most should select the various features individually. +config ZYNC_STRICT_TIMEOUTS + bool "Add retries to k_zync-based timeouts" + help + When set, zync-wrapped IPC primitives like k_sem and k_mutex + will have a timeout-sensitive retry loop placed around their + k_zync() invocation. 
This will force them to wait for the + specified time even in circumstances (like being + suspended/restarted, or having their intended atom count + "stolen" by a higher priority thread) where k_zync would + naturally return early with -EAGAIN. Optional; most code + should be prepared to handle -EAGAIN on its own, where the + app can make a better choice as to what to do. + menu "Kernel Debugging and Metrics" config INIT_STACKS From 988f530a9fe00a70b5c0a11a732b6149e8586317 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Mon, 14 Nov 2022 12:05:01 -0800 Subject: [PATCH 18/18] tests/kernel/zync: Add a case to exercise strict timeouts Test CONFIG_ZYNC_STRICT_TIMEOUTS, both with and without absolute timeouts. Signed-off-by: Andy Ross --- tests/kernel/zync/src/main.c | 49 ++++++++++++++++++++++++++++++++- tests/kernel/zync/testcase.yaml | 8 ++++++ 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/tests/kernel/zync/src/main.c b/tests/kernel/zync/src/main.c index 9837170cc4c1b..2f177320e66e5 100644 --- a/tests/kernel/zync/src/main.c +++ b/tests/kernel/zync/src/main.c @@ -19,6 +19,8 @@ ZTEST_DMEM atomic_t awoken_count, awaiting_count; K_MUTEX_USER_DEFINE(wrapped_mutex, ztest_mem_partition); +K_SEM_DEFINE(wrapped_sem, 0, K_SEM_MAX_LIMIT); + /* Resets the zync to a test initial-state, returns current config */ static void reset_zync(struct k_zync_cfg *cfg) { @@ -472,7 +474,52 @@ ZTEST(zync_tests_1cpu, test_abort_recover) k_sched_unlock(); k_sleep(K_TICKS(1)); - zassert_equal(awoken_count, 1, "didn't wake up"); + zassert_equal(awoken_count, 1, "replacement thread didn't wake up"); +} + +static void timeout_wakeup(void *pa, void *pb, void *pc) +{ + int32_t ticks = k_ms_to_ticks_ceil32(300); + k_timeout_t timeout = K_TICKS(ticks); + + int64_t start = k_uptime_ticks(); + int32_t ret = k_sem_take(&wrapped_sem, timeout); + int64_t end = k_uptime_ticks(); + + zassert_equal(ret, -EAGAIN, "k_sem_take() should return -EAGAIN"); + + int64_t dt = end - start; + + if (IS_ENABLED(CONFIG_ZYNC_STRICT_TIMEOUTS)) { + zassert_true(dt >= ticks, "didn't wait long enough: dt == %d", dt); + } else { + /* 3-tick threshold for 2 context switches and a 1 + * tick sleep in the main thread. + */ + zassert_true(dt <= 3, "should have woken up immediately"); + } +} + +/* Tests the zync pair retry behavior */ +ZTEST(zync_tests_1cpu, test_early_wakeup) +{ + /* Spawn the thread and let it pend */ + k_thread_create(&wait_threads[0], wait_stacks[0], + K_THREAD_STACK_SIZEOF(wait_stacks[0]), + timeout_wakeup, NULL, NULL, NULL, + 0, 0, K_NO_WAIT); + k_sleep(K_TICKS(1)); + + /* Hold the sched lock so it won't run, wake it up, but then + * take the atom count ourselves + */ + k_sched_lock(); + k_sem_give(&wrapped_sem); + zassert_equal(0, k_sem_take(&wrapped_sem, K_NO_WAIT), + "failed to retake zync"); + k_sched_unlock(); + + k_msleep(200); } static void *suite_setup(void) diff --git a/tests/kernel/zync/testcase.yaml b/tests/kernel/zync/testcase.yaml index 64dab81ec0fbf..44b5a458e6283 100644 --- a/tests/kernel/zync/testcase.yaml +++ b/tests/kernel/zync/testcase.yaml @@ -12,6 +12,14 @@ tests: kernel.zync.prio_boost: extra_configs: - CONFIG_ZYNC_PRIO_BOOST=y + kernel.zync.strict_timeouts: + extra_configs: + - CONFIG_ZYNC_STRICT_TIMEOUTS=y + - CONFIG_TIMEOUT_64BIT=y + kernel.zync.strict_timeouts32: + extra_configs: + - CONFIG_ZYNC_STRICT_TIMEOUTS=y + - CONFIG_TIMEOUT_64BIT=n kernel.zync.nouser: extra_configs: - CONFIG_TEST_USERSPACE=n