From bbb5dd532f75234a20409c36a631449387616c58 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 7 Jan 2025 12:00:43 -0500
Subject: [PATCH 1/3] Revert "arch: deprecate `_current`"

Mostly a revert of commit b1def7145fd1 ("arch: deprecate `_current`").

This commit was part of PR #80716, whose initial purpose was to provide
an architecture-specific optimization for _current. The actual
deprecation was sneaked in later on without proper discussion.

The Zephyr core always used _current before, and that was fine. It is
also quite prevalent, and the alternative is proving rather verbose
(e.g. arch_current_thread()->base.user_options versus
_current->base.user_options). Furthermore, as a concept, the "current
thread" is not necessarily architecture-specific, so the primary
abstraction should not carry the arch_ prefix.

Hence this revert.

Signed-off-by: Nicolas Pitre
---
 arch/arc/Kconfig | 2 +-
 arch/arc/core/fault.c | 2 +-
 arch/arc/core/irq_offload.c | 4 +-
 arch/arc/core/thread.c | 16 +-
 arch/arc/core/tls.c | 2 +-
 arch/arm/core/cortex_a_r/fault.c | 4 +-
 arch/arm/core/cortex_a_r/swap_helper.S | 4 +-
 arch/arm/core/cortex_a_r/thread.c | 36 ++---
 arch/arm/core/cortex_m/swap_helper.S | 2 +-
 arch/arm/core/cortex_m/thread.c | 32 ++--
 arch/arm/core/cortex_m/thread_abort.c | 2 +-
 .../arm/include/cortex_a_r/kernel_arch_func.h | 6 +-
 arch/arm/include/cortex_m/kernel_arch_func.h | 6 +-
 arch/arm64/core/cortex_r/arm_mpu.c | 6 +-
 arch/arm64/core/fatal.c | 7 +-
 arch/arm64/core/fpu.c | 14 +-
 arch/arm64/core/mmu.c | 2 +-
 arch/arm64/core/smp.c | 2 +-
 arch/arm64/core/thread.c | 10 +-
 arch/posix/core/swap.c | 12 +-
 arch/posix/core/thread.c | 2 +-
 arch/riscv/Kconfig | 2 +-
 arch/riscv/core/fatal.c | 28 ++--
 arch/riscv/core/fpu.c | 24 +--
 arch/riscv/core/isr.S | 4 +-
 arch/riscv/core/pmp.c | 6 +-
 arch/riscv/core/stacktrace.c | 13 +-
 arch/riscv/core/thread.c | 27 ++--
 arch/sparc/core/thread.c | 2 +-
 arch/x86/core/fatal.c | 18 +--
 arch/x86/core/ia32/float.c | 8 +-
 arch/x86/core/userspace.c | 14 +-
 arch/x86/core/x86_mmu.c | 15 +-
 arch/xtensa/core/fatal.c | 2 +-
 arch/xtensa/core/ptables.c | 2 +-
 arch/xtensa/core/thread.c | 2 +-
 arch/xtensa/core/vector_handlers.c | 2 +-
 boards/native/native_posix/irq_handler.c | 2 +-
 boards/native/native_sim/irq_handler.c | 2 +-
 boards/native/nrf_bsim/irq_handler.c | 2 +-
 doc/kernel/services/smp/smp.rst | 2 +-
 doc/releases/migration-guide-4.1.rst | 4 -
 doc/releases/release-notes-4.1.rst | 4 +-
 drivers/wifi/eswifi/eswifi.h | 4 +-
 include/zephyr/arch/arch_interface.h | 2 +-
 include/zephyr/arch/common/arch_inlines.h | 2 +-
 include/zephyr/arch/x86/ia32/arch.h | 2 +-
 include/zephyr/internal/syscall_handler.h | 4 +-
 include/zephyr/kernel_structs.h | 8 +-
 kernel/Kconfig | 2 +-
 kernel/errno.c | 4 +-
 kernel/fatal.c | 2 +-
 kernel/idle.c | 4 +-
 kernel/include/kernel_internal.h | 2 +-
 kernel/include/ksched.h | 4 +-
 kernel/include/kswap.h | 8 +-
 kernel/include/kthread.h | 6 +-
 kernel/include/priority_q.h | 20 +--
 kernel/ipi.c | 2 +-
 kernel/mailbox.c | 10 +-
 kernel/mem_domain.c | 2 +-
 kernel/mem_slab.c | 2 +-
 kernel/mempool.c | 2 +-
 kernel/mmu.c | 2 +-
 kernel/msg_q.c | 4 +-
 kernel/mutex.c | 16 +-
 kernel/pipes.c | 12 +-
 kernel/poll.c | 2 +-
 kernel/queue.c | 4 +-
 kernel/sched.c | 138 +++++++++---------
 kernel/smp.c | 10 +-
 kernel/spinlock_validate.c | 8 +-
 kernel/stack.c | 2 +-
 kernel/thread.c | 53 +++----
 kernel/timeslicing.c | 6 +-
 kernel/userspace.c | 12 +-
 kernel/userspace_handler.c | 2 +-
 kernel/work.c | 2 +-
 lib/libc/armstdc/src/libc-hooks.c | 2 +-
 lib/os/p4wq.c | 12 +-
 scripts/build/gen_syscalls.py | 8 +-
 soc/espressif/esp32/soc.c | 2 +-
 soc/espressif/esp32/soc_appcpu.c | 2 +-
 soc/espressif/esp32s2/soc.c | 2 +-
 soc/espressif/esp32s3/soc.c | 2 +-
 soc/espressif/esp32s3/soc_appcpu.c | 2 +-
 subsys/net/lib/sockets/sockets.c | 2 +-
 subsys/portability/cmsis_rtos_v2/kernel.c | 6 +-
 subsys/profiling/perf/backends/perf_riscv.c | 4 +-
 subsys/profiling/perf/backends/perf_x86.c | 2 +-
 subsys/profiling/perf/backends/perf_x86_64.c | 8 +-
 .../modules/kernel_service/thread/unwind.c | 2 +-
 .../arm/arm_interrupt/src/arm_interrupt.c | 6 +-
 .../arm/arm_thread_swap/src/arm_syscalls.c | 28 ++--
 .../arm/arm_thread_swap/src/arm_thread_arch.c | 54 +++----
 .../arch/riscv/userspace/riscv_gp/src/main.c | 2 +-
 .../benchmarks/footprints/src/system_thread.c | 4 +-
 tests/kernel/context/src/main.c | 8 +-
 tests/kernel/fatal/exception/src/main.c | 2 +-
 tests/kernel/fatal/message_capture/src/main.c | 2 +-
 tests/kernel/ipi_cascade/src/main.c | 10 +-
 .../mem_protect/mem_protect/src/inherit.c | 2 +-
 .../mem_protect/obj_validation/src/main.c | 4 +-
 tests/kernel/mem_protect/userspace/src/main.c | 8 +-
 tests/kernel/sched/deadline/src/main.c | 12 +-
 tests/kernel/smp/src/main.c | 17 +--
 tests/kernel/threads/thread_apis/src/main.c | 6 +-
 .../thread_apis/src/test_essential_thread.c | 14 +-
 .../src/test_thread_runtime_stats.c | 28 ++--
 tests/subsys/pm/power_mgmt/src/main.c | 6 +-
 tests/ztest/error_hook/src/main.c | 6 +-
 111 files changed, 494 insertions(+), 504 deletions(-)

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index fa00c3722e6be..d44ca272b11c8 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
 	  RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization
 	  requires significant time, and it slows down performance.
 	  ARCMWDT works with tls pointer in different way then GCC. Optimized access to
-	  TLS pointer via arch_current_thread() does not provide significant advantages
+	  TLS pointer via the _current symbol does not provide significant advantages
 	  in case of MetaWare.
 
 config GEN_ISR_TABLES
diff --git a/arch/arc/core/fault.c b/arch/arc/core/fault.c
index a6c8410e63357..6f9da3cd1e0e9 100644
--- a/arch/arc/core/fault.c
+++ b/arch/arc/core/fault.c
@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
 {
 #if defined(CONFIG_MULTITHREADING)
 	uint32_t guard_end, guard_start;
-	const struct k_thread *thread = arch_current_thread();
+	const struct k_thread *thread = _current;
 
 	if (!thread) {
 		/* TODO: Under what circumstances could we get here ?
*/ diff --git a/arch/arc/core/irq_offload.c b/arch/arc/core/irq_offload.c index f24a3e7dd8a5b..d1a3f900ca3f0 100644 --- a/arch/arc/core/irq_offload.c +++ b/arch/arc/core/irq_offload.c @@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter) __asm__ volatile("sync"); - /* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */ - __ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0); + /* If _current was aborted in the offload routine, we shouldn't be here */ + __ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0); } /* need to be executed on every core in the system */ diff --git a/arch/arc/core/thread.c b/arch/arc/core/thread.c index cb5352bc47547..4b1d836103eed 100644 --- a/arch/arc/core/thread.c +++ b/arch/arc/core/thread.c @@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, #ifdef CONFIG_MULTITHREADING void *z_arch_get_next_switch_handle(struct k_thread **old_thread) { - *old_thread = arch_current_thread(); + *old_thread = _current; return z_get_next_switch_handle(NULL); } @@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread) FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { - setup_stack_vars(arch_current_thread()); + setup_stack_vars(_current); /* possible optimizaiton: no need to load mem domain anymore */ /* need to lock cpu here ? */ - configure_mpu_thread(arch_current_thread()); + configure_mpu_thread(_current); z_arc_userspace_enter(user_entry, p1, p2, p3, - (uint32_t)arch_current_thread()->stack_info.start, - (arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta), arch_current_thread()); + (uint32_t)_current->stack_info.start, + (_current->stack_info.size - + _current->stack_info.delta), _current); CODE_UNREACHABLE; } #endif @@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout) id = _current_cpu->id; #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK) - __ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), ""); + __ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), ""); #endif k_spin_unlock(&lock, key); @@ -355,7 +355,7 @@ void arc_vpx_unlock(void) key = k_spin_lock(&lock); #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK) - __ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), ""); + __ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), ""); #endif id = _current_cpu->id; k_spin_unlock(&lock, key); diff --git a/arch/arc/core/tls.c b/arch/arc/core/tls.c index 9585b228926c2..3cf7d45cab913 100644 --- a/arch/arc/core/tls.c +++ b/arch/arc/core/tls.c @@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr) void *_Preserve_flags _mwget_tls(void) { - return (void *)(arch_current_thread()->tls); + return (void *)(_current->tls); } #else diff --git a/arch/arm/core/cortex_a_r/fault.c b/arch/arm/core/cortex_a_r/fault.c index 5e3d38a66b45d..daf1d2345ca06 100644 --- a/arch/arm/core/cortex_a_r/fault.c +++ b/arch/arm/core/cortex_a_r/fault.c @@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void) * context because it is about to be overwritten. 
*/ if (((_current_cpu->nested == 2) - && (arch_current_thread()->base.user_options & K_FP_REGS)) + && (_current->base.user_options & K_FP_REGS)) || ((_current_cpu->nested > 2) && (spill_esf->undefined & FPEXC_EN))) { /* @@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void) * means that a thread that uses the VFP does not have to, * but should, set K_FP_REGS on thread creation. */ - arch_current_thread()->base.user_options |= K_FP_REGS; + _current->base.user_options |= K_FP_REGS; } return false; diff --git a/arch/arm/core/cortex_a_r/swap_helper.S b/arch/arm/core/cortex_a_r/swap_helper.S index 36dd9a9654806..a41e1ab5942fe 100644 --- a/arch/arm/core/cortex_a_r/swap_helper.S +++ b/arch/arm/core/cortex_a_r/swap_helper.S @@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap) #if defined(CONFIG_FPU_SHARING) ldrb r0, [r2, #_thread_offset_to_user_options] - tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */ + tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */ beq out_fp_inactive mov ip, #FPEXC_EN @@ -152,7 +152,7 @@ out_fp_inactive: #if defined(CONFIG_FPU_SHARING) ldrb r0, [r2, #_thread_offset_to_user_options] - tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */ + tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */ beq in_fp_inactive mov r3, #FPEXC_EN diff --git a/arch/arm/core/cortex_a_r/thread.c b/arch/arm/core/cortex_a_r/thread.c index 43be2d5069022..b3bd91ce5c11c 100644 --- a/arch/arm/core/cortex_a_r/thread.c +++ b/arch/arm/core/cortex_a_r/thread.c @@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, { /* Set up privileged stack before entering user mode */ - arch_current_thread()->arch.priv_stack_start = - (uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj); + _current->arch.priv_stack_start = + (uint32_t)z_priv_stack_find(_current->stack_obj); #if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_THREAD_STACK_INFO) /* We're dropping to user mode which means the guard area is no @@ -208,13 +208,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * which accounted for memory borrowed from the thread stack. */ #if FP_GUARD_EXTRA_SIZE > 0 - if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { - arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE; - arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE; + if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { + _current->stack_info.start -= FP_GUARD_EXTRA_SIZE; + _current->stack_info.size += FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ - arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; - arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; + _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; + _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_THREAD_STACK_INFO */ /* Stack guard area reserved at the bottom of the thread's @@ -222,23 +222,23 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * buffer area accordingly. */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) - arch_current_thread()->arch.priv_stack_start += - ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + _current->arch.priv_stack_start += + ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? 
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else - arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; + _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_MPU_STACK_GUARD */ #if defined(CONFIG_CPU_AARCH32_CORTEX_R) - arch_current_thread()->arch.priv_stack_end = - arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE; + _current->arch.priv_stack_end = + _current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE; #endif z_arm_userspace_enter(user_entry, p1, p2, p3, - (uint32_t)arch_current_thread()->stack_info.start, - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta); + (uint32_t)_current->stack_info.start, + _current->stack_info.size - + _current->stack_info.delta); CODE_UNREACHABLE; } @@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode); uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp) { #if defined(CONFIG_MULTITHREADING) - const struct k_thread *thread = arch_current_thread(); + const struct k_thread *thread = _current; if (thread == NULL) { return 0; @@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \ defined(CONFIG_MPU_STACK_GUARD) uint32_t guard_len = - ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else /* If MPU_STACK_GUARD is not enabled, the guard length is @@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { - if (thread != arch_current_thread()) { + if (thread != _current) { return -EINVAL; } diff --git a/arch/arm/core/cortex_m/swap_helper.S b/arch/arm/core/cortex_m/swap_helper.S index 23a49cb87ef54..c6207084b5ea6 100644 --- a/arch/arm/core/cortex_m/swap_helper.S +++ b/arch/arm/core/cortex_m/swap_helper.S @@ -288,7 +288,7 @@ in_fp_endif: #if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) /* Re-program dynamic memory map */ push {r2,lr} - mov r0, r2 /* arch_current_thread() thread */ + mov r0, r2 /* _current thread */ bl z_arm_configure_dynamic_mpu_regions pop {r2,lr} #endif diff --git a/arch/arm/core/cortex_m/thread.c b/arch/arm/core/cortex_m/thread.c index 1f8d945c3eb41..b67cbe8ee3e33 100644 --- a/arch/arm/core/cortex_m/thread.c +++ b/arch/arm/core/cortex_m/thread.c @@ -231,8 +231,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, { /* Set up privileged stack before entering user mode */ - arch_current_thread()->arch.priv_stack_start = - (uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj); + _current->arch.priv_stack_start = + (uint32_t)z_priv_stack_find(_current->stack_obj); #if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_THREAD_STACK_INFO) /* We're dropping to user mode which means the guard area is no @@ -241,13 +241,13 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * which accounted for memory borrowed from the thread stack. 
*/ #if FP_GUARD_EXTRA_SIZE > 0 - if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { - arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE; - arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE; + if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) { + _current->stack_info.start -= FP_GUARD_EXTRA_SIZE; + _current->stack_info.size += FP_GUARD_EXTRA_SIZE; } #endif /* FP_GUARD_EXTRA_SIZE */ - arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; - arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; + _current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE; + _current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_THREAD_STACK_INFO */ /* Stack guard area reserved at the bottom of the thread's @@ -255,18 +255,18 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * buffer area accordingly. */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) - arch_current_thread()->arch.priv_stack_start += - ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + _current->arch.priv_stack_start += + ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else - arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; + _current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE; #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_MPU_STACK_GUARD */ z_arm_userspace_enter(user_entry, p1, p2, p3, - (uint32_t)arch_current_thread()->stack_info.start, - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta); + (uint32_t)_current->stack_info.start, + _current->stack_info.size - + _current->stack_info.delta); CODE_UNREACHABLE; } @@ -379,7 +379,7 @@ void configure_builtin_stack_guard(struct k_thread *thread) uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp) { #if defined(CONFIG_MULTITHREADING) - const struct k_thread *thread = arch_current_thread(); + const struct k_thread *thread = _current; if (thread == NULL) { return 0; @@ -389,7 +389,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \ defined(CONFIG_MPU_STACK_GUARD) uint32_t guard_len = - ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? + ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ? 
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE; #else /* If MPU_STACK_GUARD is not enabled, the guard length is @@ -452,7 +452,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) int arch_float_disable(struct k_thread *thread) { - if (thread != arch_current_thread()) { + if (thread != _current) { return -EINVAL; } diff --git a/arch/arm/core/cortex_m/thread_abort.c b/arch/arm/core/cortex_m/thread_abort.c index 235adeab0b9eb..99af867110705 100644 --- a/arch/arm/core/cortex_m/thread_abort.c +++ b/arch/arm/core/cortex_m/thread_abort.c @@ -27,7 +27,7 @@ void z_impl_k_thread_abort(k_tid_t thread) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); - if (arch_current_thread() == thread) { + if (_current == thread) { if (arch_is_in_isr()) { /* ARM is unlike most arches in that this is true * even for non-peripheral interrupts, even though diff --git a/arch/arm/include/cortex_a_r/kernel_arch_func.h b/arch/arm/include/cortex_a_r/kernel_arch_func.h index 9ac2b2a1d9086..7a100db07ddb1 100644 --- a/arch/arm/include/cortex_a_r/kernel_arch_func.h +++ b/arch/arm/include/cortex_a_r/kernel_arch_func.h @@ -40,8 +40,8 @@ static ALWAYS_INLINE void arch_kernel_init(void) static ALWAYS_INLINE int arch_swap(unsigned int key) { /* store off key and return value */ - arch_current_thread()->arch.basepri = key; - arch_current_thread()->arch.swap_return_value = -EAGAIN; + _current->arch.basepri = key; + _current->arch.swap_return_value = -EAGAIN; z_arm_cortex_r_svc(); irq_unlock(key); @@ -49,7 +49,7 @@ static ALWAYS_INLINE int arch_swap(unsigned int key) /* Context switch is performed here. Returning implies the * thread has been context-switched-in again. */ - return arch_current_thread()->arch.swap_return_value; + return _current->arch.swap_return_value; } static ALWAYS_INLINE void diff --git a/arch/arm/include/cortex_m/kernel_arch_func.h b/arch/arm/include/cortex_m/kernel_arch_func.h index 9183eb691b14b..2a24103a32de0 100644 --- a/arch/arm/include/cortex_m/kernel_arch_func.h +++ b/arch/arm/include/cortex_m/kernel_arch_func.h @@ -87,8 +87,8 @@ extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); static ALWAYS_INLINE int arch_swap(unsigned int key) { /* store off key and return value */ - arch_current_thread()->arch.basepri = key; - arch_current_thread()->arch.swap_return_value = -EAGAIN; + _current->arch.basepri = key; + _current->arch.swap_return_value = -EAGAIN; /* set pending bit to make sure we will take a PendSV exception */ SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; @@ -99,7 +99,7 @@ static ALWAYS_INLINE int arch_swap(unsigned int key) /* Context switch is performed here. Returning implies the * thread has been context-switched-in again. 
*/ - return arch_current_thread()->arch.swap_return_value; + return _current->arch.swap_return_value; } diff --git a/arch/arm64/core/cortex_r/arm_mpu.c b/arch/arm64/core/cortex_r/arm_mpu.c index 5bc9ecbf3e5ac..2bd6d265c9a9e 100644 --- a/arch/arm64/core/cortex_r/arm_mpu.c +++ b/arch/arm64/core/cortex_r/arm_mpu.c @@ -727,7 +727,7 @@ static int configure_dynamic_mpu_regions(struct k_thread *thread) */ thread->arch.region_num = (uint8_t)region_num; - if (thread == arch_current_thread()) { + if (thread == _current) { ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num); } @@ -795,7 +795,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread) ret = configure_dynamic_mpu_regions(thread); #ifdef CONFIG_SMP - if (ret == 0 && thread != arch_current_thread()) { + if (ret == 0 && thread != _current) { /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); } @@ -810,7 +810,7 @@ int arch_mem_domain_thread_remove(struct k_thread *thread) ret = configure_dynamic_mpu_regions(thread); #ifdef CONFIG_SMP - if (ret == 0 && thread != arch_current_thread()) { + if (ret == 0 && thread != _current) { /* the thread could be running on another CPU right now */ z_arm64_mem_cfg_ipi(); } diff --git a/arch/arm64/core/fatal.c b/arch/arm64/core/fatal.c index 0e793ea18bbe3..7955b6f7d6d16 100644 --- a/arch/arm64/core/fatal.c +++ b/arch/arm64/core/fatal.c @@ -306,9 +306,8 @@ static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, u } } #ifdef CONFIG_USERSPACE - else if ((arch_current_thread()->base.user_options & K_USER) != 0 && - GET_ESR_EC(esr) == 0x24) { - sp_limit = (uint64_t)arch_current_thread()->stack_info.start; + else if ((_current->base.user_options & K_USER) != 0 && GET_ESR_EC(esr) == 0x24) { + sp_limit = (uint64_t)_current->stack_info.start; guard_start = sp_limit - Z_ARM64_STACK_GUARD_SIZE; sp = esf->sp; if (sp <= sp_limit || (guard_start <= far && far <= sp_limit)) { @@ -435,7 +434,7 @@ void z_arm64_do_kernel_oops(struct arch_esf *esf) * User mode is only allowed to induce oopses and stack check * failures via software-triggered system fatal exceptions. */ - if (((arch_current_thread()->base.user_options & K_USER) != 0) && + if (((_current->base.user_options & K_USER) != 0) && reason != K_ERR_STACK_CHK_FAIL) { reason = K_ERR_KERNEL_OOPS; } diff --git a/arch/arm64/core/fpu.c b/arch/arm64/core/fpu.c index 00abd59632a61..a585165b94339 100644 --- a/arch/arm64/core/fpu.c +++ b/arch/arm64/core/fpu.c @@ -36,7 +36,7 @@ static void DBG(char *msg, struct k_thread *th) strcpy(buf, "CPU# exc# "); buf[3] = '0' + _current_cpu->id; buf[8] = '0' + arch_exception_depth(); - strcat(buf, arch_current_thread()->name); + strcat(buf, _current->name); strcat(buf, ": "); strcat(buf, msg); strcat(buf, " "); @@ -125,7 +125,7 @@ static void flush_owned_fpu(struct k_thread *thread) * replace it, and this avoids a deadlock where * two CPUs want to pull each other's FPU context. */ - if (thread == arch_current_thread()) { + if (thread == _current) { arch_flush_local_fpu(); while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) { barrier_dsync_fence_full(); @@ -260,15 +260,15 @@ void z_arm64_fpu_trap(struct arch_esf *esf) * Make sure the FPU context we need isn't live on another CPU. * The current CPU's FPU context is NULL at this point. 
*/ - flush_owned_fpu(arch_current_thread()); + flush_owned_fpu(_current); #endif /* become new owner */ - atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread()); + atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current); /* restore our content */ - z_arm64_fpu_restore(&arch_current_thread()->arch.saved_fp_context); - DBG("restore", arch_current_thread()); + z_arm64_fpu_restore(&_current->arch.saved_fp_context); + DBG("restore", _current); } /* @@ -287,7 +287,7 @@ static void fpu_access_update(unsigned int exc_update_level) if (arch_exception_depth() == exc_update_level) { /* We're about to execute non-exception code */ - if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == arch_current_thread()) { + if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) { /* turn on FPU access */ write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP); } else { diff --git a/arch/arm64/core/mmu.c b/arch/arm64/core/mmu.c index ef199b2e7ab4d..a914916d605e7 100644 --- a/arch/arm64/core/mmu.c +++ b/arch/arm64/core/mmu.c @@ -1309,7 +1309,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread) } thread->arch.ptables = domain_ptables; - if (thread == arch_current_thread()) { + if (thread == _current) { z_arm64_swap_ptables(thread); } else { #ifdef CONFIG_SMP diff --git a/arch/arm64/core/smp.c b/arch/arm64/core/smp.c index e1c3f64dbb95f..fd9d457ea7df5 100644 --- a/arch/arm64/core/smp.c +++ b/arch/arm64/core/smp.c @@ -240,7 +240,7 @@ void mem_cfg_ipi_handler(const void *unused) * This is a no-op if the page table is already the right one. * Lock irq to prevent the interrupt during mem region switch. */ - z_arm64_swap_mem_domains(arch_current_thread()); + z_arm64_swap_mem_domains(_current); arch_irq_unlock(key); } diff --git a/arch/arm64/core/thread.c b/arch/arm64/core/thread.c index f51e203555039..18f49945eda49 100644 --- a/arch/arm64/core/thread.c +++ b/arch/arm64/core/thread.c @@ -159,15 +159,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, uint64_t tmpreg; /* Map the thread stack */ - z_arm64_thread_mem_domains_init(arch_current_thread()); + z_arm64_thread_mem_domains_init(_current); /* Top of the user stack area */ - stack_el0 = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta); + stack_el0 = Z_STACK_PTR_ALIGN(_current->stack_info.start + + _current->stack_info.size - + _current->stack_info.delta); /* Top of the privileged non-user-accessible part of the stack */ - stack_el1 = (uintptr_t)(arch_current_thread()->stack_obj + ARCH_THREAD_STACK_RESERVED); + stack_el1 = (uintptr_t)(_current->stack_obj + ARCH_THREAD_STACK_RESERVED); register void *x0 __asm__("x0") = user_entry; register void *x1 __asm__("x1") = p1; diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c index 18d83cf78d6b6..70d2cdedacc55 100644 --- a/arch/posix/core/swap.c +++ b/arch/posix/core/swap.c @@ -23,7 +23,7 @@ int arch_swap(unsigned int key) { /* - * struct k_thread * arch_current_thread() is the currently running thread + * struct k_thread * _current is the currently running thread * struct k_thread * _kernel.ready_q.cache contains the next thread to * run (cannot be NULL) * @@ -34,8 +34,8 @@ int arch_swap(unsigned int key) #if CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_out(); #endif - arch_current_thread()->callee_saved.key = key; - arch_current_thread()->callee_saved.retval = -EAGAIN; + _current->callee_saved.key = key; + _current->callee_saved.retval = -EAGAIN; /* retval may be 
modified with a call to * arch_thread_return_value_set() @@ -47,7 +47,7 @@ int arch_swap(unsigned int key) posix_thread_status_t *this_thread_ptr = (posix_thread_status_t *) - arch_current_thread()->callee_saved.thread_status; + _current->callee_saved.thread_status; arch_current_thread_set(_kernel.ready_q.cache); @@ -66,9 +66,9 @@ int arch_swap(unsigned int key) /* When we continue, _kernel->current points back to this thread */ - irq_unlock(arch_current_thread()->callee_saved.key); + irq_unlock(_current->callee_saved.key); - return arch_current_thread()->callee_saved.retval; + return _current->callee_saved.retval; } diff --git a/arch/posix/core/thread.c b/arch/posix/core/thread.c index 0a3f2eccd74b3..050d075fe570c 100644 --- a/arch/posix/core/thread.c +++ b/arch/posix/core/thread.c @@ -131,7 +131,7 @@ void z_impl_k_thread_abort(k_tid_t thread) key = irq_lock(); - if (arch_current_thread() == thread) { + if (_current == thread) { if (tstatus->aborted == 0) { /* LCOV_EXCL_BR_LINE */ tstatus->aborted = 1; } else { diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 5e3049433a9c4..b70eb0aca14f5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -38,7 +38,7 @@ config RISCV_CURRENT_VIA_GP select ARCH_HAS_CUSTOM_CURRENT_IMPL help Store the current thread's pointer into the global pointer (GP) register. - When is enabled, calls to `arch_current_thread()` & `k_sched_current_thread_query()` will + When is enabled, calls to `_current` & `k_sched_current_thread_query()` will be reduced to a single register read. endchoice # RISCV_GP_PURPOSE diff --git a/arch/riscv/core/fatal.c b/arch/riscv/core/fatal.c index 8e0e55c8e8047..879ffab9a8095 100644 --- a/arch/riscv/core/fatal.c +++ b/arch/riscv/core/fatal.c @@ -158,23 +158,23 @@ static bool bad_stack_pointer(struct arch_esf *esf) uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf); #ifdef CONFIG_USERSPACE - if (arch_current_thread()->arch.priv_stack_start != 0 && - sp >= arch_current_thread()->arch.priv_stack_start && - sp < arch_current_thread()->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) { + if (_current->arch.priv_stack_start != 0 && + sp >= _current->arch.priv_stack_start && + sp < _current->arch.priv_stack_start + Z_RISCV_STACK_GUARD_SIZE) { return true; } - if (z_stack_is_user_capable(arch_current_thread()->stack_obj) && - sp >= arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED && - sp < arch_current_thread()->stack_info.start - K_THREAD_STACK_RESERVED + if (z_stack_is_user_capable(_current->stack_obj) && + sp >= _current->stack_info.start - K_THREAD_STACK_RESERVED && + sp < _current->stack_info.start - K_THREAD_STACK_RESERVED + Z_RISCV_STACK_GUARD_SIZE) { return true; } #endif /* CONFIG_USERSPACE */ #if CONFIG_MULTITHREADING - if (sp >= arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED && - sp < arch_current_thread()->stack_info.start - K_KERNEL_STACK_RESERVED + if (sp >= _current->stack_info.start - K_KERNEL_STACK_RESERVED && + sp < _current->stack_info.start - K_KERNEL_STACK_RESERVED + Z_RISCV_STACK_GUARD_SIZE) { return true; } @@ -191,10 +191,10 @@ static bool bad_stack_pointer(struct arch_esf *esf) #ifdef CONFIG_USERSPACE if ((esf->mstatus & MSTATUS_MPP) == 0 && - (esf->sp < arch_current_thread()->stack_info.start || - esf->sp > arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta)) { + (esf->sp < _current->stack_info.start || + esf->sp > _current->stack_info.start + + _current->stack_info.size - + 
_current->stack_info.delta)) { /* user stack pointer moved outside of its allowed stack */ return true; } @@ -246,9 +246,9 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) void z_impl_user_fault(unsigned int reason) { - struct arch_esf *oops_esf = arch_current_thread()->syscall_frame; + struct arch_esf *oops_esf = _current->syscall_frame; - if (((arch_current_thread()->base.user_options & K_USER) != 0) && + if (((_current->base.user_options & K_USER) != 0) && reason != K_ERR_STACK_CHK_FAIL) { reason = K_ERR_KERNEL_OOPS; } diff --git a/arch/riscv/core/fpu.c b/arch/riscv/core/fpu.c index bd648585c436d..318e97e0002a9 100644 --- a/arch/riscv/core/fpu.c +++ b/arch/riscv/core/fpu.c @@ -36,8 +36,8 @@ static void DBG(char *msg, struct k_thread *th) strcpy(buf, "CPU# exc# "); buf[3] = '0' + _current_cpu->id; - buf[8] = '0' + arch_current_thread()->arch.exception_depth; - strcat(buf, arch_current_thread()->name); + buf[8] = '0' + _current->arch.exception_depth; + strcat(buf, _current->name); strcat(buf, ": "); strcat(buf, msg); strcat(buf, " "); @@ -82,12 +82,12 @@ static void z_riscv_fpu_load(void) "must be called with FPU access disabled"); /* become new owner */ - atomic_ptr_set(&_current_cpu->arch.fpu_owner, arch_current_thread()); + atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current); /* restore our content */ csr_set(mstatus, MSTATUS_FS_INIT); - z_riscv_fpu_restore(&arch_current_thread()->arch.saved_fp_context); - DBG("restore", arch_current_thread()); + z_riscv_fpu_restore(&_current->arch.saved_fp_context); + DBG("restore", _current); } /* @@ -168,7 +168,7 @@ static void flush_owned_fpu(struct k_thread *thread) * replace it, and this avoids a deadlock where * two CPUs want to pull each other's FPU context. */ - if (thread == arch_current_thread()) { + if (thread == _current) { z_riscv_fpu_disable(); arch_flush_local_fpu(); do { @@ -213,7 +213,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf) /* save current owner's content if any */ arch_flush_local_fpu(); - if (arch_current_thread()->arch.exception_depth > 0) { + if (_current->arch.exception_depth > 0) { /* * We were already in exception when the FPU access trapped. * We give it access and prevent any further IRQ recursion @@ -233,7 +233,7 @@ void z_riscv_fpu_trap(struct arch_esf *esf) * Make sure the FPU context we need isn't live on another CPU. * The current CPU's FPU context is NULL at this point. 
*/ - flush_owned_fpu(arch_current_thread()); + flush_owned_fpu(_current); #endif /* make it accessible and clean to the returning context */ @@ -256,13 +256,13 @@ static bool fpu_access_allowed(unsigned int exc_update_level) __ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0, "must be called with IRQs disabled"); - if (arch_current_thread()->arch.exception_depth == exc_update_level) { + if (_current->arch.exception_depth == exc_update_level) { /* We're about to execute non-exception code */ - if (_current_cpu->arch.fpu_owner == arch_current_thread()) { + if (_current_cpu->arch.fpu_owner == _current) { /* everything is already in place */ return true; } - if (arch_current_thread()->arch.fpu_recently_used) { + if (_current->arch.fpu_recently_used) { /* * Before this thread was context-switched out, * it made active use of the FPU, but someone else @@ -273,7 +273,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level) z_riscv_fpu_disable(); arch_flush_local_fpu(); #ifdef CONFIG_SMP - flush_owned_fpu(arch_current_thread()); + flush_owned_fpu(_current); #endif z_riscv_fpu_load(); _current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN; diff --git a/arch/riscv/core/isr.S b/arch/riscv/core/isr.S index 193f48208716c..11787d3b32487 100644 --- a/arch/riscv/core/isr.S +++ b/arch/riscv/core/isr.S @@ -299,7 +299,7 @@ is_fp: /* Process the FP trap and quickly return from exception */ mv a0, sp tail z_riscv_fpu_trap 2: -no_fp: /* increment arch_current_thread()->arch.exception_depth */ +no_fp: /* increment _current->arch.exception_depth */ lr t0, ___cpu_t_current_OFFSET(s0) lb t1, _thread_offset_to_exception_depth(t0) add t1, t1, 1 @@ -726,7 +726,7 @@ no_reschedule: mv a0, sp call z_riscv_fpu_exit_exc - /* decrement arch_current_thread()->arch.exception_depth */ + /* decrement _current->arch.exception_depth */ lr t0, ___cpu_t_current_OFFSET(s0) lb t1, _thread_offset_to_exception_depth(t0) add t1, t1, -1 diff --git a/arch/riscv/core/pmp.c b/arch/riscv/core/pmp.c index fbbf7c55137ce..e29c8abd76d61 100644 --- a/arch/riscv/core/pmp.c +++ b/arch/riscv/core/pmp.c @@ -752,8 +752,8 @@ int arch_buffer_validate(const void *addr, size_t size, int write) int ret = -1; /* Check if this is on the stack */ - if (IS_WITHIN(start, size, arch_current_thread()->stack_info.start, - arch_current_thread()->stack_info.size)) { + if (IS_WITHIN(start, size, + _current->stack_info.start, _current->stack_info.size)) { return 0; } @@ -768,7 +768,7 @@ int arch_buffer_validate(const void *addr, size_t size, int write) } /* Look for a matching partition in our memory domain */ - struct k_mem_domain *domain = arch_current_thread()->mem_domain_info.mem_domain; + struct k_mem_domain *domain = _current->mem_domain_info.mem_domain; int p_idx, remaining_partitions; k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock); diff --git a/arch/riscv/core/stacktrace.c b/arch/riscv/core/stacktrace.c index 0dfe0a1963882..361e152f00ca3 100644 --- a/arch/riscv/core/stacktrace.c +++ b/arch/riscv/core/stacktrace.c @@ -108,7 +108,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void *cookie, const struct k /* Unwind the provided exception stack frame */ fp = esf->s0; ra = esf->mepc; - } else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) { + } else if ((csf == NULL) || (csf == &_current->callee_saved)) { /* Unwind current thread (default case when nothing is provided ) */ fp = (uintptr_t)__builtin_frame_address(0); ra = (uintptr_t)walk_stackframe; @@ -181,7 +181,7 @@ static void walk_stackframe(riscv_stacktrace_cb cb, void 
*cookie, const struct k /* Unwind the provided exception stack frame */ sp = z_riscv_get_sp_before_exc(esf); ra = esf->mepc; - } else if ((csf == NULL) || (csf == &arch_current_thread()->callee_saved)) { + } else if ((csf == NULL) || (csf == &_current->callee_saved)) { /* Unwind current thread (default case when nothing is provided ) */ sp = current_stack_pointer; ra = (uintptr_t)walk_stackframe; @@ -215,10 +215,8 @@ void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie, const struct k_thread *thread, const struct arch_esf *esf) { if (thread == NULL) { - /* In case `thread` is NULL, default that to `arch_current_thread()` - * and try to unwind - */ - thread = arch_current_thread(); + /* In case `thread` is NULL, default that to `_current` and try to unwind */ + thread = _current; } walk_stackframe((riscv_stacktrace_cb)callback_fn, cookie, thread, esf, in_stack_bound, @@ -282,8 +280,7 @@ void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf int i = 0; LOG_ERR("call trace:"); - walk_stackframe(print_trace_address, &i, arch_current_thread(), esf, in_fatal_stack_bound, - csf); + walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf); LOG_ERR(""); } #endif /* CONFIG_EXCEPTION_STACK_TRACE */ diff --git a/arch/riscv/core/thread.c b/arch/riscv/core/thread.c index 5c471034d2575..b4999bda09ac3 100644 --- a/arch/riscv/core/thread.c +++ b/arch/riscv/core/thread.c @@ -132,29 +132,28 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, /* Set up privileged stack */ #ifdef CONFIG_GEN_PRIV_STACKS - arch_current_thread()->arch.priv_stack_start = - (unsigned long)z_priv_stack_find(arch_current_thread()->stack_obj); + _current->arch.priv_stack_start = + (unsigned long)z_priv_stack_find(_current->stack_obj); /* remove the stack guard from the main stack */ - arch_current_thread()->stack_info.start -= K_THREAD_STACK_RESERVED; - arch_current_thread()->stack_info.size += K_THREAD_STACK_RESERVED; + _current->stack_info.start -= K_THREAD_STACK_RESERVED; + _current->stack_info.size += K_THREAD_STACK_RESERVED; #else - arch_current_thread()->arch.priv_stack_start = - (unsigned long)arch_current_thread()->stack_obj; + _current->arch.priv_stack_start = (unsigned long)_current->stack_obj; #endif /* CONFIG_GEN_PRIV_STACKS */ - top_of_priv_stack = Z_STACK_PTR_ALIGN(arch_current_thread()->arch.priv_stack_start + + top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start + K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE); #ifdef CONFIG_INIT_STACKS /* Initialize the privileged stack */ - (void)memset((void *)arch_current_thread()->arch.priv_stack_start, 0xaa, + (void)memset((void *)_current->arch.priv_stack_start, 0xaa, Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE)); #endif /* CONFIG_INIT_STACKS */ top_of_user_stack = Z_STACK_PTR_ALIGN( - arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta); + _current->stack_info.start + + _current->stack_info.size - + _current->stack_info.delta); status = csr_read(mstatus); @@ -170,12 +169,12 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, #ifdef CONFIG_PMP_STACK_GUARD /* reconfigure as the kernel mode stack will be different */ - z_riscv_pmp_stackguard_prepare(arch_current_thread()); + z_riscv_pmp_stackguard_prepare(_current); #endif /* Set up Physical Memory Protection */ - z_riscv_pmp_usermode_prepare(arch_current_thread()); - 
z_riscv_pmp_usermode_enable(arch_current_thread()); + z_riscv_pmp_usermode_prepare(_current); + z_riscv_pmp_usermode_enable(_current); /* preserve stack pointer for next exception entry */ arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack; diff --git a/arch/sparc/core/thread.c b/arch/sparc/core/thread.c index 8bdc4cd5500f6..e56d9f827c9de 100644 --- a/arch/sparc/core/thread.c +++ b/arch/sparc/core/thread.c @@ -61,7 +61,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void *z_arch_get_next_switch_handle(struct k_thread **old_thread) { - *old_thread = arch_current_thread(); + *old_thread = _current; return z_get_next_switch_handle(*old_thread); } diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index f3103861f366e..d43499a08d2ac 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -49,7 +49,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) { uintptr_t start, end; - if (arch_current_thread() == NULL || arch_is_in_isr()) { + if (_current == NULL || arch_is_in_isr()) { /* We were servicing an interrupt or in early boot environment * and are supposed to be on the interrupt stack */ int cpu_id; @@ -64,7 +64,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) end = start + CONFIG_ISR_STACK_SIZE; #ifdef CONFIG_USERSPACE } else if ((cs & 0x3U) == 0U && - (arch_current_thread()->base.user_options & K_USER) != 0) { + (_current->base.user_options & K_USER) != 0) { /* The low two bits of the CS register is the privilege * level. It will be 0 in supervisor mode and 3 in user mode * corresponding to ring 0 / ring 3. @@ -72,14 +72,14 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) * If we get here, we must have been doing a syscall, check * privilege elevation stack bounds */ - start = arch_current_thread()->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; - end = arch_current_thread()->stack_info.start; + start = _current->stack_info.start - CONFIG_PRIVILEGED_STACK_SIZE; + end = _current->stack_info.start; #endif /* CONFIG_USERSPACE */ } else { /* Normal thread operation, check its stack buffer */ - start = arch_current_thread()->stack_info.start; - end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size); + start = _current->stack_info.start; + end = Z_STACK_PTR_ALIGN(_current->stack_info.start + + _current->stack_info.size); } return (addr <= start) || (addr + size > end); @@ -97,7 +97,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs) __pinned_func bool z_x86_check_guard_page(uintptr_t addr) { - struct k_thread *thread = arch_current_thread(); + struct k_thread *thread = _current; uintptr_t start, end; /* Front guard size - before thread stack area */ @@ -233,7 +233,7 @@ static inline uintptr_t get_cr3(const struct arch_esf *esf) * switch when we took the exception via z_x86_trampoline_to_kernel */ if ((esf->cs & 0x3) != 0) { - return arch_current_thread()->arch.ptables; + return _current->arch.ptables; } #else ARG_UNUSED(esf); diff --git a/arch/x86/core/ia32/float.c b/arch/x86/core/ia32/float.c index e4102d803324f..c89bf7accd5a1 100644 --- a/arch/x86/core/ia32/float.c +++ b/arch/x86/core/ia32/float.c @@ -207,7 +207,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options) /* Associate the new FP context with the specified thread */ - if (thread == arch_current_thread()) { + if (thread == _current) { /* * When enabling FP support for the current thread, just claim * ownership of the 
FPU and leave CR0[TS] unset. @@ -222,7 +222,7 @@ void z_float_enable(struct k_thread *thread, unsigned int options) * of the FPU to them (unless we need it ourselves). */ - if ((arch_current_thread()->base.user_options & _FP_USER_MASK) == 0) { + if ((_current->base.user_options & _FP_USER_MASK) == 0) { /* * We are not FP-capable, so mark FPU as owned by the * thread we've just enabled FP support for, then @@ -278,7 +278,7 @@ int z_float_disable(struct k_thread *thread) thread->base.user_options &= ~_FP_USER_MASK; - if (thread == arch_current_thread()) { + if (thread == _current) { z_FpAccessDisable(); _kernel.current_fp = (struct k_thread *)0; } else { @@ -314,7 +314,7 @@ void _FpNotAvailableExcHandler(struct arch_esf *pEsf) /* Enable highest level of FP capability configured into the kernel */ - k_float_enable(arch_current_thread(), _FP_USER_MASK); + k_float_enable(_current, _FP_USER_MASK); } _EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler, IV_DEVICE_NOT_AVAILABLE, 0); diff --git a/arch/x86/core/userspace.c b/arch/x86/core/userspace.c index fd38d22cb90b0..436bc18edb73d 100644 --- a/arch/x86/core/userspace.c +++ b/arch/x86/core/userspace.c @@ -132,9 +132,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, /* Transition will reset stack pointer to initial, discarding * any old context since this is a one-way operation */ - stack_end = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta); + stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start + + _current->stack_info.size - + _current->stack_info.delta); #ifdef CONFIG_X86_64 /* x86_64 SysV ABI requires 16 byte stack alignment, which @@ -156,15 +156,15 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, * Note that this also needs to page in the reserved * portion of the stack (which is usually the page just * before the beginning of stack in - * arch_current_thread()->stack_info.start. + * _current->stack_info.start. */ uintptr_t stack_start; size_t stack_size; uintptr_t stack_aligned_start; size_t stack_aligned_size; - stack_start = POINTER_TO_UINT(arch_current_thread()->stack_obj); - stack_size = K_THREAD_STACK_LEN(arch_current_thread()->stack_info.size); + stack_start = POINTER_TO_UINT(_current->stack_obj); + stack_size = K_THREAD_STACK_LEN(_current->stack_info.size); #if defined(CONFIG_X86_STACK_PROTECTION) /* With hardware stack protection, the first page of stack @@ -182,7 +182,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, #endif z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end, - arch_current_thread()->stack_info.start); + _current->stack_info.start); CODE_UNREACHABLE; } diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c index fdda995b3b07f..e4188b8670f84 100644 --- a/arch/x86/core/x86_mmu.c +++ b/arch/x86/core/x86_mmu.c @@ -421,7 +421,7 @@ void z_x86_tlb_ipi(const void *arg) /* We might have been moved to another memory domain, so always invoke * z_x86_thread_page_tables_get() instead of using current CR3 value. 
*/ - ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(arch_current_thread())); + ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current)); #endif /* * In the future, we can consider making this smarter, such as @@ -1440,7 +1440,7 @@ static inline void bcb_fence(void) __pinned_func int arch_buffer_validate(const void *addr, size_t size, int write) { - pentry_t *ptables = z_x86_thread_page_tables_get(arch_current_thread()); + pentry_t *ptables = z_x86_thread_page_tables_get(_current); uint8_t *virt; size_t aligned_size; int ret = 0; @@ -1958,7 +1958,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread) * IPI takes care of this if the thread is currently running on some * other CPU. */ - if (thread == arch_current_thread() && thread->arch.ptables != z_x86_cr3_get()) { + if (thread == _current && thread->arch.ptables != z_x86_cr3_get()) { z_x86_cr3_set(thread->arch.ptables); } #endif /* CONFIG_X86_KPTI */ @@ -1980,9 +1980,8 @@ void z_x86_current_stack_perms(void) /* Clear any previous context in the stack buffer to prevent * unintentional data leakage. */ - (void)memset((void *)arch_current_thread()->stack_info.start, 0xAA, - arch_current_thread()->stack_info.size - - arch_current_thread()->stack_info.delta); + (void)memset((void *)_current->stack_info.start, 0xAA, + _current->stack_info.size - _current->stack_info.delta); /* Only now is it safe to grant access to the stack buffer since any * previous context has been erased. @@ -1992,13 +1991,13 @@ void z_x86_current_stack_perms(void) * This will grant stack and memory domain access if it wasn't set * already (in which case this returns very quickly). */ - z_x86_swap_update_common_page_table(arch_current_thread()); + z_x86_swap_update_common_page_table(_current); #else /* Memory domain access is already programmed into the page tables. * Need to enable access to this new user thread's stack buffer in * its domain-specific page tables. 
*/ - set_stack_perms(arch_current_thread(), z_x86_thread_page_tables_get(arch_current_thread())); + set_stack_perms(_current, z_x86_thread_page_tables_get(_current)); #endif } #endif /* CONFIG_USERSPACE */ diff --git a/arch/xtensa/core/fatal.c b/arch/xtensa/core/fatal.c index 5721f130446a1..41a7a8d14097b 100644 --- a/arch/xtensa/core/fatal.c +++ b/arch/xtensa/core/fatal.c @@ -140,7 +140,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf) #ifdef CONFIG_USERSPACE void z_impl_xtensa_user_fault(unsigned int reason) { - if ((arch_current_thread()->base.user_options & K_USER) != 0) { + if ((_current->base.user_options & K_USER) != 0) { if ((reason != K_ERR_KERNEL_OOPS) && (reason != K_ERR_STACK_CHK_FAIL)) { reason = K_ERR_KERNEL_OOPS; diff --git a/arch/xtensa/core/ptables.c b/arch/xtensa/core/ptables.c index b6c8e8fb7fd32..c02ecc64b0dbe 100644 --- a/arch/xtensa/core/ptables.c +++ b/arch/xtensa/core/ptables.c @@ -1086,7 +1086,7 @@ static int mem_buffer_validate(const void *addr, size_t size, int write, int rin int ret = 0; uint8_t *virt; size_t aligned_size; - const struct k_thread *thread = arch_current_thread(); + const struct k_thread *thread = _current; uint32_t *ptables = thread_page_tables_get(thread); /* addr/size arbitrary, fix this up into an aligned region */ diff --git a/arch/xtensa/core/thread.c b/arch/xtensa/core/thread.c index 5bc736a352f2f..f9b8179173d4e 100644 --- a/arch/xtensa/core/thread.c +++ b/arch/xtensa/core/thread.c @@ -156,7 +156,7 @@ int arch_float_enable(struct k_thread *thread, unsigned int options) FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3) { - struct k_thread *current = arch_current_thread(); + struct k_thread *current = _current; size_t stack_end; /* Transition will reset stack pointer to initial, discarding diff --git a/arch/xtensa/core/vector_handlers.c b/arch/xtensa/core/vector_handlers.c index f721e480a2c43..fa58b9c2133ad 100644 --- a/arch/xtensa/core/vector_handlers.c +++ b/arch/xtensa/core/vector_handlers.c @@ -34,7 +34,7 @@ extern char xtensa_arch_kernel_oops_epc[]; bool xtensa_is_outside_stack_bounds(uintptr_t addr, size_t sz, uint32_t ps) { uintptr_t start, end; - struct k_thread *thread = arch_current_thread(); + struct k_thread *thread = _current; bool was_in_isr, invalid; /* Without userspace, there is no privileged stack so the thread stack diff --git a/boards/native/native_posix/irq_handler.c b/boards/native/native_posix/irq_handler.c index 69a1f131dcbf2..56ce6931c260b 100644 --- a/boards/native/native_posix/irq_handler.c +++ b/boards/native/native_posix/irq_handler.c @@ -105,7 +105,7 @@ void posix_irq_handler(void) */ if (may_swap && (hw_irq_ctrl_get_cur_prio() == 256) - && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) { + && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) { (void)z_swap_irqlock(irq_lock); } diff --git a/boards/native/native_sim/irq_handler.c b/boards/native/native_sim/irq_handler.c index c9a18f018639b..38462b4b14a42 100644 --- a/boards/native/native_sim/irq_handler.c +++ b/boards/native/native_sim/irq_handler.c @@ -113,7 +113,7 @@ void posix_irq_handler(void) */ if (may_swap && (hw_irq_ctrl_get_cur_prio() == 256) - && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) { + && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) { (void)z_swap_irqlock(irq_lock); } diff --git a/boards/native/nrf_bsim/irq_handler.c b/boards/native/nrf_bsim/irq_handler.c index c794395fb9a73..2d6ad4f66b7c4 100644 --- 
a/boards/native/nrf_bsim/irq_handler.c +++ b/boards/native/nrf_bsim/irq_handler.c @@ -135,7 +135,7 @@ void posix_irq_handler(void) if (may_swap && (hw_irq_ctrl_get_cur_prio(cpu_n) == 256) && (CPU_will_be_awaken_from_WFE == false) - && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != arch_current_thread())) { + && (_kernel.ready_q.cache) && (_kernel.ready_q.cache != _current)) { z_swap_irqlock(irq_lock); } diff --git a/doc/kernel/services/smp/smp.rst b/doc/kernel/services/smp/smp.rst index e570958fce23e..615676494f822 100644 --- a/doc/kernel/services/smp/smp.rst +++ b/doc/kernel/services/smp/smp.rst @@ -276,7 +276,7 @@ Per-CPU data ============ Many elements of the core kernel data need to be implemented for each -CPU in SMP mode. For example, the ``arch_current_thread()`` thread pointer obviously +CPU in SMP mode. For example, the ``_current`` thread pointer obviously needs to reflect what is running locally, there are many threads running concurrently. Likewise a kernel-provided interrupt stack needs to be created and assigned for each physical CPU, as does the diff --git a/doc/releases/migration-guide-4.1.rst b/doc/releases/migration-guide-4.1.rst index fce9ffa3c13cc..271bb01d6be12 100644 --- a/doc/releases/migration-guide-4.1.rst +++ b/doc/releases/migration-guide-4.1.rst @@ -404,10 +404,6 @@ Stream Flash Architectures ************* -* Common - - * ``_current`` is deprecated, used :c:func:`arch_current_thread` instead. - * native/POSIX * :kconfig:option:`CONFIG_NATIVE_APPLICATION` has been deprecated. Out-of-tree boards using this diff --git a/doc/releases/release-notes-4.1.rst b/doc/releases/release-notes-4.1.rst index aec178a5d33fd..b89e86be51f5e 100644 --- a/doc/releases/release-notes-4.1.rst +++ b/doc/releases/release-notes-4.1.rst @@ -59,9 +59,9 @@ Architectures * Common * Introduced :kconfig:option:`CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL`, which can be selected when - an architecture implemented and enabled its own :c:func:`arch_current_thread` and + an architecture implements :c:func:`arch_current_thread` and :c:func:`arch_current_thread_set` functions for faster retrieval of the current CPU's thread - pointer. When enabled, ``_current`` variable will be routed to the + pointer. When enabled, the ``_current`` symbol will be routed to :c:func:`arch_current_thread` (:github:`80716`). 
* ARC diff --git a/drivers/wifi/eswifi/eswifi.h b/drivers/wifi/eswifi/eswifi.h index 0bf4fddb2066a..54bf00f0f7896 100644 --- a/drivers/wifi/eswifi/eswifi.h +++ b/drivers/wifi/eswifi/eswifi.h @@ -92,9 +92,9 @@ static inline int eswifi_request(struct eswifi_dev *eswifi, char *cmd, static inline void eswifi_lock(struct eswifi_dev *eswifi) { /* Nested locking */ - if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)arch_current_thread()) { + if (atomic_get(&eswifi->mutex_owner) != (atomic_t)(uintptr_t)_current) { k_mutex_lock(&eswifi->mutex, K_FOREVER); - atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)arch_current_thread()); + atomic_set(&eswifi->mutex_owner, (atomic_t)(uintptr_t)_current); eswifi->mutex_depth = 1; } else { eswifi->mutex_depth++; diff --git a/include/zephyr/arch/arch_interface.h b/include/zephyr/arch/arch_interface.h index 0f081d06adafa..6721cec117171 100644 --- a/include/zephyr/arch/arch_interface.h +++ b/include/zephyr/arch/arch_interface.h @@ -1289,7 +1289,7 @@ typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr); * ============ ======= ============================================ * thread esf * ============ ======= ============================================ - * thread NULL Stack trace from thread (can be arch_current_thread()) + * thread NULL Stack trace from thread (can be _current) * thread esf Stack trace starting on esf * ============ ======= ============================================ */ diff --git a/include/zephyr/arch/common/arch_inlines.h b/include/zephyr/arch/common/arch_inlines.h index 8c0ba3343ad9f..0490dba71aab5 100644 --- a/include/zephyr/arch/common/arch_inlines.h +++ b/include/zephyr/arch/common/arch_inlines.h @@ -19,7 +19,7 @@ static ALWAYS_INLINE struct k_thread *arch_current_thread(void) { #ifdef CONFIG_SMP - /* In SMP, arch_current_thread() is a field read from _current_cpu, which + /* In SMP, _current is a field read from _current_cpu, which * can race with preemption before it is read. We must lock * local interrupts when reading it. */ diff --git a/include/zephyr/arch/x86/ia32/arch.h b/include/zephyr/arch/x86/ia32/arch.h index e2f961c817e25..b82e0db0f1733 100644 --- a/include/zephyr/arch/x86/ia32/arch.h +++ b/include/zephyr/arch/x86/ia32/arch.h @@ -305,7 +305,7 @@ static inline void arch_isr_direct_footer(int swap) * 3) Next thread to run in the ready queue is not this thread */ if (swap != 0 && _kernel.cpus[0].nested == 0 && - _kernel.ready_q.cache != arch_current_thread()) { + _kernel.ready_q.cache != _current) { unsigned int flags; /* Fetch EFLAGS argument to z_swap() */ diff --git a/include/zephyr/internal/syscall_handler.h b/include/zephyr/internal/syscall_handler.h index a1264d6c28733..b48070fad6b46 100644 --- a/include/zephyr/internal/syscall_handler.h +++ b/include/zephyr/internal/syscall_handler.h @@ -62,7 +62,7 @@ static inline bool k_is_in_user_syscall(void) * calls from supervisor mode bypass everything directly to * the implementation function. 
*/ - return !k_is_in_isr() && (arch_current_thread()->syscall_frame != NULL); + return !k_is_in_isr() && (_current->syscall_frame != NULL); } /** @@ -350,7 +350,7 @@ int k_usermode_string_copy(char *dst, const char *src, size_t maxlen); #define K_OOPS(expr) \ do { \ if (expr) { \ - arch_syscall_oops(arch_current_thread()->syscall_frame); \ + arch_syscall_oops(_current->syscall_frame); \ } \ } while (false) diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h index 4c5d41fa02444..3e2a9fe428560 100644 --- a/include/zephyr/kernel_structs.h +++ b/include/zephyr/kernel_structs.h @@ -174,7 +174,7 @@ struct _cpu { #endif #ifdef CONFIG_SMP - /* True when arch_current_thread() is allowed to context switch */ + /* True when _current is allowed to context switch */ uint8_t swap_ok; #endif @@ -263,12 +263,12 @@ bool z_smp_cpu_mobile(void); #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \ arch_curr_cpu(); }) +#define _current arch_current_thread() #else #define _current_cpu (&_kernel.cpus[0]) -#endif /* CONFIG_SMP */ - -#define _current arch_current_thread() __DEPRECATED_MACRO +#define _current _kernel.cpus[0].current +#endif /* kernel wait queue record */ #ifdef CONFIG_WAITQ_SCALABLE diff --git a/kernel/Kconfig b/kernel/Kconfig index 36fcf1d821cc4..27cb5d70bc0bc 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -211,7 +211,7 @@ config THREAD_ABORT_NEED_CLEANUP bool help This option enables the bits to clean up the current thread if - k_thread_abort(arch_current_thread()) is called, as the cleanup cannot be + k_thread_abort(_current) is called, as the cleanup cannot be running in the current thread stack. config THREAD_CUSTOM_DATA diff --git a/kernel/errno.c b/kernel/errno.c index 2535e00e336cb..bbbd6f87bfd03 100644 --- a/kernel/errno.c +++ b/kernel/errno.c @@ -36,7 +36,7 @@ int *z_impl_z_errno(void) /* Initialized to the lowest address in the stack so the thread can * directly read/write it */ - return &arch_current_thread()->userspace_local_data->errno_var; + return &_current->userspace_local_data->errno_var; } static inline int *z_vrfy_z_errno(void) @@ -48,7 +48,7 @@ static inline int *z_vrfy_z_errno(void) #else int *z_impl_z_errno(void) { - return &arch_current_thread()->errno_var; + return &_current->errno_var; } #endif /* CONFIG_USERSPACE */ diff --git a/kernel/fatal.c b/kernel/fatal.c index a5682e7cd7f41..3cf3114364da8 100644 --- a/kernel/fatal.c +++ b/kernel/fatal.c @@ -90,7 +90,7 @@ void z_fatal_error(unsigned int reason, const struct arch_esf *esf) */ unsigned int key = arch_irq_lock(); struct k_thread *thread = IS_ENABLED(CONFIG_MULTITHREADING) ? - arch_current_thread() : NULL; + _current : NULL; /* twister looks for the "ZEPHYR FATAL ERROR" string, don't * change it without also updating twister diff --git a/kernel/idle.c b/kernel/idle.c index 4d095c8f27b3a..62ff84e4c88dc 100644 --- a/kernel/idle.c +++ b/kernel/idle.c @@ -24,7 +24,7 @@ void idle(void *unused1, void *unused2, void *unused3) ARG_UNUSED(unused2); ARG_UNUSED(unused3); - __ASSERT_NO_MSG(arch_current_thread()->base.prio >= 0); + __ASSERT_NO_MSG(_current->base.prio >= 0); while (true) { /* SMP systems without a working IPI can't actual @@ -85,7 +85,7 @@ void idle(void *unused1, void *unused2, void *unused3) * explicitly yield in the idle thread otherwise * nothing else will run once it starts. 
*/ - if (_kernel.ready_q.cache != arch_current_thread()) { + if (_kernel.ready_q.cache != _current) { z_swap_unlocked(); } # endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */ diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h index cb13aacf6007a..94f90ce94624c 100644 --- a/kernel/include/kernel_internal.h +++ b/kernel/include/kernel_internal.h @@ -286,7 +286,7 @@ int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats); * where these steps require that the thread is no longer running. * If the target thread is not the current running thread, the cleanup * steps will be performed immediately. However, if the target thread is - * the current running thread (e.g. k_thread_abort(arch_current_thread())), it defers + * the current running thread (e.g. k_thread_abort(_current)), it defers * the cleanup steps to later when the work will be finished in another * context. * diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index 4cc17d5e92ecd..1807330fe526f 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -141,9 +141,9 @@ static inline bool _is_valid_prio(int prio, void *entry_point) static inline void z_sched_lock(void) { __ASSERT(!arch_is_in_isr(), ""); - __ASSERT(arch_current_thread()->base.sched_locked != 1U, ""); + __ASSERT(_current->base.sched_locked != 1U, ""); - --arch_current_thread()->base.sched_locked; + --_current->base.sched_locked; compiler_barrier(); } diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index ea3c4ef79231e..a4e75e7ed6da8 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -96,12 +96,12 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, */ # ifndef CONFIG_ARM64 __ASSERT(arch_irq_unlocked(key) || - arch_current_thread()->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD), + _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD), "Context switching while holding lock!"); # endif /* CONFIG_ARM64 */ #endif /* CONFIG_SPIN_VALIDATE */ - old_thread = arch_current_thread(); + old_thread = _current; z_check_stack_sentinel(); @@ -146,7 +146,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, arch_cohere_stacks(old_thread, NULL, new_thread); #ifdef CONFIG_SMP - /* Now add arch_current_thread() back to the run queue, once we are + /* Now add _current back to the run queue, once we are * guaranteed to reach the context switch in finite * time. See z_sched_switch_spin(). 
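The idle-loop check above and the z_priq_*_yield helpers in the hunks that follow implement the same rule: a thread that yields goes back to the tail of its priority level so equal-priority peers get a turn. A minimal, hypothetical application-level sketch of that behaviour using only the public k_yield() API (thread names, stack size and priority are illustrative, not taken from this patch):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

#define DEMO_STACK_SIZE 1024
#define DEMO_PRIO       5       /* arbitrary preemptible priority for the sketch */

static void demo_worker(void *p1, void *p2, void *p3)
{
        const char *tag = p1;

        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        for (int i = 0; i < 3; i++) {
                printk("%s: pass %d\n", tag, i);
                k_yield();      /* requeue at the tail of this priority level */
        }
}

K_THREAD_DEFINE(demo_a, DEMO_STACK_SIZE, demo_worker, "A", NULL, NULL, DEMO_PRIO, 0, 0);
K_THREAD_DEFINE(demo_b, DEMO_STACK_SIZE, demo_worker, "B", NULL, NULL, DEMO_PRIO, 0, 0);

With both threads at the same priority, each k_yield() lets the scheduler pick the other one, which corresponds to the dequeue/append dance the priority-queue hunks below perform on _current.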
*/ @@ -174,7 +174,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, irq_unlock(key); } - return arch_current_thread()->swap_retval; + return _current->swap_retval; } static inline int z_swap_irqlock(unsigned int key) diff --git a/kernel/include/kthread.h b/kernel/include/kthread.h index 35049c7805bde..636a62b7009e5 100644 --- a/kernel/include/kthread.h +++ b/kernel/include/kthread.h @@ -211,17 +211,17 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread, return true; } - __ASSERT(arch_current_thread() != NULL, ""); + __ASSERT(_current != NULL, ""); /* Or if we're pended/suspended/dummy (duh) */ - if (z_is_thread_prevented_from_running(arch_current_thread())) { + if (z_is_thread_prevented_from_running(_current)) { return true; } /* Otherwise we have to be running a preemptible thread or * switching to a metairq */ - if (thread_is_preemptible(arch_current_thread()) || thread_is_metairq(thread)) { + if (thread_is_preemptible(_current) || thread_is_metairq(thread)) { return true; } diff --git a/kernel/include/priority_q.h b/kernel/include/priority_q.h index 5ca0de23cbe25..259d689dda75b 100644 --- a/kernel/include/priority_q.h +++ b/kernel/include/priority_q.h @@ -131,9 +131,9 @@ static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq) #ifndef CONFIG_SMP sys_dnode_t *n; - n = sys_dlist_peek_next_no_check(pq, &arch_current_thread()->base.qnode_dlist); + n = sys_dlist_peek_next_no_check(pq, &_current->base.qnode_dlist); - sys_dlist_dequeue(&arch_current_thread()->base.qnode_dlist); + sys_dlist_dequeue(&_current->base.qnode_dlist); struct k_thread *t; @@ -145,15 +145,15 @@ static ALWAYS_INLINE void z_priq_dumb_yield(sys_dlist_t *pq) while (n != NULL) { t = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); - if (z_sched_prio_cmp(arch_current_thread(), t) > 0) { + if (z_sched_prio_cmp(_current, t) > 0) { sys_dlist_insert(&t->base.qnode_dlist, - &arch_current_thread()->base.qnode_dlist); + &_current->base.qnode_dlist); return; } n = sys_dlist_peek_next_no_check(pq, n); } - sys_dlist_append(pq, &arch_current_thread()->base.qnode_dlist); + sys_dlist_append(pq, &_current->base.qnode_dlist); #endif } @@ -229,8 +229,8 @@ static ALWAYS_INLINE void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread static ALWAYS_INLINE void z_priq_rb_yield(struct _priq_rb *pq) { #ifndef CONFIG_SMP - z_priq_rb_remove(pq, arch_current_thread()); - z_priq_rb_add(pq, arch_current_thread()); + z_priq_rb_remove(pq, _current); + z_priq_rb_add(pq, _current); #endif } @@ -319,11 +319,11 @@ static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, static ALWAYS_INLINE void z_priq_mq_yield(struct _priq_mq *pq) { #ifndef CONFIG_SMP - struct prio_info pos = get_prio_info(arch_current_thread()->base.prio); + struct prio_info pos = get_prio_info(_current->base.prio); - sys_dlist_dequeue(&arch_current_thread()->base.qnode_dlist); + sys_dlist_dequeue(&_current->base.qnode_dlist); sys_dlist_append(&pq->queues[pos.offset_prio], - &arch_current_thread()->base.qnode_dlist); + &_current->base.qnode_dlist); #endif } diff --git a/kernel/ipi.c b/kernel/ipi.c index 59c2eba669867..ee01c4594251c 100644 --- a/kernel/ipi.c +++ b/kernel/ipi.c @@ -101,7 +101,7 @@ void z_sched_ipi(void) #endif /* CONFIG_TRACE_SCHED_IPI */ #ifdef CONFIG_TIMESLICING - if (thread_is_sliceable(arch_current_thread())) { + if (thread_is_sliceable(_current)) { z_time_slice(); } #endif /* CONFIG_TIMESLICING */ diff --git a/kernel/mailbox.c b/kernel/mailbox.c index 17ebfb2ea0351..d7da8e3c8e49a 100644 --- a/kernel/mailbox.c +++ 
b/kernel/mailbox.c @@ -216,7 +216,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_spinlock_key_t key; /* save sender id so it can be used during message matching */ - tx_msg->rx_source_thread = arch_current_thread(); + tx_msg->rx_source_thread = _current; /* finish readying sending thread (actual or dummy) for send */ sending_thread = tx_msg->_syncing_thread; @@ -296,7 +296,7 @@ int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) { /* configure things for a synchronous send, then send the message */ - tx_msg->_syncing_thread = arch_current_thread(); + tx_msg->_syncing_thread = _current; SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout); @@ -321,7 +321,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, */ mbox_async_alloc(&async); - async->thread.prio = arch_current_thread()->base.prio; + async->thread.prio = _current->base.prio; async->tx_msg = *tx_msg; async->tx_msg._syncing_thread = (struct k_thread *)&async->thread; @@ -388,7 +388,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, int result; /* save receiver id so it can be used during message matching */ - rx_msg->tx_target_thread = arch_current_thread(); + rx_msg->tx_target_thread = _current; /* search mailbox's tx queue for a compatible sender */ key = k_spin_lock(&mbox->lock); @@ -425,7 +425,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout); /* wait until a matching sender appears or a timeout occurs */ - arch_current_thread()->base.swap_data = rx_msg; + _current->base.swap_data = rx_msg; result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout); /* consume message data immediately, if needed */ diff --git a/kernel/mem_domain.c b/kernel/mem_domain.c index 1fc8a36a94de2..16b337acf011d 100644 --- a/kernel/mem_domain.c +++ b/kernel/mem_domain.c @@ -299,7 +299,7 @@ void z_mem_domain_init_thread(struct k_thread *thread) k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock); /* New threads inherit memory domain configuration from parent */ - ret = add_thread_locked(arch_current_thread()->mem_domain_info.mem_domain, thread); + ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread); __ASSERT_NO_MSG(ret == 0); ARG_UNUSED(ret); diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c index 540ef6ed42497..a7da66d374fab 100644 --- a/kernel/mem_slab.c +++ b/kernel/mem_slab.c @@ -252,7 +252,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) /* wait for a free block or timeout */ result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout); if (result == 0) { - *mem = arch_current_thread()->base.swap_data; + *mem = _current->base.swap_data; } SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mem_slab, alloc, slab, timeout, result); diff --git a/kernel/mempool.c b/kernel/mempool.c index 7e9a7677f71ab..d8926c63ed940 100644 --- a/kernel/mempool.c +++ b/kernel/mempool.c @@ -165,7 +165,7 @@ void *z_thread_aligned_alloc(size_t align, size_t size) if (k_is_in_isr()) { heap = _SYSTEM_HEAP; } else { - heap = arch_current_thread()->resource_pool; + heap = _current->resource_pool; } if (heap != NULL) { diff --git a/kernel/mmu.c b/kernel/mmu.c index fc55096d44b53..617b02997dd9c 100644 --- a/kernel/mmu.c +++ b/kernel/mmu.c @@ -1674,7 +1674,7 @@ static bool do_page_fault(void *addr, bool pin) #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */ key = k_spin_lock(&z_mm_lock); - faulting_thread = 
arch_current_thread(); + faulting_thread = _current; status = arch_page_location_get(addr, &page_in_location); if (status == ARCH_PAGE_LOCATION_BAD) { diff --git a/kernel/msg_q.c b/kernel/msg_q.c index 9035fccda7da8..3998c2351088f 100644 --- a/kernel/msg_q.c +++ b/kernel/msg_q.c @@ -169,7 +169,7 @@ int z_impl_k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, put, msgq, timeout); /* wait for put message success, failure, or timeout */ - arch_current_thread()->base.swap_data = (void *) data; + _current->base.swap_data = (void *) data; result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, put, msgq, timeout, result); @@ -267,7 +267,7 @@ int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_msgq, get, msgq, timeout); /* wait for get message success or timeout */ - arch_current_thread()->base.swap_data = data; + _current->base.swap_data = data; result = z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_msgq, get, msgq, timeout, result); diff --git a/kernel/mutex.c b/kernel/mutex.c index 2fbede19e2caf..ce76e5a2af545 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -114,17 +114,17 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) key = k_spin_lock(&lock); - if (likely((mutex->lock_count == 0U) || (mutex->owner == arch_current_thread()))) { + if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) { mutex->owner_orig_prio = (mutex->lock_count == 0U) ? - arch_current_thread()->base.prio : + _current->base.prio : mutex->owner_orig_prio; mutex->lock_count++; - mutex->owner = arch_current_thread(); + mutex->owner = _current; LOG_DBG("%p took mutex %p, count: %d, orig prio: %d", - arch_current_thread(), mutex, mutex->lock_count, + _current, mutex, mutex->lock_count, mutex->owner_orig_prio); k_spin_unlock(&lock, key); @@ -144,7 +144,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mutex, lock, mutex, timeout); - new_prio = new_prio_for_inheritance(arch_current_thread()->base.prio, + new_prio = new_prio_for_inheritance(_current->base.prio, mutex->owner->base.prio); LOG_DBG("adjusting prio up on mutex %p", mutex); @@ -157,7 +157,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) LOG_DBG("on mutex %p got_mutex value: %d", mutex, got_mutex); - LOG_DBG("%p got mutex %p (y/n): %c", arch_current_thread(), mutex, + LOG_DBG("%p got mutex %p (y/n): %c", _current, mutex, got_mutex ? 'y' : 'n'); if (got_mutex == 0) { @@ -167,7 +167,7 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) /* timed out */ - LOG_DBG("%p timeout on mutex %p", arch_current_thread(), mutex); + LOG_DBG("%p timeout on mutex %p", _current, mutex); key = k_spin_lock(&lock); @@ -224,7 +224,7 @@ int z_impl_k_mutex_unlock(struct k_mutex *mutex) /* * The current thread does not own the mutex. */ - CHECKIF(mutex->owner != arch_current_thread()) { + CHECKIF(mutex->owner != _current) { SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mutex, unlock, mutex, -EPERM); return -EPERM; diff --git a/kernel/pipes.c b/kernel/pipes.c index a9eef5a4f368c..a81393c508d1c 100644 --- a/kernel/pipes.c +++ b/kernel/pipes.c @@ -443,11 +443,11 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data, * invoked from within an ISR as that is not safe to do. */ - src_desc = k_is_in_isr() ? 
&isr_desc : &arch_current_thread()->pipe_desc; + src_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc; src_desc->buffer = (unsigned char *)data; src_desc->bytes_to_xfer = bytes_to_write; - src_desc->thread = arch_current_thread(); + src_desc->thread = _current; sys_dlist_append(&src_list, &src_desc->node); *bytes_written = pipe_write(pipe, &src_list, @@ -488,7 +488,7 @@ int z_impl_k_pipe_put(struct k_pipe *pipe, const void *data, SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, put, pipe, timeout); - arch_current_thread()->base.swap_data = src_desc; + _current->base.swap_data = src_desc; z_sched_wait(&pipe->lock, key, &pipe->wait_q.writers, timeout, NULL); @@ -581,11 +581,11 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe, * invoked from within an ISR as that is not safe to do. */ - dest_desc = k_is_in_isr() ? &isr_desc : &arch_current_thread()->pipe_desc; + dest_desc = k_is_in_isr() ? &isr_desc : &_current->pipe_desc; dest_desc->buffer = data; dest_desc->bytes_to_xfer = bytes_to_read; - dest_desc->thread = arch_current_thread(); + dest_desc->thread = _current; src_desc = (struct _pipe_desc *)sys_dlist_get(&src_list); while (src_desc != NULL) { @@ -674,7 +674,7 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe, SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_pipe, get, pipe, timeout); - arch_current_thread()->base.swap_data = dest_desc; + _current->base.swap_data = dest_desc; z_sched_wait(&pipe->lock, key, &pipe->wait_q.readers, timeout, NULL); diff --git a/kernel/poll.c b/kernel/poll.c index 05e9fe10c3e06..502e97537b9b6 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -290,7 +290,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, { int events_registered; k_spinlock_key_t key; - struct z_poller *poller = &arch_current_thread()->poller; + struct z_poller *poller = &_current->poller; poller->is_polling = true; poller->mode = MODE_POLL; diff --git a/kernel/queue.c b/kernel/queue.c index 09b224c9c9449..4b00deeb1e757 100644 --- a/kernel/queue.c +++ b/kernel/queue.c @@ -346,9 +346,9 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout) int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, - (ret != 0) ? NULL : arch_current_thread()->base.swap_data); + (ret != 0) ? NULL : _current->base.swap_data); - return (ret != 0) ? NULL : arch_current_thread()->base.swap_data; + return (ret != 0) ? 
NULL : _current->base.swap_data; } bool k_queue_remove(struct k_queue *queue, void *data) diff --git a/kernel/sched.c b/kernel/sched.c index 41cdafa5d7eb5..02dc0b699d223 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -31,7 +31,7 @@ extern struct k_thread *pending_current; struct k_spinlock _sched_spinlock; /* Storage to "complete" the context switch from an invalid/incomplete thread - * context (ex: exiting an ISR that aborted arch_current_thread()) + * context (ex: exiting an ISR that aborted _current) */ __incoherent struct k_thread _thread_dummy; @@ -98,12 +98,12 @@ static ALWAYS_INLINE struct k_thread *runq_best(void) return _priq_run_best(curr_cpu_runq()); } -/* arch_current_thread() is never in the run queue until context switch on +/* _current is never in the run queue until context switch on * SMP configurations, see z_requeue_current() */ static inline bool should_queue_thread(struct k_thread *thread) { - return !IS_ENABLED(CONFIG_SMP) || (thread != arch_current_thread()); + return !IS_ENABLED(CONFIG_SMP) || (thread != _current); } static ALWAYS_INLINE void queue_thread(struct k_thread *thread) @@ -113,7 +113,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread) runq_add(thread); } #ifdef CONFIG_SMP - if (thread == arch_current_thread()) { + if (thread == _current) { /* add current to end of queue means "yield" */ _current_cpu->swap_ok = true; } @@ -167,8 +167,8 @@ static inline void clear_halting(struct k_thread *thread) static ALWAYS_INLINE struct k_thread *next_up(void) { #ifdef CONFIG_SMP - if (is_halting(arch_current_thread())) { - halt_thread(arch_current_thread(), is_aborting(arch_current_thread()) ? + if (is_halting(_current)) { + halt_thread(_current, is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED); } #endif /* CONFIG_SMP */ @@ -207,42 +207,42 @@ static ALWAYS_INLINE struct k_thread *next_up(void) #else /* Under SMP, the "cache" mechanism for selecting the next * thread doesn't work, so we have more work to do to test - * arch_current_thread() against the best choice from the queue. Here, the + * _current against the best choice from the queue. Here, the * thread selected above represents "the best thread that is * not current". * - * Subtle note on "queued": in SMP mode, arch_current_thread() does not + * Subtle note on "queued": in SMP mode, _current does not * live in the queue, so this isn't exactly the same thing as - * "ready", it means "is arch_current_thread() already added back to the + * "ready", it means "is _current already added back to the * queue such that we don't want to re-add it". 
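While the scheduler internals above reason about _current directly, code outside the kernel reaches the same pointer through the public accessor k_current_get(); its syscall path typically lands in z_impl_k_sched_current_thread_query(), which simply returns _current in a later hunk of this file. A small hypothetical usage sketch (CONFIG_THREAD_NAME is assumed so the name lookup returns something useful):

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

void log_current_thread(void)
{
        k_tid_t me = k_current_get();
        const char *name = k_thread_name_get(me);

        printk("running in %p (%s)\n", me, (name != NULL) ? name : "unnamed");
}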
*/ - bool queued = z_is_thread_queued(arch_current_thread()); - bool active = !z_is_thread_prevented_from_running(arch_current_thread()); + bool queued = z_is_thread_queued(_current); + bool active = !z_is_thread_prevented_from_running(_current); if (thread == NULL) { thread = _current_cpu->idle_thread; } if (active) { - int32_t cmp = z_sched_prio_cmp(arch_current_thread(), thread); + int32_t cmp = z_sched_prio_cmp(_current, thread); /* Ties only switch if state says we yielded */ if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) { - thread = arch_current_thread(); + thread = _current; } if (!should_preempt(thread, _current_cpu->swap_ok)) { - thread = arch_current_thread(); + thread = _current; } } - /* Put arch_current_thread() back into the queue */ - if ((thread != arch_current_thread()) && active && - !z_is_idle_thread_object(arch_current_thread()) && !queued) { - queue_thread(arch_current_thread()); + /* Put _current back into the queue */ + if ((thread != _current) && active && + !z_is_idle_thread_object(_current) && !queued) { + queue_thread(_current); } - /* Take the new arch_current_thread() out of the queue */ + /* Take the new _current out of the queue */ if (z_is_thread_queued(thread)) { dequeue_thread(thread); } @@ -258,7 +258,7 @@ void move_thread_to_end_of_prio_q(struct k_thread *thread) dequeue_thread(thread); } queue_thread(thread); - update_cache(thread == arch_current_thread()); + update_cache(thread == _current); } /* Track cooperative threads preempted by metairqs so we can return to @@ -269,10 +269,10 @@ static void update_metairq_preempt(struct k_thread *thread) { #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \ (CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES) - if (thread_is_metairq(thread) && !thread_is_metairq(arch_current_thread()) && - !thread_is_preemptible(arch_current_thread())) { + if (thread_is_metairq(thread) && !thread_is_metairq(_current) && + !thread_is_preemptible(_current)) { /* Record new preemption */ - _current_cpu->metairq_preempted = arch_current_thread(); + _current_cpu->metairq_preempted = _current; } else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) { /* Returning from existing preemption */ _current_cpu->metairq_preempted = NULL; @@ -292,14 +292,14 @@ static ALWAYS_INLINE void update_cache(int preempt_ok) if (should_preempt(thread, preempt_ok)) { #ifdef CONFIG_TIMESLICING - if (thread != arch_current_thread()) { + if (thread != _current) { z_reset_time_slice(thread); } #endif /* CONFIG_TIMESLICING */ update_metairq_preempt(thread); _kernel.ready_q.cache = thread; } else { - _kernel.ready_q.cache = arch_current_thread(); + _kernel.ready_q.cache = _current; } #else @@ -378,9 +378,9 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread) */ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key) { - if (is_halting(arch_current_thread())) { - halt_thread(arch_current_thread(), - is_aborting(arch_current_thread()) ? _THREAD_DEAD : _THREAD_SUSPENDED); + if (is_halting(_current)) { + halt_thread(_current, + is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED); } k_spin_unlock(&_sched_spinlock, key); while (is_halting(thread)) { @@ -394,7 +394,7 @@ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key) /* Shared handler for k_thread_{suspend,abort}(). Called with the * scheduler lock held and the key passed (which it may * release/reacquire!) 
which will be released before a possible return - * (aborting arch_current_thread() will not return, obviously), which may be after + * (aborting _current will not return, obviously), which may be after * a context switch. */ static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key, @@ -427,14 +427,14 @@ static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_ if (arch_is_in_isr()) { thread_halt_spin(thread, key); } else { - add_to_waitq_locked(arch_current_thread(), wq); + add_to_waitq_locked(_current, wq); z_swap(&_sched_spinlock, key); } } else { halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED); - if ((thread == arch_current_thread()) && !arch_is_in_isr()) { + if ((thread == _current) && !arch_is_in_isr()) { z_swap(&_sched_spinlock, key); - __ASSERT(!terminate, "aborted arch_current_thread() back from dead"); + __ASSERT(!terminate, "aborted _current back from dead"); } else { k_spin_unlock(&_sched_spinlock, key); } @@ -453,7 +453,7 @@ void z_impl_k_thread_suspend(k_tid_t thread) /* Special case "suspend the current thread" as it doesn't * need the async complexity below. */ - if (thread == arch_current_thread() && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) { + if (thread == _current && !arch_is_in_isr() && !IS_ENABLED(CONFIG_SMP)) { k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); z_mark_thread_as_suspended(thread); @@ -521,7 +521,7 @@ static void unready_thread(struct k_thread *thread) if (z_is_thread_queued(thread)) { dequeue_thread(thread); } - update_cache(thread == arch_current_thread()); + update_cache(thread == _current); } /* _sched_spinlock must be held */ @@ -558,7 +558,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q, void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, k_timeout_t timeout) { - __ASSERT_NO_MSG(thread == arch_current_thread() || is_thread_dummy(thread)); + __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread)); K_SPINLOCK(&_sched_spinlock) { pend_locked(thread, wait_q, timeout); } @@ -616,7 +616,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, _wait_q_t *wait_q, k_timeout_t timeout) { #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) - pending_current = arch_current_thread(); + pending_current = _current; #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */ __ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock); @@ -629,7 +629,7 @@ int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, * held. 
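The halt machinery above (z_thread_halt(), thread_halt_spin(), the k_thread_suspend() special case) is what the public k_thread_abort() and k_thread_join() calls drive. A hypothetical usage sketch, consistent with the k_thread_join() return codes restored further down (0 for a finished thread, -EBUSY with K_NO_WAIT, -EDEADLK for a self-join); names, stack size and priority are illustrative only:

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static K_THREAD_STACK_DEFINE(victim_stack, 1024);
static struct k_thread victim_thread;

static void victim_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        for (;;) {
                k_sleep(K_MSEC(100));
        }
}

void abort_and_join_example(void)
{
        k_tid_t tid = k_thread_create(&victim_thread, victim_stack,
                                      K_THREAD_STACK_SIZEOF(victim_stack),
                                      victim_entry, NULL, NULL, NULL,
                                      K_PRIO_PREEMPT(7), 0, K_NO_WAIT);

        /* Aborting another thread goes through the halt path above and
         * marks it dead; joining a dead thread then returns immediately.
         */
        k_thread_abort(tid);

        int ret = k_thread_join(tid, K_FOREVER);

        printk("join returned %d\n", ret);      /* expect 0 here */
}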
*/ (void) k_spin_lock(&_sched_spinlock); - pend_locked(arch_current_thread(), wait_q, timeout); + pend_locked(_current, wait_q, timeout); k_spin_release(lock); return z_swap(&_sched_spinlock, key); } @@ -727,7 +727,7 @@ static inline bool need_swap(void) /* Check if the next ready thread is the same as the current thread */ new_thread = _kernel.ready_q.cache; - return new_thread != arch_current_thread(); + return new_thread != _current; #endif /* CONFIG_SMP */ } @@ -763,15 +763,15 @@ void k_sched_lock(void) void k_sched_unlock(void) { K_SPINLOCK(&_sched_spinlock) { - __ASSERT(arch_current_thread()->base.sched_locked != 0U, ""); + __ASSERT(_current->base.sched_locked != 0U, ""); __ASSERT(!arch_is_in_isr(), ""); - ++arch_current_thread()->base.sched_locked; + ++_current->base.sched_locked; update_cache(0); } LOG_DBG("scheduler unlocked (%p:%d)", - arch_current_thread(), arch_current_thread()->base.sched_locked); + _current, _current->base.sched_locked); SYS_PORT_TRACING_FUNC(k_thread, sched_unlock); @@ -783,10 +783,10 @@ struct k_thread *z_swap_next_thread(void) #ifdef CONFIG_SMP struct k_thread *ret = next_up(); - if (ret == arch_current_thread()) { + if (ret == _current) { /* When not swapping, have to signal IPIs here. In * the context switch case it must happen later, after - * arch_current_thread() gets requeued. + * _current gets requeued. */ signal_pending_ipi(); } @@ -827,7 +827,7 @@ static inline void set_current(struct k_thread *new_thread) * function. * * @warning - * The arch_current_thread() value may have changed after this call and not refer + * The _current value may have changed after this call and not refer * to the interrupted thread anymore. It might be necessary to make a local * copy before calling this function. * @@ -843,7 +843,7 @@ void *z_get_next_switch_handle(void *interrupted) void *ret = NULL; K_SPINLOCK(&_sched_spinlock) { - struct k_thread *old_thread = arch_current_thread(), *new_thread; + struct k_thread *old_thread = _current, *new_thread; if (IS_ENABLED(CONFIG_SMP)) { old_thread->switch_handle = NULL; @@ -869,7 +869,7 @@ void *z_get_next_switch_handle(void *interrupted) #endif /* CONFIG_TIMESLICING */ #ifdef CONFIG_SPIN_VALIDATE - /* Changed arch_current_thread()! Update the spinlock + /* Changed _current! Update the spinlock * bookkeeping so the validation doesn't get * confused when the "wrong" thread tries to * release the lock. 
@@ -904,9 +904,9 @@ void *z_get_next_switch_handle(void *interrupted) return ret; #else z_sched_usage_switch(_kernel.ready_q.cache); - arch_current_thread()->switch_handle = interrupted; + _current->switch_handle = interrupted; set_current(_kernel.ready_q.cache); - return arch_current_thread()->switch_handle; + return _current->switch_handle; #endif /* CONFIG_SMP */ } #endif /* CONFIG_USE_SWITCH */ @@ -952,7 +952,7 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio) bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio); if ((need_sched) && (IS_ENABLED(CONFIG_SMP) || - (arch_current_thread()->base.sched_locked == 0U))) { + (_current->base.sched_locked == 0U))) { z_reschedule_unlocked(); } } @@ -1036,7 +1036,7 @@ static inline void z_vrfy_k_reschedule(void) bool k_can_yield(void) { return !(k_is_pre_kernel() || k_is_in_isr() || - z_is_idle_thread_object(arch_current_thread())); + z_is_idle_thread_object(_current)); } void z_impl_k_yield(void) @@ -1048,7 +1048,7 @@ void z_impl_k_yield(void) k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); #ifdef CONFIG_SMP - z_mark_thread_as_queued(arch_current_thread()); + z_mark_thread_as_queued(_current); #endif runq_yield(); @@ -1070,7 +1070,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks) __ASSERT(!arch_is_in_isr(), ""); - LOG_DBG("thread %p for %lu ticks", arch_current_thread(), (unsigned long)ticks); + LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks); /* wait of 0 ms is treated as a 'yield' */ if (ticks == 0) { @@ -1088,11 +1088,11 @@ static int32_t z_tick_sleep(k_ticks_t ticks) k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) - pending_current = arch_current_thread(); + pending_current = _current; #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */ - unready_thread(arch_current_thread()); - z_add_thread_timeout(arch_current_thread(), timeout); - z_mark_thread_as_sleeping(arch_current_thread()); + unready_thread(_current); + z_add_thread_timeout(_current, timeout); + z_mark_thread_as_sleeping(_current); (void)z_swap(&_sched_spinlock, key); @@ -1195,7 +1195,7 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread) k_tid_t z_impl_k_sched_current_thread_query(void) { - return arch_current_thread(); + return _current; } #ifdef CONFIG_USERSPACE @@ -1250,13 +1250,13 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state (void)z_abort_thread_timeout(thread); unpend_all(&thread->join_queue); - /* Edge case: aborting arch_current_thread() from within an + /* Edge case: aborting _current from within an * ISR that preempted it requires clearing the - * arch_current_thread() pointer so the upcoming context + * _current pointer so the upcoming context * switch doesn't clobber the now-freed * memory */ - if (thread == arch_current_thread() && arch_is_in_isr()) { + if (thread == _current && arch_is_in_isr()) { dummify = true; } } @@ -1299,10 +1299,10 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state k_thread_abort_cleanup(thread); #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */ - /* Do this "set arch_current_thread() to dummy" step last so that - * subsystems above can rely on arch_current_thread() being + /* Do this "set _current to dummy" step last so that + * subsystems above can rely on _current being * unchanged. Disabled for posix as that arch - * continues to use the arch_current_thread() pointer in its swap + * continues to use the _current pointer in its swap * code. 
Note that we must leave a non-null switch * handle for any threads spinning in join() (this can * never be used, as our thread is flagged dead, but @@ -1310,7 +1310,7 @@ static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state */ if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) { #ifdef CONFIG_USE_SWITCH - arch_current_thread()->switch_handle = arch_current_thread(); + _current->switch_handle = _current; #endif z_dummy_thread_init(&_thread_dummy); @@ -1368,13 +1368,13 @@ int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) ret = 0; } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { ret = -EBUSY; - } else if ((thread == arch_current_thread()) || - (thread->base.pended_on == &arch_current_thread()->join_queue)) { + } else if ((thread == _current) || + (thread->base.pended_on == &_current->join_queue)) { ret = -EDEADLK; } else { __ASSERT(!arch_is_in_isr(), "cannot join in ISR"); - add_to_waitq_locked(arch_current_thread(), &thread->join_queue); - add_thread_timeout(arch_current_thread(), timeout); + add_to_waitq_locked(_current, &thread->join_queue); + add_thread_timeout(_current, timeout); SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); ret = z_swap(&_sched_spinlock, key); @@ -1473,7 +1473,7 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, int ret = z_pend_curr(lock, key, wait_q, timeout); if (data != NULL) { - *data = arch_current_thread()->base.swap_data; + *data = _current->base.swap_data; } return ret; } diff --git a/kernel/smp.c b/kernel/smp.c index b0eefb35e4144..a56595252789a 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -58,23 +58,23 @@ unsigned int z_smp_global_lock(void) { unsigned int key = arch_irq_lock(); - if (!arch_current_thread()->base.global_lock_count) { + if (!_current->base.global_lock_count) { while (!atomic_cas(&global_lock, 0, 1)) { arch_spin_relax(); } } - arch_current_thread()->base.global_lock_count++; + _current->base.global_lock_count++; return key; } void z_smp_global_unlock(unsigned int key) { - if (arch_current_thread()->base.global_lock_count != 0U) { - arch_current_thread()->base.global_lock_count--; + if (_current->base.global_lock_count != 0U) { + _current->base.global_lock_count--; - if (!arch_current_thread()->base.global_lock_count) { + if (!_current->base.global_lock_count) { (void)atomic_clear(&global_lock); } } diff --git a/kernel/spinlock_validate.c b/kernel/spinlock_validate.c index c2a97356d9cec..cb7ff5a3e7ff7 100644 --- a/kernel/spinlock_validate.c +++ b/kernel/spinlock_validate.c @@ -24,11 +24,11 @@ bool z_spin_unlock_valid(struct k_spinlock *l) l->thread_cpu = 0; - if (arch_is_in_isr() && arch_current_thread()->base.thread_state & _THREAD_DUMMY) { - /* Edge case where an ISR aborted arch_current_thread() */ + if (arch_is_in_isr() && _current->base.thread_state & _THREAD_DUMMY) { + /* Edge case where an ISR aborted _current */ return true; } - if (tcpu != (_current_cpu->id | (uintptr_t)arch_current_thread())) { + if (tcpu != (_current_cpu->id | (uintptr_t)_current)) { return false; } return true; @@ -36,7 +36,7 @@ bool z_spin_unlock_valid(struct k_spinlock *l) void z_spin_lock_set_owner(struct k_spinlock *l) { - l->thread_cpu = _current_cpu->id | (uintptr_t)arch_current_thread(); + l->thread_cpu = _current_cpu->id | (uintptr_t)_current; } #ifdef CONFIG_KERNEL_COHERENCE diff --git a/kernel/stack.c b/kernel/stack.c index b3ea624b1625e..5add38b9c2318 100644 --- a/kernel/stack.c +++ b/kernel/stack.c @@ -182,7 +182,7 @@ int z_impl_k_stack_pop(struct k_stack *stack, 
stack_data_t *data, return -EAGAIN; } - *data = (stack_data_t)arch_current_thread()->base.swap_data; + *data = (stack_data_t)_current->base.swap_data; SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_stack, pop, stack, timeout, 0); diff --git a/kernel/thread.c b/kernel/thread.c index f731bfefdb6bc..c3535540a680c 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -82,7 +82,7 @@ EXPORT_SYMBOL(k_is_in_isr); #ifdef CONFIG_THREAD_CUSTOM_DATA void z_impl_k_thread_custom_data_set(void *value) { - arch_current_thread()->custom_data = value; + _current->custom_data = value; } #ifdef CONFIG_USERSPACE @@ -95,7 +95,7 @@ static inline void z_vrfy_k_thread_custom_data_set(void *data) void *z_impl_k_thread_custom_data_get(void) { - return arch_current_thread()->custom_data; + return _current->custom_data; } #ifdef CONFIG_USERSPACE @@ -110,7 +110,7 @@ static inline void *z_vrfy_k_thread_custom_data_get(void) int z_impl_k_is_preempt_thread(void) { - return !arch_is_in_isr() && thread_is_preemptible(arch_current_thread()); + return !arch_is_in_isr() && thread_is_preemptible(_current); } #ifdef CONFIG_USERSPACE @@ -139,7 +139,7 @@ int z_impl_k_thread_name_set(k_tid_t thread, const char *str) { #ifdef CONFIG_THREAD_NAME if (thread == NULL) { - thread = arch_current_thread(); + thread = _current; } strncpy(thread->name, str, CONFIG_THREAD_MAX_NAME_LEN - 1); @@ -334,11 +334,11 @@ void z_check_stack_sentinel(void) { uint32_t *stack; - if ((arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0) { + if ((_current->base.thread_state & _THREAD_DUMMY) != 0) { return; } - stack = (uint32_t *)arch_current_thread()->stack_info.start; + stack = (uint32_t *)_current->stack_info.start; if (*stack != STACK_SENTINEL) { /* Restore it so further checks don't trigger this same error */ *stack = STACK_SENTINEL; @@ -614,8 +614,8 @@ char *z_setup_new_thread(struct k_thread *new_thread, } #endif /* CONFIG_SCHED_CPU_MASK */ #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN - /* arch_current_thread() may be null if the dummy thread is not used */ - if (!arch_current_thread()) { + /* _current may be null if the dummy thread is not used */ + if (!_current) { new_thread->resource_pool = NULL; return stack_ptr; } @@ -624,13 +624,13 @@ char *z_setup_new_thread(struct k_thread *new_thread, z_mem_domain_init_thread(new_thread); if ((options & K_INHERIT_PERMS) != 0U) { - k_thread_perms_inherit(arch_current_thread(), new_thread); + k_thread_perms_inherit(_current, new_thread); } #endif /* CONFIG_USERSPACE */ #ifdef CONFIG_SCHED_DEADLINE new_thread->base.prio_deadline = 0; #endif /* CONFIG_SCHED_DEADLINE */ - new_thread->resource_pool = arch_current_thread()->resource_pool; + new_thread->resource_pool = _current->resource_pool; #ifdef CONFIG_SMP z_waitq_init(&new_thread->halt_queue); @@ -725,7 +725,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread, */ K_OOPS(K_SYSCALL_VERIFY(_is_valid_prio(prio, NULL))); K_OOPS(K_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio, - arch_current_thread()->base.prio))); + _current->base.prio))); z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3, prio, options, NULL); @@ -770,25 +770,25 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry, { SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter); - arch_current_thread()->base.user_options |= K_USER; - z_thread_essential_clear(arch_current_thread()); + _current->base.user_options |= K_USER; + z_thread_essential_clear(_current); #ifdef CONFIG_THREAD_MONITOR - arch_current_thread()->entry.pEntry = entry; - 
arch_current_thread()->entry.parameter1 = p1; - arch_current_thread()->entry.parameter2 = p2; - arch_current_thread()->entry.parameter3 = p3; + _current->entry.pEntry = entry; + _current->entry.parameter1 = p1; + _current->entry.parameter2 = p2; + _current->entry.parameter3 = p3; #endif /* CONFIG_THREAD_MONITOR */ #ifdef CONFIG_USERSPACE - __ASSERT(z_stack_is_user_capable(arch_current_thread()->stack_obj), + __ASSERT(z_stack_is_user_capable(_current->stack_obj), "dropping to user mode with kernel-only stack object"); #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA - memset(arch_current_thread()->userspace_local_data, 0, + memset(_current->userspace_local_data, 0, sizeof(struct _thread_userspace_local_data)); #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */ #ifdef CONFIG_THREAD_LOCAL_STORAGE - arch_tls_stack_setup(arch_current_thread(), - (char *)(arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size)); + arch_tls_stack_setup(_current, + (char *)(_current->stack_info.start + + _current->stack_info.size)); #endif /* CONFIG_THREAD_LOCAL_STORAGE */ arch_user_mode_enter(entry, p1, p2, p3); #else @@ -916,7 +916,7 @@ static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks( void z_thread_mark_switched_in(void) { #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH) - z_sched_usage_start(arch_current_thread()); + z_sched_usage_start(_current); #endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */ #ifdef CONFIG_TRACING @@ -933,9 +933,10 @@ void z_thread_mark_switched_out(void) #ifdef CONFIG_TRACING #ifdef CONFIG_THREAD_LOCAL_STORAGE /* Dummy thread won't have TLS set up to run arbitrary code */ - if (!arch_current_thread() || - (arch_current_thread()->base.thread_state & _THREAD_DUMMY) != 0) + if (!_current || + (_current->base.thread_state & _THREAD_DUMMY) != 0) { return; + } #endif /* CONFIG_THREAD_LOCAL_STORAGE */ SYS_PORT_TRACING_FUNC(k_thread, switched_out); #endif /* CONFIG_TRACING */ @@ -1084,7 +1085,7 @@ void k_thread_abort_cleanup(struct k_thread *thread) thread_to_cleanup = NULL; } - if (thread == arch_current_thread()) { + if (thread == _current) { /* Need to defer for current running thread as the cleanup * might result in exception. Actual cleanup will be done * at the next time k_thread_abort() is called, or at thread diff --git a/kernel/timeslicing.c b/kernel/timeslicing.c index 0410d42b91fe3..be91d9606f51e 100644 --- a/kernel/timeslicing.c +++ b/kernel/timeslicing.c @@ -15,7 +15,7 @@ static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS]; #ifdef CONFIG_SWAP_NONATOMIC /* If z_swap() isn't atomic, then it's possible for a timer interrupt - * to try to timeslice away arch_current_thread() after it has already pended + * to try to timeslice away _current after it has already pended * itself but before the corresponding context switch. Treat that as * a noop condition in z_time_slice(). 
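The per-CPU slice bookkeeping touched here is normally driven through the public API. k_sched_time_slice_set() enables round-robin slicing for preemptible threads at the given priority and below (numerically greater than or equal to it), which is what feeds the z_reset_time_slice(_current) call in the hunk below. A hypothetical configuration sketch, where the 10 ms slice and priority 0 threshold are arbitrary values chosen for illustration:

#include <zephyr/kernel.h>

void enable_round_robin(void)
{
        /* 10 ms slices for preemptible threads at priority 0 and below;
         * purely illustrative values.
         */
        k_sched_time_slice_set(10, 0);
}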
*/ @@ -82,7 +82,7 @@ void k_sched_time_slice_set(int32_t slice, int prio) K_SPINLOCK(&_sched_spinlock) { slice_ticks = k_ms_to_ticks_ceil32(slice); slice_max_prio = prio; - z_reset_time_slice(arch_current_thread()); + z_reset_time_slice(_current); } } @@ -103,7 +103,7 @@ void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks void z_time_slice(void) { k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); - struct k_thread *curr = arch_current_thread(); + struct k_thread *curr = _current; #ifdef CONFIG_SWAP_NONATOMIC if (pending_current == curr) { diff --git a/kernel/userspace.c b/kernel/userspace.c index 5aeafe221c72a..7a66513c03e5a 100644 --- a/kernel/userspace.c +++ b/kernel/userspace.c @@ -437,7 +437,7 @@ static void *z_object_alloc(enum k_objects otype, size_t size) /* The allocating thread implicitly gets permission on kernel objects * that it allocates */ - k_thread_perms_set(zo, arch_current_thread()); + k_thread_perms_set(zo, _current); /* Activates reference counting logic for automatic disposal when * all permissions have been revoked @@ -654,7 +654,7 @@ static int thread_perms_test(struct k_object *ko) return 1; } - index = thread_index_get(arch_current_thread()); + index = thread_index_get(_current); if (index != -1) { return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index); } @@ -663,9 +663,9 @@ static int thread_perms_test(struct k_object *ko) static void dump_permission_error(struct k_object *ko) { - int index = thread_index_get(arch_current_thread()); + int index = thread_index_get(_current); LOG_ERR("thread %p (%d) does not have permission on %s %p", - arch_current_thread(), index, + _current, index, otype_to_str(ko->type), ko->name); LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap"); } @@ -718,7 +718,7 @@ void k_object_access_revoke(const void *object, struct k_thread *thread) void z_impl_k_object_release(const void *object) { - k_object_access_revoke(object, arch_current_thread()); + k_object_access_revoke(object, _current); } void k_object_access_all_grant(const void *object) @@ -794,7 +794,7 @@ void k_object_recycle(const void *obj) if (ko != NULL) { (void)memset(ko->perms, 0, sizeof(ko->perms)); - k_thread_perms_set(ko, arch_current_thread()); + k_thread_perms_set(ko, _current); ko->flags |= K_OBJ_FLAG_INITIALIZED; } } diff --git a/kernel/userspace_handler.c b/kernel/userspace_handler.c index 38e778713bafc..ab6e4f0623c7f 100644 --- a/kernel/userspace_handler.c +++ b/kernel/userspace_handler.c @@ -72,7 +72,7 @@ static inline void z_vrfy_k_object_release(const void *object) ko = validate_any_object(object); K_OOPS(K_SYSCALL_VERIFY_MSG(ko != NULL, "object %p access denied", object)); - k_thread_perms_clear(ko, arch_current_thread()); + k_thread_perms_clear(ko, _current); } #include diff --git a/kernel/work.c b/kernel/work.c index 3e04033ff1be6..24691bd31096e 100644 --- a/kernel/work.c +++ b/kernel/work.c @@ -262,7 +262,7 @@ static inline int queue_submit_locked(struct k_work_q *queue, } int ret; - bool chained = (arch_current_thread() == &queue->thread) && !k_is_in_isr(); + bool chained = (_current == &queue->thread) && !k_is_in_isr(); bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT); bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT); diff --git a/lib/libc/armstdc/src/libc-hooks.c b/lib/libc/armstdc/src/libc-hooks.c index f9fe9d1c4200c..afce534eddfd6 100644 --- a/lib/libc/armstdc/src/libc-hooks.c +++ b/lib/libc/armstdc/src/libc-hooks.c @@ -23,7 +23,7 @@ void __stdout_hook_install(int 
(*hook)(int)) volatile int *__aeabi_errno_addr(void) { - return &arch_current_thread()->errno_var; + return &_current->errno_var; } int fputc(int c, FILE *f) diff --git a/lib/os/p4wq.c b/lib/os/p4wq.c index 5a48ee6cf7b26..39b59de20d944 100644 --- a/lib/os/p4wq.c +++ b/lib/os/p4wq.c @@ -87,10 +87,10 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2) = CONTAINER_OF(r, struct k_p4wq_work, rbnode); rb_remove(&queue->queue, r); - w->thread = arch_current_thread(); + w->thread = _current; sys_dlist_append(&queue->active, &w->dlnode); - set_prio(arch_current_thread(), w); - thread_clear_requeued(arch_current_thread()); + set_prio(_current, w); + thread_clear_requeued(_current); k_spin_unlock(&queue->lock, k); @@ -101,7 +101,7 @@ static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2) /* Remove from the active list only if it * wasn't resubmitted already */ - if (!thread_was_requeued(arch_current_thread())) { + if (!thread_was_requeued(_current)) { sys_dlist_remove(&w->dlnode); w->thread = NULL; k_sem_give(&w->done_sem); @@ -223,9 +223,9 @@ void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item) item->deadline += k_cycle_get_32(); /* Resubmission from within handler? Remove from active list */ - if (item->thread == arch_current_thread()) { + if (item->thread == _current) { sys_dlist_remove(&item->dlnode); - thread_set_requeued(arch_current_thread()); + thread_set_requeued(_current); item->thread = NULL; } else { k_sem_init(&item->done_sem, 0, 1); diff --git a/scripts/build/gen_syscalls.py b/scripts/build/gen_syscalls.py index d2bb3249a8159..352130caf8333 100755 --- a/scripts/build/gen_syscalls.py +++ b/scripts/build/gen_syscalls.py @@ -362,7 +362,7 @@ def marshall_defs(func_name, func_type, args): else: mrsh += "\t\t" + "uintptr_t arg3, uintptr_t arg4, void *more, void *ssf)\n" mrsh += "{\n" - mrsh += "\t" + "arch_current_thread()->syscall_frame = ssf;\n" + mrsh += "\t" + "_current->syscall_frame = ssf;\n" for unused_arg in range(nmrsh, 6): mrsh += "\t(void) arg%d;\t/* unused */\n" % unused_arg @@ -388,7 +388,7 @@ def marshall_defs(func_name, func_type, args): if func_type == "void": mrsh += "\t" + "%s;\n" % vrfy_call - mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n" + mrsh += "\t" + "_current->syscall_frame = NULL;\n" mrsh += "\t" + "return 0;\n" else: mrsh += "\t" + "%s ret = %s;\n" % (func_type, vrfy_call) @@ -397,10 +397,10 @@ def marshall_defs(func_name, func_type, args): ptr = "((uint64_t *)%s)" % mrsh_rval(nmrsh - 1, nmrsh) mrsh += "\t" + "K_OOPS(K_SYSCALL_MEMORY_WRITE(%s, 8));\n" % ptr mrsh += "\t" + "*%s = ret;\n" % ptr - mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n" + mrsh += "\t" + "_current->syscall_frame = NULL;\n" mrsh += "\t" + "return 0;\n" else: - mrsh += "\t" + "arch_current_thread()->syscall_frame = NULL;\n" + mrsh += "\t" + "_current->syscall_frame = NULL;\n" mrsh += "\t" + "return (uintptr_t) ret;\n" mrsh += "}\n" diff --git a/soc/espressif/esp32/soc.c b/soc/espressif/esp32/soc.c index d244ff5c84c5f..ec1fc8c8babeb 100644 --- a/soc/espressif/esp32/soc.c +++ b/soc/espressif/esp32/soc.c @@ -69,7 +69,7 @@ void IRAM_ATTR __esp_platform_start(void) __asm__ __volatile__ ("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid arch_current_thread() before + * initialization code wants a valid _current before * z_prep_c() is invoked. 
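For reference, the _current symbol this boot-ordering comment depends on resolves as follows after the revert, condensed from the kernel_structs.h hunk earlier in this patch:

/* Condensed from include/zephyr/kernel_structs.h as changed by this patch */
#ifdef CONFIG_SMP
#define _current        arch_current_thread()
#else
#define _current        _kernel.cpus[0].current
#endif

With the default arch_current_thread() implementation the value comes from the per-CPU current slot either way, so the CPU pointer the assembly below installs needs to be in place early, matching the comment above.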
*/ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32/soc_appcpu.c b/soc/espressif/esp32/soc_appcpu.c index e942bab01a200..82c8ef1ad6571 100644 --- a/soc/espressif/esp32/soc_appcpu.c +++ b/soc/espressif/esp32/soc_appcpu.c @@ -80,7 +80,7 @@ void IRAM_ATTR __appcpu_start(void) : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid arch_current_thread() before + * initialization code wants a valid _current before * z_prep_c() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[1])); diff --git a/soc/espressif/esp32s2/soc.c b/soc/espressif/esp32s2/soc.c index 858618817f2bd..02a6d1b4dc6fb 100644 --- a/soc/espressif/esp32s2/soc.c +++ b/soc/espressif/esp32s2/soc.c @@ -62,7 +62,7 @@ void __attribute__((section(".iram1"))) __esp_platform_start(void) __asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid arch_current_thread() before + * initialization code wants a valid _current before * arch_kernel_init() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32s3/soc.c b/soc/espressif/esp32s3/soc.c index 1a7d1c04dd967..763ba341940f0 100644 --- a/soc/espressif/esp32s3/soc.c +++ b/soc/espressif/esp32s3/soc.c @@ -97,7 +97,7 @@ void IRAM_ATTR __esp_platform_start(void) __asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid arch_current_thread() before + * initialization code wants a valid _current before * arch_kernel_init() is invoked. */ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0])); diff --git a/soc/espressif/esp32s3/soc_appcpu.c b/soc/espressif/esp32s3/soc_appcpu.c index 1ec3bbf94aab7..a03304c87519e 100644 --- a/soc/espressif/esp32s3/soc_appcpu.c +++ b/soc/espressif/esp32s3/soc_appcpu.c @@ -65,7 +65,7 @@ void IRAM_ATTR __appcpu_start(void) __asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE)); /* Initialize the architecture CPU pointer. Some of the - * initialization code wants a valid arch_current_thread() before + * initialization code wants a valid _current before * arch_kernel_init() is invoked. 
*/ __asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[1])); diff --git a/subsys/net/lib/sockets/sockets.c b/subsys/net/lib/sockets/sockets.c index a3bc0b71135e4..441ed438472b4 100644 --- a/subsys/net/lib/sockets/sockets.c +++ b/subsys/net/lib/sockets/sockets.c @@ -68,7 +68,7 @@ static inline void *get_sock_vtable(int sock, if (ctx == NULL) { NET_DBG("Invalid access on sock %d by thread %p (%s)", sock, - arch_current_thread(), k_thread_name_get(arch_current_thread())); + _current, k_thread_name_get(_current)); } return ctx; diff --git a/subsys/portability/cmsis_rtos_v2/kernel.c b/subsys/portability/cmsis_rtos_v2/kernel.c index 1fbd606dbbf06..519be96023ddb 100644 --- a/subsys/portability/cmsis_rtos_v2/kernel.c +++ b/subsys/portability/cmsis_rtos_v2/kernel.c @@ -39,7 +39,7 @@ osStatus_t osKernelGetInfo(osVersion_t *version, char *id_buf, uint32_t id_size) */ int32_t osKernelLock(void) { - int temp = arch_current_thread()->base.sched_locked; + int temp = _current->base.sched_locked; if (k_is_in_isr()) { return osErrorISR; @@ -55,7 +55,7 @@ int32_t osKernelLock(void) */ int32_t osKernelUnlock(void) { - int temp = arch_current_thread()->base.sched_locked; + int temp = _current->base.sched_locked; if (k_is_in_isr()) { return osErrorISR; @@ -71,7 +71,7 @@ int32_t osKernelUnlock(void) */ int32_t osKernelRestoreLock(int32_t lock) { - arch_current_thread()->base.sched_locked = lock; + _current->base.sched_locked = lock; if (k_is_in_isr()) { return osErrorISR; diff --git a/subsys/profiling/perf/backends/perf_riscv.c b/subsys/profiling/perf/backends/perf_riscv.c index 0d04676a11b17..2259e1c60db3a 100644 --- a/subsys/profiling/perf/backends/perf_riscv.c +++ b/subsys/profiling/perf/backends/perf_riscv.c @@ -76,10 +76,10 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) * function prologue or epilogue. 
*/ buf[idx++] = (uintptr_t)esf->ra; - if (valid_stack((uintptr_t)new_fp, arch_current_thread())) { + if (valid_stack((uintptr_t)new_fp, _current)) { fp = new_fp; } - while (valid_stack((uintptr_t)fp, arch_current_thread())) { + while (valid_stack((uintptr_t)fp, _current)) { if (idx >= size) { return 0; } diff --git a/subsys/profiling/perf/backends/perf_x86.c b/subsys/profiling/perf/backends/perf_x86.c index 1321631763428..e62ad64b74a0e 100644 --- a/subsys/profiling/perf/backends/perf_x86.c +++ b/subsys/profiling/perf/backends/perf_x86.c @@ -67,7 +67,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) */ buf[idx++] = (uintptr_t)isf->eip; - while (valid_stack((uintptr_t)fp, arch_current_thread())) { + while (valid_stack((uintptr_t)fp, _current)) { if (idx >= size) { return 0; } diff --git a/subsys/profiling/perf/backends/perf_x86_64.c b/subsys/profiling/perf/backends/perf_x86_64.c index f5e13b53597f4..84e45024c3cb4 100644 --- a/subsys/profiling/perf/backends/perf_x86_64.c +++ b/subsys/profiling/perf/backends/perf_x86_64.c @@ -35,13 +35,13 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) /* * In x86_64 (arch/x86/core/intel64/locore.S) %rip and %rbp - * are always saved in arch_current_thread()->callee_saved before calling + * are always saved in _current->callee_saved before calling * handler function if interrupt is not nested * * %rip points the location where interrupt was occurred */ - buf[idx++] = (uintptr_t)arch_current_thread()->callee_saved.rip; - void **fp = (void **)arch_current_thread()->callee_saved.rbp; + buf[idx++] = (uintptr_t)_current->callee_saved.rip; + void **fp = (void **)_current->callee_saved.rbp; /* * %rbp is frame pointer. @@ -53,7 +53,7 @@ size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size) * %rbp (next) <- %rbp (curr) * .... */ - while (valid_stack((uintptr_t)fp, arch_current_thread())) { + while (valid_stack((uintptr_t)fp, _current)) { if (idx >= size) { return 0; } diff --git a/subsys/shell/modules/kernel_service/thread/unwind.c b/subsys/shell/modules/kernel_service/thread/unwind.c index e41df7d3b00b0..903f05822b14a 100644 --- a/subsys/shell/modules/kernel_service/thread/unwind.c +++ b/subsys/shell/modules/kernel_service/thread/unwind.c @@ -30,7 +30,7 @@ static int cmd_kernel_thread_unwind(const struct shell *sh, size_t argc, char ** int err = 0; if (argc == 1) { - thread = arch_current_thread(); + thread = _current; } else { thread = UINT_TO_POINTER(shell_strtoull(argv[1], 16, &err)); if (err != 0) { diff --git a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c index 8ab6e6c469196..3891c9b1d41c2 100644 --- a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c +++ b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c @@ -177,7 +177,7 @@ ZTEST(arm_interrupt, test_arm_esf_collection) * crashy thread we create below runs to completion before we get * to the end of this function */ - k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY)); + k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY)); TC_PRINT("Testing ESF Reporting\n"); k_thread_create(&esf_collection_thread, esf_collection_stack, @@ -366,9 +366,9 @@ ZTEST(arm_interrupt, test_arm_interrupt) uint32_t fp_extra_size = (__get_CONTROL() & CONTROL_FPCA_Msk) ? 
FPU_STACK_EXTRA_SIZE : 0; - __set_PSP(arch_current_thread()->stack_info.start + 0x10 + fp_extra_size); + __set_PSP(_current->stack_info.start + 0x10 + fp_extra_size); #else - __set_PSP(arch_current_thread()->stack_info.start + 0x10); + __set_PSP(_current->stack_info.start + 0x10); #endif __enable_irq(); diff --git a/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c b/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c index 2e4443761346f..075b36126a155 100644 --- a/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c +++ b/tests/arch/arm/arm_thread_swap/src/arm_syscalls.c @@ -38,20 +38,20 @@ void z_impl_test_arm_user_syscall(void) * - PSPLIM register guards the privileged stack * - MSPLIM register still guards the interrupt stack */ - zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0, + zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0, "mode variable not set to PRIV mode in system call\n"); zassert_false(arch_is_user_context(), "arch_is_user_context() indicates nPRIV\n"); zassert_true( - ((__get_PSP() >= arch_current_thread()->arch.priv_stack_start) && - (__get_PSP() < (arch_current_thread()->arch.priv_stack_start + + ((__get_PSP() >= _current->arch.priv_stack_start) && + (__get_PSP() < (_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE))), "Process SP outside thread privileged stack limits\n"); #if defined(CONFIG_BUILTIN_STACK_GUARD) - zassert_true(__get_PSPLIM() == arch_current_thread()->arch.priv_stack_start, + zassert_true(__get_PSPLIM() == _current->arch.priv_stack_start, "PSPLIM not guarding the thread's privileged stack\n"); zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks, "MSPLIM not guarding the interrupt stack\n"); @@ -78,16 +78,16 @@ void arm_isr_handler(const void *args) * - MSPLIM register still guards the interrupt stack */ - zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) != 0, + zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) != 0, "mode variable not set to nPRIV mode for user thread\n"); zassert_false(arch_is_user_context(), "arch_is_user_context() indicates nPRIV in ISR\n"); zassert_true( - ((__get_PSP() >= arch_current_thread()->stack_info.start) && - (__get_PSP() < (arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size))), + ((__get_PSP() >= _current->stack_info.start) && + (__get_PSP() < (_current->stack_info.start + + _current->stack_info.size))), "Process SP outside thread stack limits\n"); static int first_call = 1; @@ -97,7 +97,7 @@ void arm_isr_handler(const void *args) /* Trigger thread yield() manually */ (void)irq_lock(); - z_move_thread_to_end_of_prio_q(arch_current_thread()); + z_move_thread_to_end_of_prio_q(_current); SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; irq_unlock(0); @@ -165,20 +165,20 @@ ZTEST(arm_thread_swap, test_arm_syscalls) * - PSPLIM register guards the default stack * - MSPLIM register guards the interrupt stack */ - zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0, + zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0, "mode variable not set to PRIV mode for supervisor thread\n"); zassert_false(arch_is_user_context(), "arch_is_user_context() indicates nPRIV\n"); zassert_true( - ((__get_PSP() >= arch_current_thread()->stack_info.start) && - (__get_PSP() < (arch_current_thread()->stack_info.start + - arch_current_thread()->stack_info.size))), + ((__get_PSP() >= _current->stack_info.start) && + (__get_PSP() < (_current->stack_info.start + + _current->stack_info.size))), "Process SP outside thread stack 
limits\n"); #if defined(CONFIG_BUILTIN_STACK_GUARD) - zassert_true(__get_PSPLIM() == arch_current_thread()->stack_info.start, + zassert_true(__get_PSPLIM() == _current->stack_info.start, "PSPLIM not guarding the default stack\n"); zassert_true(__get_MSPLIM() == (uint32_t)z_interrupt_stacks, "MSPLIM not guarding the interrupt stack\n"); diff --git a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c index 2ea9bd3816284..a617abe06802f 100644 --- a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c +++ b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c @@ -274,16 +274,16 @@ static void alt_thread_entry(void *p1, void *p2, void *p3) /* Verify that the _current_ (alt) thread is * initialized with EXC_RETURN.Ftype set */ - zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Alt thread FPCA flag not clear at initialization\n"); #if defined(CONFIG_MPU_STACK_GUARD) /* Alt thread is created with K_FP_REGS set, so we * expect lazy stacking and long guard to be enabled. */ - zassert_true((arch_current_thread()->arch.mode & + zassert_true((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0, "Alt thread MPU GUAR DFLOAT flag not set at initialization\n"); - zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0, + zassert_true((_current->base.user_options & K_FP_REGS) != 0, "Alt thread K_FP_REGS not set at initialization\n"); zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0, "Lazy FP Stacking not set at initialization\n"); @@ -326,7 +326,7 @@ static void alt_thread_entry(void *p1, void *p2, void *p3) p_ztest_thread->arch.swap_return_value = SWAP_RETVAL; #endif - z_move_thread_to_end_of_prio_q(arch_current_thread()); + z_move_thread_to_end_of_prio_q(_current); /* Modify the callee-saved registers by zero-ing them. * The main test thread will, later, assert that they @@ -451,20 +451,20 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) */ load_callee_saved_regs(&ztest_thread_callee_saved_regs_init); - k_thread_priority_set(arch_current_thread(), K_PRIO_COOP(PRIORITY)); + k_thread_priority_set(_current, K_PRIO_COOP(PRIORITY)); /* Export current thread's callee-saved registers pointer * and arch.basepri variable pointer, into global pointer * variables, so they can be easily accessible by other * (alternative) test thread. */ - p_ztest_thread = arch_current_thread(); + p_ztest_thread = _current; /* Confirm initial conditions before starting the test. */ test_flag = switch_flag; zassert_true(test_flag == false, "Switch flag not initialized properly\n"); - zassert_true(arch_current_thread()->arch.basepri == 0, + zassert_true(_current->arch.basepri == 0, "Thread BASEPRI flag not clear at thread start\n"); /* Verify, also, that the interrupts are unlocked. 
*/ #if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI) @@ -484,16 +484,16 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) "Main test thread does not start in privilege mode\n"); /* Assert that the mode status variable indicates privilege mode */ - zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0, + zassert_true((_current->arch.mode & CONTROL_nPRIV_Msk) == 0, "Thread nPRIV flag not clear for supervisor thread: 0x%0x\n", - arch_current_thread()->arch.mode); + _current->arch.mode); #endif /* CONFIG_USERSPACE */ #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) /* The main test thread is not (yet) actively using the FP registers */ - zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Thread Ftype flag not set at initialization 0x%0x\n", - arch_current_thread()->arch.mode); + _current->arch.mode); /* Verify that the main test thread is initialized with FPCA cleared. */ zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0, @@ -506,7 +506,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) /* Clear the thread's floating-point callee-saved registers' container. * The container will, later, be populated by the swap mechanism. */ - memset(&arch_current_thread()->arch.preempt_float, 0, + memset(&_current->arch.preempt_float, 0, sizeof(struct _preempt_float)); /* Randomize the FP callee-saved registers at test initialization */ @@ -520,13 +520,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) /* The main test thread is using the FP registers, but the .mode * flag is not updated until the next context switch. */ - zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Thread Ftype flag not set at initialization\n"); #if defined(CONFIG_MPU_STACK_GUARD) - zassert_true((arch_current_thread()->arch.mode & + zassert_true((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0, "Thread MPU GUAR DFLOAT flag not clear at initialization\n"); - zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) == 0, + zassert_true((_current->base.user_options & K_FP_REGS) == 0, "Thread K_FP_REGS not clear at initialization\n"); zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) == 0, "Lazy FP Stacking not clear at initialization\n"); @@ -555,13 +555,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) * explicitly required by the test. */ (void)irq_lock(); - z_move_thread_to_end_of_prio_q(arch_current_thread()); + z_move_thread_to_end_of_prio_q(_current); /* Clear the thread's callee-saved registers' container. * The container will, later, be populated by the swap * mechanism. */ - memset(&arch_current_thread()->callee_saved, 0, sizeof(_callee_saved_t)); + memset(&_current->callee_saved, 0, sizeof(_callee_saved_t)); /* Verify context-switch has not occurred yet. */ test_flag = switch_flag; @@ -677,7 +677,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) */ verify_callee_saved( &ztest_thread_callee_saved_regs_container, - &arch_current_thread()->callee_saved); + &_current->callee_saved); /* Verify context-switch did occur. */ test_flag = switch_flag; @@ -693,7 +693,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) * the alternative thread modified it, since the thread * is now switched back in. 
*/ - zassert_true(arch_current_thread()->arch.basepri == 0, + zassert_true(_current->arch.basepri == 0, "arch.basepri value not in accordance with the update\n"); #if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI) @@ -714,12 +714,12 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) #if !defined(CONFIG_NO_OPTIMIZATIONS) /* The thread is now swapped-back in. */ - zassert_equal(arch_current_thread()->arch.swap_return_value, SWAP_RETVAL, + zassert_equal(_current->arch.swap_return_value, SWAP_RETVAL, "Swap value not set as expected: 0x%x (0x%x)\n", - arch_current_thread()->arch.swap_return_value, SWAP_RETVAL); - zassert_equal(arch_current_thread()->arch.swap_return_value, ztest_swap_return_val, + _current->arch.swap_return_value, SWAP_RETVAL); + zassert_equal(_current->arch.swap_return_value, ztest_swap_return_val, "Swap value not returned as expected 0x%x (0x%x)\n", - arch_current_thread()->arch.swap_return_value, ztest_swap_return_val); + _current->arch.swap_return_value, ztest_swap_return_val); #endif #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) @@ -737,7 +737,7 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) */ verify_fp_callee_saved( &ztest_thread_fp_callee_saved_regs, - &arch_current_thread()->arch.preempt_float); + &_current->arch.preempt_float); /* Verify that the main test thread restored the FPSCR bit-0. */ zassert_true((__get_FPSCR() & 0x1) == 0x1, @@ -746,13 +746,13 @@ ZTEST(arm_thread_swap, test_arm_thread_swap) /* The main test thread is using the FP registers, and the .mode * flag and MPU GUARD flag are now updated. */ - zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0, + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0, "Thread Ftype flag not cleared after main returned back\n"); #if defined(CONFIG_MPU_STACK_GUARD) - zassert_true((arch_current_thread()->arch.mode & + zassert_true((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0, "Thread MPU GUARD FLOAT flag not set\n"); - zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0, + zassert_true((_current->base.user_options & K_FP_REGS) != 0, "Thread K_FPREGS not set after main returned back\n"); zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0, "Lazy FP Stacking not set after main returned back\n"); diff --git a/tests/arch/riscv/userspace/riscv_gp/src/main.c b/tests/arch/riscv/userspace/riscv_gp/src/main.c index 738ba94b96848..fcbb7ba8d5273 100644 --- a/tests/arch/riscv/userspace/riscv_gp/src/main.c +++ b/tests/arch/riscv/userspace/riscv_gp/src/main.c @@ -39,7 +39,7 @@ static void rogue_user_fn(void *p1, void *p2, void *p3) if (IS_ENABLED(CONFIG_RISCV_GP)) { zassert_equal(reg_read(gp), 0xbad); } else { /* CONFIG_RISCV_CURRENT_VIA_GP */ - zassert_equal((uintptr_t)arch_current_thread(), 0xbad); + zassert_equal((uintptr_t)_current, 0xbad); } /* Sleep to force a context switch, which will sanitize `gp` */ diff --git a/tests/benchmarks/footprints/src/system_thread.c b/tests/benchmarks/footprints/src/system_thread.c index c44ed9146be58..c88be8c631cb0 100644 --- a/tests/benchmarks/footprints/src/system_thread.c +++ b/tests/benchmarks/footprints/src/system_thread.c @@ -28,12 +28,12 @@ void test_thread_entry(void *p, void *p1, void *p2) void thread_swap(void *p1, void *p2, void *p3) { - k_thread_abort(arch_current_thread()); + k_thread_abort(_current); } void thread_suspend(void *p1, void *p2, void *p3) { - k_thread_suspend(arch_current_thread()); + k_thread_suspend(_current); } void thread_yield0(void *p1, void *p2, void *p3) diff --git 
a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index f7b1886aee14e..699c7bdc642bb 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -135,7 +135,7 @@ static void isr_handler(const void *data) break; } - if (arch_current_thread()->base.prio < 0) { + if (_current->base.prio < 0) { isr_info.value = K_COOP_THREAD; break; } @@ -643,9 +643,9 @@ ZTEST(context, test_ctx_thread) TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n"); zassert_false(k_is_in_isr(), "Should not be in ISR context"); - zassert_false(arch_current_thread()->base.prio < 0, + zassert_false(_current->base.prio < 0, "Current thread should have preemptible priority: %d", - arch_current_thread()->base.prio); + _current->base.prio); } @@ -683,7 +683,7 @@ static void _test_kernel_thread(k_tid_t _thread_id) zassert_false(k_is_in_isr(), "k_is_in_isr() when called from a thread is true"); - zassert_false((arch_current_thread()->base.prio >= 0), + zassert_false((_current->base.prio >= 0), "thread is not a cooperative thread"); } diff --git a/tests/kernel/fatal/exception/src/main.c b/tests/kernel/fatal/exception/src/main.c index 3d289af407bbd..6eb97068b6ce1 100644 --- a/tests/kernel/fatal/exception/src/main.c +++ b/tests/kernel/fatal/exception/src/main.c @@ -314,7 +314,7 @@ ZTEST(fatal_exception, test_fatal) * priority -1. To run the test smoothly make both main and ztest * threads run at same priority level. */ - k_thread_priority_set(arch_current_thread(), K_PRIO_PREEMPT(MAIN_PRIORITY)); + k_thread_priority_set(_current, K_PRIO_PREEMPT(MAIN_PRIORITY)); #ifndef CONFIG_ARCH_POSIX TC_PRINT("test alt thread 1: generic CPU exception\n"); diff --git a/tests/kernel/fatal/message_capture/src/main.c b/tests/kernel/fatal/message_capture/src/main.c index c23b042c1a3c9..514f3ea6e55c9 100644 --- a/tests/kernel/fatal/message_capture/src/main.c +++ b/tests/kernel/fatal/message_capture/src/main.c @@ -86,7 +86,7 @@ int main(void) * panic and not an oops). Set the thread non-essential as a * workaround. */ - z_thread_essential_clear(arch_current_thread()); + z_thread_essential_clear(_current); test_message_capture(); return 0; diff --git a/tests/kernel/ipi_cascade/src/main.c b/tests/kernel/ipi_cascade/src/main.c index 95e77607cb420..1f96e677ce681 100644 --- a/tests/kernel/ipi_cascade/src/main.c +++ b/tests/kernel/ipi_cascade/src/main.c @@ -116,7 +116,7 @@ void thread3_entry(void *p1, void *p2, void *p3) /* 9.1 - T3 should be executing on the same CPU that T1 was. */ - cpu_t3 = arch_current_thread()->base.cpu; + cpu_t3 = _current->base.cpu; zassert_true(cpu_t3 == cpu_t1, "T3 not executing on T1's original CPU"); @@ -136,7 +136,7 @@ void thread4_entry(void *p1, void *p2, void *p3) * It is expected to execute on the same CPU that T2 did. */ - cpu_t4 = arch_current_thread()->base.cpu; + cpu_t4 = _current->base.cpu; zassert_true(cpu_t4 == cpu_t2, "T4 on unexpected CPU"); @@ -165,7 +165,7 @@ void thread2_entry(void *p1, void *p2, void *p3) arch_irq_unlock(key); } - cpu_t2 = arch_current_thread()->base.cpu; + cpu_t2 = _current->base.cpu; zassert_false(cpu_t2 == cpu_t1, "T2 and T1 unexpectedly on the same CPU"); @@ -205,7 +205,7 @@ ZTEST(ipi_cascade, test_ipi_cascade) /* 3. T3 and T4 are blocked. 
Pin T3 to this CPU */ - cpu_t1 = arch_current_thread()->base.cpu; + cpu_t1 = _current->base.cpu; status = k_thread_cpu_pin(&thread3, cpu_t1); zassert_true(status == 0, "Failed to pin T3 to %d : %d\n", cpu_t1, status); @@ -249,7 +249,7 @@ ZTEST(ipi_cascade, test_ipi_cascade) zassert_false(timer_expired, "Test terminated by timer"); - zassert_true(cpu_t1 != arch_current_thread()->base.cpu, + zassert_true(cpu_t1 != _current->base.cpu, "Main thread (T1) did not change CPUs\n"); show_executing_threads("Final"); diff --git a/tests/kernel/mem_protect/mem_protect/src/inherit.c b/tests/kernel/mem_protect/mem_protect/src/inherit.c index c0d614fae09d1..021fdf884fdd7 100644 --- a/tests/kernel/mem_protect/mem_protect/src/inherit.c +++ b/tests/kernel/mem_protect/mem_protect/src/inherit.c @@ -125,7 +125,7 @@ ZTEST(mem_protect, test_permission_inheritance) struct k_heap *z_impl_ret_resource_pool_ptr(void) { - return arch_current_thread()->resource_pool; + return _current->resource_pool; } static inline struct k_heap *z_vrfy_ret_resource_pool_ptr(void) diff --git a/tests/kernel/mem_protect/obj_validation/src/main.c b/tests/kernel/mem_protect/obj_validation/src/main.c index a6c03dce1b1e0..df7ddbbc8456e 100644 --- a/tests/kernel/mem_protect/obj_validation/src/main.c +++ b/tests/kernel/mem_protect/obj_validation/src/main.c @@ -132,7 +132,7 @@ ZTEST(object_validation, test_generic_object) ZTEST(object_validation, test_kobj_assign_perms_on_alloc_obj) { static struct k_sem *test_dyn_sem; - struct k_thread *thread = arch_current_thread(); + struct k_thread *thread = _current; uintptr_t start_addr, end_addr; size_t size_heap = K_HEAP_MEM_POOL_SIZE; @@ -173,7 +173,7 @@ ZTEST(object_validation, test_no_ref_dyn_kobj_release_mem) zassert_not_null(test_dyn_mutex, "Can not allocate dynamic kernel object"); - struct k_thread *thread = arch_current_thread(); + struct k_thread *thread = _current; /* revoke access from the current thread */ k_object_access_revoke(test_dyn_mutex, thread); diff --git a/tests/kernel/mem_protect/userspace/src/main.c b/tests/kernel/mem_protect/userspace/src/main.c index 08b3932c7adfa..76d5581b90678 100644 --- a/tests/kernel/mem_protect/userspace/src/main.c +++ b/tests/kernel/mem_protect/userspace/src/main.c @@ -312,7 +312,7 @@ ZTEST_USER(userspace, test_read_kernram) set_fault(K_ERR_CPU_EXCEPTION); - p = arch_current_thread()->init_data; + p = _current->init_data; printk("%p\n", p); zassert_unreachable("Read from kernel RAM did not fault"); } @@ -327,7 +327,7 @@ ZTEST_USER(userspace, test_write_kernram) /* Try to write to kernel RAM. */ set_fault(K_ERR_CPU_EXCEPTION); - arch_current_thread()->init_data = NULL; + _current->init_data = NULL; zassert_unreachable("Write to kernel RAM did not fault"); } @@ -1038,11 +1038,11 @@ ZTEST(userspace, test_tls_leakage) * supervisor mode to be leaked */ - memset(arch_current_thread()->userspace_local_data, 0xff, + memset(_current->userspace_local_data, 0xff, sizeof(struct _thread_userspace_local_data)); k_thread_user_mode_enter(tls_leakage_user_part, - arch_current_thread()->userspace_local_data, NULL, NULL); + _current->userspace_local_data, NULL, NULL); #else ztest_test_skip(); #endif diff --git a/tests/kernel/sched/deadline/src/main.c b/tests/kernel/sched/deadline/src/main.c index 8b9d3a02f4300..cf19255d5ec8b 100644 --- a/tests/kernel/sched/deadline/src/main.c +++ b/tests/kernel/sched/deadline/src/main.c @@ -232,7 +232,7 @@ static void test_reschedule_helper0(void *p1, void *p2, void *p3) { /* 4. 
Reschedule brings us here */ - zassert_true(expected_thread == arch_current_thread(), ""); + zassert_true(expected_thread == _current, ""); expected_thread = &worker_threads[1]; } @@ -243,26 +243,26 @@ static void test_reschedule_helper1(void *p1, void *p2, void *p3) /* 1. First helper expected to execute */ - zassert_true(expected_thread == arch_current_thread(), ""); + zassert_true(expected_thread == _current, ""); offload(reschedule_wrapper, NULL); /* 2. Deadlines have not changed. Expected no changes */ - zassert_true(expected_thread == arch_current_thread(), ""); + zassert_true(expected_thread == _current, ""); - k_thread_deadline_set(arch_current_thread(), MSEC_TO_CYCLES(1000)); + k_thread_deadline_set(_current, MSEC_TO_CYCLES(1000)); /* 3. Deadline changed, but there was no reschedule */ - zassert_true(expected_thread == arch_current_thread(), ""); + zassert_true(expected_thread == _current, ""); expected_thread = &worker_threads[0]; offload(reschedule_wrapper, NULL); /* 5. test_thread_reschedule_helper0 executed */ - zassert_true(expected_thread == arch_current_thread(), ""); + zassert_true(expected_thread == _current, ""); } static void thread_offload(void (*f)(const void *p), const void *param) diff --git a/tests/kernel/smp/src/main.c b/tests/kernel/smp/src/main.c index 065dbcf8b92c7..f3d3f356eede8 100644 --- a/tests/kernel/smp/src/main.c +++ b/tests/kernel/smp/src/main.c @@ -318,9 +318,8 @@ ZTEST(smp, test_coop_switch_in_abort) unsigned int num_threads = arch_num_cpus(); unsigned int i; - zassert_true(arch_current_thread()->base.prio < 0, - "test case relies on ztest thread be cooperative"); - zassert_true(arch_current_thread()->base.prio > SPAWN_AB_PRIO, + zassert_true(_current->base.prio < 0, "test case relies on ztest thread be cooperative"); + zassert_true(_current->base.prio > SPAWN_AB_PRIO, "spawn test need to have higher priority than ztest thread"); /* Spawn N number of cooperative threads, where N = number of CPUs */ @@ -871,15 +870,15 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - zassert_equal(arch_current_thread()->base.global_lock_count, 0, + zassert_equal(_current->base.global_lock_count, 0, "thread global lock cnt %d is incorrect", - arch_current_thread()->base.global_lock_count); + _current->base.global_lock_count); k_mutex_lock((struct k_mutex *)p1, K_FOREVER); - zassert_equal(arch_current_thread()->base.global_lock_count, 0, + zassert_equal(_current->base.global_lock_count, 0, "thread global lock cnt %d is incorrect", - arch_current_thread()->base.global_lock_count); + _current->base.global_lock_count); k_mutex_unlock((struct k_mutex *)p1); @@ -887,9 +886,9 @@ static void t2_mutex_lock(void *p1, void *p2, void *p3) * context switch but global_lock_cnt has not been decrease * because no irq_lock() was called. 
*/ - zassert_equal(arch_current_thread()->base.global_lock_count, 0, + zassert_equal(_current->base.global_lock_count, 0, "thread global lock cnt %d is incorrect", - arch_current_thread()->base.global_lock_count); + _current->base.global_lock_count); } /** diff --git a/tests/kernel/threads/thread_apis/src/main.c b/tests/kernel/threads/thread_apis/src/main.c index 1b351068abedf..162f85e5f5f6d 100644 --- a/tests/kernel/threads/thread_apis/src/main.c +++ b/tests/kernel/threads/thread_apis/src/main.c @@ -232,7 +232,7 @@ static void umode_entry(void *thread_id, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - if (!z_is_thread_essential(arch_current_thread()) && + if (!z_is_thread_essential(_current) && (k_current_get() == (k_tid_t)thread_id)) { ztest_test_pass(); } else { @@ -249,9 +249,9 @@ static void umode_entry(void *thread_id, void *p2, void *p3) */ static void enter_user_mode_entry(void *p1, void *p2, void *p3) { - z_thread_essential_set(arch_current_thread()); + z_thread_essential_set(_current); - zassert_true(z_is_thread_essential(arch_current_thread()), "Thread isn't set" + zassert_true(z_is_thread_essential(_current), "Thread isn't set" " as essential\n"); k_thread_user_mode_enter(umode_entry, diff --git a/tests/kernel/threads/thread_apis/src/test_essential_thread.c b/tests/kernel/threads/thread_apis/src/test_essential_thread.c index fc101e7caf794..082765bd14816 100644 --- a/tests/kernel/threads/thread_apis/src/test_essential_thread.c +++ b/tests/kernel/threads/thread_apis/src/test_essential_thread.c @@ -27,16 +27,16 @@ static void thread_entry(void *p1, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - z_thread_essential_set(arch_current_thread()); + z_thread_essential_set(_current); - if (z_is_thread_essential(arch_current_thread())) { + if (z_is_thread_essential(_current)) { k_busy_wait(100); } else { zassert_unreachable("The thread is not set as essential"); } - z_thread_essential_clear(arch_current_thread()); - zassert_false(z_is_thread_essential(arch_current_thread()), + z_thread_essential_clear(_current); + zassert_false(z_is_thread_essential(_current), "Essential flag of the thread is not cleared"); k_sem_give(&sync_sem); @@ -68,7 +68,7 @@ void k_sys_fatal_error_handler(unsigned int reason, fatal_error_signaled = true; - z_thread_essential_clear(arch_current_thread()); + z_thread_essential_clear(_current); } static void abort_thread_entry(void *p1, void *p2, void *p3) @@ -77,9 +77,9 @@ static void abort_thread_entry(void *p1, void *p2, void *p3) ARG_UNUSED(p2); ARG_UNUSED(p3); - z_thread_essential_set(arch_current_thread()); + z_thread_essential_set(_current); - if (z_is_thread_essential(arch_current_thread())) { + if (z_is_thread_essential(_current)) { k_busy_wait(100); } else { zassert_unreachable("The thread is not set as essential"); diff --git a/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c b/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c index 08fd395ed2e61..ff0ceeac7c844 100644 --- a/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c +++ b/tests/kernel/usage/thread_runtime_stats/src/test_thread_runtime_stats.c @@ -72,7 +72,7 @@ ZTEST(usage_api, test_all_stats_usage) k_thread_runtime_stats_t stats4; k_thread_runtime_stats_t stats5; - priority = k_thread_priority_get(arch_current_thread()); + priority = k_thread_priority_get(_current); tid = k_thread_create(&helper_thread, helper_stack, K_THREAD_STACK_SIZEOF(helper_stack), helper1, NULL, NULL, NULL, @@ -196,7 +196,7 @@ ZTEST(usage_api, 
test_thread_stats_enable_disable) k_thread_runtime_stats_t helper_stats3; int priority; - priority = k_thread_priority_get(arch_current_thread()); + priority = k_thread_priority_get(_current); tid = k_thread_create(&helper_thread, helper_stack, K_THREAD_STACK_SIZEOF(helper_stack), helper1, NULL, NULL, NULL, @@ -209,7 +209,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable) k_sleep(K_TICKS(5)); - k_thread_runtime_stats_get(arch_current_thread(), &stats1); + k_thread_runtime_stats_get(_current, &stats1); k_thread_runtime_stats_get(tid, &helper_stats1); k_thread_runtime_stats_disable(tid); @@ -225,7 +225,7 @@ ZTEST(usage_api, test_thread_stats_enable_disable) k_sleep(K_TICKS(2)); k_thread_runtime_stats_enable(tid); - k_thread_runtime_stats_get(arch_current_thread(), &stats2); + k_thread_runtime_stats_get(_current, &stats2); k_thread_runtime_stats_get(tid, &helper_stats2); /* Sleep for two ticks to let the helper thread execute again. */ @@ -280,12 +280,12 @@ ZTEST(usage_api, test_sys_stats_enable_disable) k_sys_runtime_stats_disable(); - k_thread_runtime_stats_get(arch_current_thread(), &thread_stats1); + k_thread_runtime_stats_get(_current, &thread_stats1); k_thread_runtime_stats_all_get(&sys_stats1); busy_loop(2); - k_thread_runtime_stats_get(arch_current_thread(), &thread_stats2); + k_thread_runtime_stats_get(_current, &thread_stats2); k_thread_runtime_stats_all_get(&sys_stats2); /* @@ -297,7 +297,7 @@ ZTEST(usage_api, test_sys_stats_enable_disable) busy_loop(2); - k_thread_runtime_stats_get(arch_current_thread(), &thread_stats3); + k_thread_runtime_stats_get(_current, &thread_stats3); k_thread_runtime_stats_all_get(&sys_stats3); /* @@ -398,7 +398,7 @@ ZTEST(usage_api, test_thread_stats_usage) k_thread_runtime_stats_t stats2; k_thread_runtime_stats_t stats3; - priority = k_thread_priority_get(arch_current_thread()); + priority = k_thread_priority_get(_current); /* * Verify that k_thread_runtime_stats_get() returns the expected @@ -408,7 +408,7 @@ ZTEST(usage_api, test_thread_stats_usage) status = k_thread_runtime_stats_get(NULL, &stats1); zassert_true(status == -EINVAL); - status = k_thread_runtime_stats_get(arch_current_thread(), NULL); + status = k_thread_runtime_stats_get(_current, NULL); zassert_true(status == -EINVAL); /* Align to the next tick */ @@ -422,7 +422,7 @@ ZTEST(usage_api, test_thread_stats_usage) helper1, NULL, NULL, NULL, priority + 2, 0, K_TICKS(1)); - main_thread = arch_current_thread(); + main_thread = _current; k_timer_init(&timer, resume_main, NULL); k_timer_start(&timer, K_TICKS(1), K_TICKS(10)); @@ -440,7 +440,7 @@ ZTEST(usage_api, test_thread_stats_usage) * the helper threads runtime stats. */ - k_thread_suspend(arch_current_thread()); + k_thread_suspend(_current); /* * T = 1. @@ -449,14 +449,14 @@ ZTEST(usage_api, test_thread_stats_usage) */ k_thread_runtime_stats_get(tid, &stats1); - k_thread_suspend(arch_current_thread()); + k_thread_suspend(_current); /* * T = 11. * Timer woke the main thread. Suspend main thread again. */ - k_thread_suspend(arch_current_thread()); + k_thread_suspend(_current); /* * T = 21. @@ -465,7 +465,7 @@ ZTEST(usage_api, test_thread_stats_usage) */ k_thread_runtime_stats_get(tid, &stats2); - k_thread_suspend(arch_current_thread()); + k_thread_suspend(_current); /* * T = 31. 
diff --git a/tests/subsys/pm/power_mgmt/src/main.c b/tests/subsys/pm/power_mgmt/src/main.c index da47ecee19d11..3c75e44587fc7 100644 --- a/tests/subsys/pm/power_mgmt/src/main.c +++ b/tests/subsys/pm/power_mgmt/src/main.c @@ -254,7 +254,7 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks) "There is no power state defined"); /* make sure this is idle thread */ - zassert_true(z_is_idle_thread_object(arch_current_thread())); + zassert_true(z_is_idle_thread_object(_current)); zassert_true(ticks == _kernel.idle); zassert_false(k_can_yield()); idle_entered = true; @@ -276,7 +276,7 @@ static void notify_pm_state_entry(enum pm_state state) /* enter suspend */ zassert_true(notify_app_entry == true, "Notification to enter suspend was not sent to the App"); - zassert_true(z_is_idle_thread_object(arch_current_thread())); + zassert_true(z_is_idle_thread_object(_current)); zassert_equal(state, PM_STATE_SUSPEND_TO_IDLE); pm_device_state_get(device_dummy, &device_power_state); @@ -301,7 +301,7 @@ static void notify_pm_state_exit(enum pm_state state) /* leave suspend */ zassert_true(notify_app_exit == true, "Notification to leave suspend was not sent to the App"); - zassert_true(z_is_idle_thread_object(arch_current_thread())); + zassert_true(z_is_idle_thread_object(_current)); zassert_equal(state, PM_STATE_SUSPEND_TO_IDLE); /* at this point, devices are active again*/ diff --git a/tests/ztest/error_hook/src/main.c b/tests/ztest/error_hook/src/main.c index b775c38c04b27..e4b05fc3eb0d1 100644 --- a/tests/ztest/error_hook/src/main.c +++ b/tests/ztest/error_hook/src/main.c @@ -71,10 +71,10 @@ __no_optimization static void trigger_fault_access(void) #elif defined(CONFIG_CPU_CORTEX_M) || defined(CONFIG_CPU_AARCH32_CORTEX_R) || \ defined(CONFIG_CPU_AARCH64_CORTEX_R) /* As this test case only runs when User Mode is enabled, - * accessing arch_current_thread() always triggers a memory access fault, + * accessing _current always triggers a memory access fault, * and is guaranteed not to trigger SecureFault exceptions. */ - void *a = (void *)arch_current_thread(); + void *a = (void *)_current; #else /* For most arch which support userspace, dereferencing NULL * pointer will be caught by exception. @@ -338,7 +338,7 @@ ZTEST(error_hook_tests, test_catch_assert_in_isr) static void trigger_z_oops(void) { /* Set up a dummy syscall frame, pointing to a valid area in memory. */ - arch_current_thread()->syscall_frame = _image_ram_start; + _current->syscall_frame = _image_ram_start; K_OOPS(true); } From f5aeb44dfecb8af632cdfb4bdf5c5a172a2d3016 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 7 Jan 2025 15:42:07 -0500 Subject: [PATCH 2/3] kernel: move current thread pointer management to core code Define the generic _current directly and get rid of the generic arch_current_thread(). The SMP default implementation is now known as z_smp_current_get(). It is no longer inlined, which saves significant binary size (about 10% for some random test case I checked). Introduce z_current_thread_set() and use it in place of arch_current_thread_set() for updating the current thread pointer, given that this is not necessarily an architecture-specific operation. The architecture-specific optimization, when enabled, should only care about its own things and not have to also update the generic _current_cpu->current copy.
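To make the intended usage concrete, here is a minimal sketch of the resulting contract. It is illustrative only: the example_* helpers below are made up for this description and are not part of the patch, but _current, z_current_thread_set() and the configuration switches are the ones introduced or kept by this series.

    #include <zephyr/kernel.h>
    #include <zephyr/kernel_structs.h>
    #include <zephyr/sys/printk.h>

    /* Readers: generic kernel code keeps using _current. Depending on the
     * build it expands to z_smp_current_get() (SMP), to
     * _kernel.cpus[0].current (uniprocessor), or to the architecture's own
     * arch_current_thread() when CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL is set.
     */
    void example_show_current(void)
    {
        printk("current thread: %p\n", (void *)_current);
    }

    /* Writers: only context-switch code, running with preemption disabled,
     * updates the pointer. It goes through z_current_thread_set(), which
     * refreshes _current_cpu->current and, when the architecture provides a
     * custom implementation, forwards the same value to
     * arch_current_thread_set().
     */
    void example_switch_in(struct k_thread *new_thread)
    {
        z_current_thread_set(new_thread);
    }

The point of the split is that readers never have to care which variant they get, while the single write path keeps the generic per-CPU copy coherent with any architecture-specific fast copy.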
Signed-off-by: Nicolas Pitre --- arch/arm/core/cortex_m/thread.c | 2 +- arch/posix/core/swap.c | 4 +- include/zephyr/arch/arch_inlines.h | 2 - include/zephyr/arch/common/arch_inlines.h | 45 ----------------------- include/zephyr/arch/riscv/arch_inlines.h | 7 ++-- include/zephyr/kernel_structs.h | 16 +++++++- kernel/include/kswap.h | 4 +- kernel/sched.c | 4 +- kernel/smp.c | 14 +++++++ 9 files changed, 38 insertions(+), 60 deletions(-) delete mode 100644 include/zephyr/arch/common/arch_inlines.h diff --git a/arch/arm/core/cortex_m/thread.c b/arch/arm/core/cortex_m/thread.c index b67cbe8ee3e33..c88fd8e41a0fe 100644 --- a/arch/arm/core/cortex_m/thread.c +++ b/arch/arm/core/cortex_m/thread.c @@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr, { z_arm_prepare_switch_to_main(); - arch_current_thread_set(main_thread); + z_current_thread_set(main_thread); #if defined(CONFIG_THREAD_LOCAL_STORAGE) /* On Cortex-M, TLS uses a global variable as pointer to diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c index 70d2cdedacc55..cf13ab4d4d311 100644 --- a/arch/posix/core/swap.c +++ b/arch/posix/core/swap.c @@ -50,7 +50,7 @@ int arch_swap(unsigned int key) _current->callee_saved.thread_status; - arch_current_thread_set(_kernel.ready_q.cache); + z_current_thread_set(_kernel.ready_q.cache); #if CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); #endif @@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr, z_thread_mark_switched_out(); #endif - arch_current_thread_set(_kernel.ready_q.cache); + z_current_thread_set(_kernel.ready_q.cache); #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING z_thread_mark_switched_in(); diff --git a/include/zephyr/arch/arch_inlines.h b/include/zephyr/arch/arch_inlines.h index 04c4a649f1e14..0f32159e2f1bf 100644 --- a/include/zephyr/arch/arch_inlines.h +++ b/include/zephyr/arch/arch_inlines.h @@ -34,6 +34,4 @@ #include #endif -#include - #endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */ diff --git a/include/zephyr/arch/common/arch_inlines.h b/include/zephyr/arch/common/arch_inlines.h deleted file mode 100644 index 0490dba71aab5..0000000000000 --- a/include/zephyr/arch/common/arch_inlines.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2024 Meta Platforms. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef ZEPHYR_INCLUDE_ZEPHYR_ARCH_COMMON_ARCH_INLINES_H_ -#define ZEPHYR_INCLUDE_ZEPHYR_ARCH_COMMON_ARCH_INLINES_H_ - -#ifndef ZEPHYR_INCLUDE_ARCH_INLINES_H_ -#error "This header shouldn't be included directly" -#endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */ - -#ifndef _ASMLANGUAGE - -#include - -#ifndef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL -static ALWAYS_INLINE struct k_thread *arch_current_thread(void) -{ -#ifdef CONFIG_SMP - /* In SMP, _current is a field read from _current_cpu, which - * can race with preemption before it is read. We must lock - * local interrupts when reading it. 
- */ - unsigned int k = arch_irq_lock(); - - struct k_thread *ret = _current_cpu->current; - - arch_irq_unlock(k); -#else - struct k_thread *ret = _kernel.cpus[0].current; -#endif /* CONFIG_SMP */ - return ret; -} - -static ALWAYS_INLINE void arch_current_thread_set(struct k_thread *thread) -{ - _current_cpu->current = thread; -} -#endif /* CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL */ - -#endif /* _ASMLANGUAGE */ - -#endif /* ZEPHYR_INCLUDE_ZEPHYR_ARCH_COMMON_ARCH_INLINES_H_ */ diff --git a/include/zephyr/arch/riscv/arch_inlines.h b/include/zephyr/arch/riscv/arch_inlines.h index c97413a5f8f5b..022879c88d40d 100644 --- a/include/zephyr/arch/riscv/arch_inlines.h +++ b/include/zephyr/arch/riscv/arch_inlines.h @@ -28,13 +28,12 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void) } #ifdef CONFIG_RISCV_CURRENT_VIA_GP + register struct k_thread *__arch_current_thread __asm__("gp"); #define arch_current_thread() __arch_current_thread -#define arch_current_thread_set(thread) \ - do { \ - __arch_current_thread = _current_cpu->current = (thread); \ - } while (0) +#define arch_current_thread_set(thread) ({ __arch_current_thread = (thread); }) + #endif /* CONFIG_RISCV_CURRENT_VIA_GP */ static ALWAYS_INLINE unsigned int arch_num_cpus(void) diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h index 3e2a9fe428560..5cea718f0b4c3 100644 --- a/include/zephyr/kernel_structs.h +++ b/include/zephyr/kernel_structs.h @@ -260,16 +260,28 @@ extern atomic_t _cpus_active; * another SMP CPU. */ bool z_smp_cpu_mobile(void); - #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \ arch_curr_cpu(); }) -#define _current arch_current_thread() + +struct k_thread *z_smp_current_get(void); +#define _current z_smp_current_get() #else #define _current_cpu (&_kernel.cpus[0]) #define _current _kernel.cpus[0].current #endif +/* This is always invoked from a context where preemption is disabled */ +#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); }) + +#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL +#undef _current +#define _current arch_current_thread() +#undef z_current_thread_set +#define z_current_thread_set(thread) \ + arch_current_thread_set(({ _current_cpu->current = (thread); })) +#endif + /* kernel wait queue record */ #ifdef CONFIG_WAITQ_SCALABLE diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index a4e75e7ed6da8..803a4a546eb72 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -133,7 +133,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, #endif /* CONFIG_SMP */ z_thread_mark_switched_out(); z_sched_switch_spin(new_thread); - arch_current_thread_set(new_thread); + z_current_thread_set(new_thread); #ifdef CONFIG_TIMESLICING z_reset_time_slice(new_thread); @@ -259,6 +259,6 @@ static inline void z_dummy_thread_init(struct k_thread *dummy_thread) dummy_thread->base.slice_ticks = 0; #endif /* CONFIG_TIMESLICE_PER_THREAD */ - arch_current_thread_set(dummy_thread); + z_current_thread_set(dummy_thread); } #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */ diff --git a/kernel/sched.c b/kernel/sched.c index 02dc0b699d223..c126792bcbb0b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -797,11 +797,11 @@ struct k_thread *z_swap_next_thread(void) } #ifdef CONFIG_USE_SWITCH -/* Just a wrapper around arch_current_thread_set(xxx) with tracing */ +/* Just a wrapper around z_current_thread_set(xxx) with tracing */ static inline void set_current(struct k_thread *new_thread) { z_thread_mark_switched_out(); - arch_current_thread_set(new_thread); + 
z_current_thread_set(new_thread); } /** diff --git a/kernel/smp.c b/kernel/smp.c index a56595252789a..f97f1b2a17e3a 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -248,3 +248,17 @@ bool z_smp_cpu_mobile(void) arch_irq_unlock(k); return !pinned; } + +struct k_thread *z_smp_current_get(void) +{ + /* + * _current is a field read from _current_cpu, which can race + * with preemption before it is read. We must lock local + * interrupts when reading it. + */ + unsigned int key = arch_irq_lock(); + struct k_thread *t = _current_cpu->current; + + arch_irq_unlock(key); + return t; +} From 33775a2150e7c34460657b29d5088775580b19da Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 8 Jan 2025 12:36:37 -0500 Subject: [PATCH 3/3] kernel: mark z_smp_current_get() with the const attribute Repeated references to _current won't produce a different result, as the executing thread instance is always the same. Use the const attribute to let the compiler know it may reuse a previously obtained value. This offsets the penalty for moving z_smp_current_get() out of line and provides yet more binary size reduction. This change is isolated in its own commit to ease bisecting in case some unexpected misbehavior is eventually observed. Signed-off-by: Nicolas Pitre --- include/zephyr/kernel_structs.h | 2 +- kernel/smp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h index 5cea718f0b4c3..56df49dcb23db 100644 --- a/include/zephyr/kernel_structs.h +++ b/include/zephyr/kernel_structs.h @@ -263,7 +263,7 @@ bool z_smp_cpu_mobile(void); #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \ arch_curr_cpu(); }) -struct k_thread *z_smp_current_get(void); +__attribute_const__ struct k_thread *z_smp_current_get(void); #define _current z_smp_current_get() #else diff --git a/kernel/smp.c b/kernel/smp.c index f97f1b2a17e3a..63ac7bc8975e7 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -249,7 +249,7 @@ bool z_smp_cpu_mobile(void) return !pinned; } -struct k_thread *z_smp_current_get(void) +__attribute_const__ struct k_thread *z_smp_current_get(void) { /* * _current is a field read from _current_cpu, which can race