diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c index f193c0b09f1f2..512d1cc442c6f 100644 --- a/arch/arc/core/fatal.c +++ b/arch/arc/core/fatal.c @@ -23,7 +23,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #ifdef CONFIG_EXCEPTION_DEBUG -static void dump_arc_esf(const z_arch_esf_t *esf) +static void dump_arc_esf(const struct arch_esf *esf) { ARC_EXCEPTION_DUMP(" r0: 0x%" PRIxPTR " r1: 0x%" PRIxPTR " r2: 0x%" PRIxPTR " r3: 0x%" PRIxPTR "", esf->r0, esf->r1, esf->r2, esf->r3); @@ -42,7 +42,7 @@ static void dump_arc_esf(const z_arch_esf_t *esf) } #endif -void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf) +void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf) { #ifdef CONFIG_EXCEPTION_DEBUG if (esf != NULL) { diff --git a/arch/arc/core/fault.c b/arch/arc/core/fault.c index 763ed7a2c737a..6f9da3cd1e0e9 100644 --- a/arch/arc/core/fault.c +++ b/arch/arc/core/fault.c @@ -346,7 +346,7 @@ static void dump_exception_info(uint32_t vector, uint32_t cause, uint32_t parame * invokes the user provided routine k_sys_fatal_error_handler() which is * responsible for implementing the error handling policy. */ -void _Fault(z_arch_esf_t *esf, uint32_t old_sp) +void _Fault(struct arch_esf *esf, uint32_t old_sp) { uint32_t vector, cause, parameter; uint32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA); diff --git a/arch/arc/include/kernel_arch_data.h b/arch/arc/include/kernel_arch_data.h index efe2bd7d1c658..b0dc733446b08 100644 --- a/arch/arc/include/kernel_arch_data.h +++ b/arch/arc/include/kernel_arch_data.h @@ -36,7 +36,7 @@ extern "C" { #endif #ifdef CONFIG_ARC_HAS_SECURE -struct _irq_stack_frame { +struct arch_esf { #ifdef CONFIG_ARC_HAS_ZOL uintptr_t lp_end; uintptr_t lp_start; @@ -72,7 +72,7 @@ struct _irq_stack_frame { uintptr_t status32; }; #else -struct _irq_stack_frame { +struct arch_esf { uintptr_t r0; uintptr_t r1; uintptr_t r2; @@ -108,7 +108,7 @@ struct _irq_stack_frame { }; #endif -typedef struct _irq_stack_frame _isf_t; +typedef struct arch_esf _isf_t; diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h index 1c46423cb4f03..65a497e02d078 100644 --- a/arch/arc/include/kernel_arch_func.h +++ b/arch/arc/include/kernel_arch_func.h @@ -62,7 +62,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3, uint32_t stack, uint32_t size, struct k_thread *thread); -extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +extern void z_arc_fatal_error(unsigned int reason, const struct arch_esf *esf); extern void arch_sched_ipi(void); diff --git a/arch/arm/core/cortex_a_r/fault.c b/arch/arm/core/cortex_a_r/fault.c index a39efeb96e027..d792fc57da8de 100644 --- a/arch/arm/core/cortex_a_r/fault.c +++ b/arch/arm/core/cortex_a_r/fault.c @@ -206,7 +206,7 @@ bool z_arm_fault_undef_instruction_fp(void) * * @return Returns true if the fault is fatal */ -bool z_arm_fault_undef_instruction(z_arch_esf_t *esf) +bool z_arm_fault_undef_instruction(struct arch_esf *esf) { #if defined(CONFIG_FPU_SHARING) /* @@ -243,7 +243,7 @@ bool z_arm_fault_undef_instruction(z_arch_esf_t *esf) * * @return Returns true if the fault is fatal */ -bool z_arm_fault_prefetch(z_arch_esf_t *esf) +bool z_arm_fault_prefetch(struct arch_esf *esf) { uint32_t reason = K_ERR_CPU_EXCEPTION; @@ -299,7 +299,7 @@ static const struct z_exc_handle exceptions[] = { * * @return true if error is recoverable, otherwise return false. 
*/ -static bool memory_fault_recoverable(z_arch_esf_t *esf) +static bool memory_fault_recoverable(struct arch_esf *esf) { for (int i = 0; i < ARRAY_SIZE(exceptions); i++) { /* Mask out instruction mode */ @@ -321,7 +321,7 @@ static bool memory_fault_recoverable(z_arch_esf_t *esf) * * @return Returns true if the fault is fatal */ -bool z_arm_fault_data(z_arch_esf_t *esf) +bool z_arm_fault_data(struct arch_esf *esf) { uint32_t reason = K_ERR_CPU_EXCEPTION; diff --git a/arch/arm/core/cortex_a_r/irq_manage.c b/arch/arm/core/cortex_a_r/irq_manage.c index 1dca75e297ab0..48c9ede3327bb 100644 --- a/arch/arm/core/cortex_a_r/irq_manage.c +++ b/arch/arm/core/cortex_a_r/irq_manage.c @@ -71,7 +71,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags) } #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */ -void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); /** * diff --git a/arch/arm/core/cortex_m/coredump.c b/arch/arm/core/cortex_m/coredump.c index 2b4a86a1bb9b2..c688c91d9819d 100644 --- a/arch/arm/core/cortex_m/coredump.c +++ b/arch/arm/core/cortex_m/coredump.c @@ -41,7 +41,7 @@ struct arm_arch_block { */ static struct arm_arch_block arch_blk; -void arch_coredump_info_dump(const z_arch_esf_t *esf) +void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, diff --git a/arch/arm/core/cortex_m/fault.c b/arch/arm/core/cortex_m/fault.c index 5090381fa317d..78b87092976f6 100644 --- a/arch/arm/core/cortex_m/fault.c +++ b/arch/arm/core/cortex_m/fault.c @@ -146,7 +146,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); */ #if (CONFIG_FAULT_DUMP == 1) -static void fault_show(const z_arch_esf_t *esf, int fault) +static void fault_show(const struct arch_esf *esf, int fault) { PR_EXC("Fault! EXC #%d", fault); @@ -165,7 +165,7 @@ static void fault_show(const z_arch_esf_t *esf, int fault) * * For Dump level 0, no information needs to be generated. */ -static void fault_show(const z_arch_esf_t *esf, int fault) +static void fault_show(const struct arch_esf *esf, int fault) { (void)esf; (void)fault; @@ -185,7 +185,7 @@ static const struct z_exc_handle exceptions[] = { * * @return true if error is recoverable, otherwise return false. */ -static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous) +static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous) { #ifdef CONFIG_USERSPACE for (int i = 0; i < ARRAY_SIZE(exceptions); i++) { @@ -228,7 +228,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, * * @return error code to identify the fatal error reason */ -static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault, +static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable) { uint32_t reason = K_ERR_ARM_MEM_GENERIC; @@ -387,7 +387,7 @@ static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault, * @return error code to identify the fatal error reason. 
* */ -static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable) +static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable) { uint32_t reason = K_ERR_ARM_BUS_GENERIC; @@ -549,7 +549,7 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable) * * @return error code to identify the fatal error reason */ -static uint32_t usage_fault(const z_arch_esf_t *esf) +static uint32_t usage_fault(const struct arch_esf *esf) { uint32_t reason = K_ERR_ARM_USAGE_GENERIC; @@ -612,7 +612,7 @@ static uint32_t usage_fault(const z_arch_esf_t *esf) * * @return error code to identify the fatal error reason */ -static uint32_t secure_fault(const z_arch_esf_t *esf) +static uint32_t secure_fault(const struct arch_esf *esf) { uint32_t reason = K_ERR_ARM_SECURE_GENERIC; @@ -661,7 +661,7 @@ static uint32_t secure_fault(const z_arch_esf_t *esf) * See z_arm_fault_dump() for example. * */ -static void debug_monitor(z_arch_esf_t *esf, bool *recoverable) +static void debug_monitor(struct arch_esf *esf, bool *recoverable) { *recoverable = false; @@ -687,7 +687,7 @@ static void debug_monitor(z_arch_esf_t *esf, bool *recoverable) #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ -static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf) +static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf) { uint16_t *ret_addr = (uint16_t *)esf->basic.pc; /* SVC is a 16-bit instruction. On a synchronous SVC @@ -762,7 +762,7 @@ static inline bool z_arm_is_pc_valid(uintptr_t pc) * * @return error code to identify the fatal error reason */ -static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable) +static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable) { uint32_t reason = K_ERR_CPU_EXCEPTION; @@ -829,7 +829,7 @@ static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable) * See z_arm_fault_dump() for example. * */ -static void reserved_exception(const z_arch_esf_t *esf, int fault) +static void reserved_exception(const struct arch_esf *esf, int fault) { ARG_UNUSED(esf); @@ -839,7 +839,7 @@ static void reserved_exception(const z_arch_esf_t *esf, int fault) } /* Handler function for ARM fault conditions. */ -static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable) +static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable) { uint32_t reason = K_ERR_CPU_EXCEPTION; @@ -893,7 +893,7 @@ static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable) * * @param secure_esf Pointer to the secure stack frame. */ -static void secure_stack_dump(const z_arch_esf_t *secure_esf) +static void secure_stack_dump(const struct arch_esf *secure_esf) { /* * In case a Non-Secure exception interrupted the Secure @@ -918,7 +918,7 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf) * Non-Secure exception entry. */ top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS; - secure_esf = (const z_arch_esf_t *)top_of_sec_stack; + secure_esf = (const struct arch_esf *)top_of_sec_stack; sec_ret_addr = secure_esf->basic.pc; } else { /* Exception during Non-Secure function call. 
@@ -947,11 +947,11 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf) * * @return ESF pointer on success, otherwise return NULL */ -static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return, +static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return, bool *nested_exc) { bool alternative_state_exc = false; - z_arch_esf_t *ptr_esf = NULL; + struct arch_esf *ptr_esf = NULL; *nested_exc = false; @@ -979,14 +979,14 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret alternative_state_exc = true; /* Dump the Secure stack before handling the actual fault. */ - z_arch_esf_t *secure_esf; + struct arch_esf *secure_esf; if (exc_return & EXC_RETURN_SPSEL_PROCESS) { /* Secure stack pointed by PSP */ - secure_esf = (z_arch_esf_t *)psp; + secure_esf = (struct arch_esf *)psp; } else { /* Secure stack pointed by MSP */ - secure_esf = (z_arch_esf_t *)msp; + secure_esf = (struct arch_esf *)msp; *nested_exc = true; } @@ -997,9 +997,9 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret * and supply it to the fault handing function. */ if (exc_return & EXC_RETURN_MODE_THREAD) { - ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS(); + ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS(); } else { - ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS(); + ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS(); } } #elif defined(CONFIG_ARM_NONSECURE_FIRMWARE) @@ -1024,10 +1024,10 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret if (exc_return & EXC_RETURN_SPSEL_PROCESS) { /* Non-Secure stack frame on PSP */ - ptr_esf = (z_arch_esf_t *)psp; + ptr_esf = (struct arch_esf *)psp; } else { /* Non-Secure stack frame on MSP */ - ptr_esf = (z_arch_esf_t *)msp; + ptr_esf = (struct arch_esf *)msp; } } else { /* Exception entry occurred in Non-Secure stack. */ @@ -1046,11 +1046,11 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret if (!alternative_state_exc) { if (exc_return & EXC_RETURN_MODE_THREAD) { /* Returning to thread mode */ - ptr_esf = (z_arch_esf_t *)psp; + ptr_esf = (struct arch_esf *)psp; } else { /* Returning to handler mode */ - ptr_esf = (z_arch_esf_t *)msp; + ptr_esf = (struct arch_esf *)msp; *nested_exc = true; } } @@ -1095,12 +1095,12 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return, uint32_t reason = K_ERR_CPU_EXCEPTION; int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk; bool recoverable, nested_exc; - z_arch_esf_t *esf; + struct arch_esf *esf; /* Create a stack-ed copy of the ESF to be used during * the fault handling process. */ - z_arch_esf_t esf_copy; + struct arch_esf esf_copy; /* Force unlock interrupts */ arch_irq_unlock(0); @@ -1123,13 +1123,13 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return, /* Copy ESF */ #if !defined(CONFIG_EXTRA_EXCEPTION_INFO) - memcpy(&esf_copy, esf, sizeof(z_arch_esf_t)); + memcpy(&esf_copy, esf, sizeof(struct arch_esf)); ARG_UNUSED(callee_regs); #else /* the extra exception info is not present in the original esf * so we only copy the fields before those. 
*/ - memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info)); + memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info)); esf_copy.extra_info = (struct __extra_esf_info) { .callee = callee_regs, .exc_return = exc_return, diff --git a/arch/arm/core/cortex_m/irq_manage.c b/arch/arm/core/cortex_m/irq_manage.c index b72332b32e81a..f2030ec4ab020 100644 --- a/arch/arm/core/cortex_m/irq_manage.c +++ b/arch/arm/core/cortex_m/irq_manage.c @@ -94,7 +94,7 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags) #endif /* !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) */ -void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); /** * diff --git a/arch/arm/core/fatal.c b/arch/arm/core/fatal.c index 4364d48d45d57..4532e238f05c9 100644 --- a/arch/arm/core/fatal.c +++ b/arch/arm/core/fatal.c @@ -18,7 +18,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); #ifdef CONFIG_EXCEPTION_DEBUG -static void esf_dump(const z_arch_esf_t *esf) +static void esf_dump(const struct arch_esf *esf) { LOG_ERR("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x", esf->basic.a1, esf->basic.a2, esf->basic.a3); @@ -66,7 +66,7 @@ static void esf_dump(const z_arch_esf_t *esf) } #endif /* CONFIG_EXCEPTION_DEBUG */ -void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf) +void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf) { #ifdef CONFIG_EXCEPTION_DEBUG if (esf != NULL) { @@ -102,7 +102,7 @@ void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf) * @param esf exception frame * @param callee_regs Callee-saved registers (R4-R11) */ -void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs) +void z_do_kernel_oops(const struct arch_esf *esf, _callee_saved_t *callee_regs) { #if !(defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)) ARG_UNUSED(callee_regs); @@ -130,9 +130,9 @@ void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs) #if !defined(CONFIG_EXTRA_EXCEPTION_INFO) z_arm_fatal_error(reason, esf); #else - z_arch_esf_t esf_copy; + struct arch_esf esf_copy; - memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info)); + memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info)); #if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) /* extra exception info is collected in callee_reg param * on CONFIG_ARMV7_M_ARMV8_M_MAINLINE @@ -156,7 +156,7 @@ void z_do_kernel_oops(const z_arch_esf_t *esf, _callee_saved_t *callee_regs) FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) { uint32_t *ssf_contents = ssf_ptr; - z_arch_esf_t oops_esf = { 0 }; + struct arch_esf oops_esf = { 0 }; /* TODO: Copy the rest of the register set out of ssf_ptr */ oops_esf.basic.pc = ssf_contents[3]; diff --git a/arch/arm/core/gdbstub.c b/arch/arm/core/gdbstub.c index 5386cfa619f1a..60d16b78c319c 100644 --- a/arch/arm/core/gdbstub.c +++ b/arch/arm/core/gdbstub.c @@ -42,7 +42,7 @@ static int is_bkpt(unsigned int exc_cause) } /* Wrapper function to save and restore execution c */ -void z_gdb_entry(z_arch_esf_t *esf, unsigned int exc_cause) +void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause) { /* Disable the hardware breakpoint in case it was set */ __asm__ volatile("mcr p14, 0, %0, c0, c0, 5" ::"r"(0x0) :); diff --git a/arch/arm/include/cortex_a_r/exception.h b/arch/arm/include/cortex_a_r/exception.h index 7519016176c5e..6daa9c106ee2b 100644 --- a/arch/arm/include/cortex_a_r/exception.h +++ b/arch/arm/include/cortex_a_r/exception.h 
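Note on the esf_copy hunks above (cortex_m/fault.c and arm/core/fatal.c): the copy is deliberately bounded by offsetof(struct arch_esf, extra_info), so only the members actually stacked on exception entry are duplicated, and the trailing extra_info member is filled in by hand afterwards. A minimal, self-contained sketch of that idiom follows; the struct and field names are hypothetical illustrations, not Zephyr definitions.

#include <stddef.h>
#include <string.h>

/* Stand-in for an exception frame whose last member is appended metadata
 * that the hardware/entry code never writes.
 */
struct frame {
	unsigned long r0, r1, pc;   /* members present in the stacked source frame */
	const void *extra_info;     /* trailing member, populated manually */
};

static void copy_frame_prefix(struct frame *dst, const struct frame *src,
			      const void *extra)
{
	/* Copy everything laid out before 'extra_info'... */
	memcpy(dst, src, offsetof(struct frame, extra_info));
	/* ...then set the trailing member explicitly. */
	dst->extra_info = extra;
}

Because the copy size is derived from the layout rather than hard-coded, the rename in this patch only has to swap the type name inside offsetof().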
@@ -38,7 +38,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void) return (arch_curr_cpu()->nested != 0U); } -static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf) +static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf) { return (arch_curr_cpu()->arch.exc_depth > 1U) ? (true) : (false); } @@ -48,7 +48,7 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf) * This function is used by privileged code to determine if the thread * associated with the stack frame is in user mode. */ -static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf) +static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf) { return ((esf->basic.xpsr & CPSR_M_Msk) == CPSR_M_USR); } diff --git a/arch/arm/include/cortex_a_r/kernel_arch_func.h b/arch/arm/include/cortex_a_r/kernel_arch_func.h index 88f631ff4b487..3486d7d4d4e02 100644 --- a/arch/arm/include/cortex_a_r/kernel_arch_func.h +++ b/arch/arm/include/cortex_a_r/kernel_arch_func.h @@ -59,7 +59,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry, uint32_t stack_end, uint32_t stack_start); -extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); #endif /* _ASMLANGUAGE */ diff --git a/arch/arm/include/cortex_m/exception.h b/arch/arm/include/cortex_m/exception.h index bf86abd77c70f..89bdd4b83e9a2 100644 --- a/arch/arm/include/cortex_m/exception.h +++ b/arch/arm/include/cortex_m/exception.h @@ -68,7 +68,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void) * @return true if execution state was in handler mode, before * the current exception occurred, otherwise false. */ -static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf) +static ALWAYS_INLINE bool arch_is_in_nested_exception(const struct arch_esf *esf) { return (esf->basic.xpsr & IPSR_ISR_Msk) ? 
(true) : (false); } @@ -80,7 +80,7 @@ static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf) * @param esf the exception stack frame (unused) * @return true if the current thread was in unprivileged mode */ -static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf) +static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const struct arch_esf *esf) { return z_arm_thread_is_in_user_mode(); } diff --git a/arch/arm/include/cortex_m/kernel_arch_func.h b/arch/arm/include/cortex_m/kernel_arch_func.h index 77619c9d6c4f8..132c056c91022 100644 --- a/arch/arm/include/cortex_m/kernel_arch_func.h +++ b/arch/arm/include/cortex_m/kernel_arch_func.h @@ -76,7 +76,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry, uint32_t stack_end, uint32_t stack_start); -extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +extern void z_arm_fatal_error(unsigned int reason, const struct arch_esf *esf); #endif /* _ASMLANGUAGE */ diff --git a/arch/arm/include/kernel_arch_data.h b/arch/arm/include/kernel_arch_data.h index 5ad19db8f84b8..9b4ca04f66c56 100644 --- a/arch/arm/include/kernel_arch_data.h +++ b/arch/arm/include/kernel_arch_data.h @@ -42,7 +42,7 @@ extern "C" { #endif -typedef struct __esf _esf_t; +typedef struct arch_esf _esf_t; typedef struct __basic_sf _basic_sf_t; #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) typedef struct __fpu_sf _fpu_sf_t; diff --git a/arch/arm64/core/coredump.c b/arch/arm64/core/coredump.c index 399cf85e3d0f4..0176b61612ec1 100644 --- a/arch/arm64/core/coredump.c +++ b/arch/arm64/core/coredump.c @@ -13,7 +13,7 @@ #define ARCH_HDR_VER 1 /* Structure to store the architecture registers passed arch_coredump_info_dump - * As callee saved registers are not provided in z_arch_esf_t structure in Zephyr + * As callee saved registers are not provided in struct arch_esf structure in Zephyr * we just need 22 registers. 
*/ struct arm64_arch_block { @@ -50,7 +50,7 @@ struct arm64_arch_block { */ static struct arm64_arch_block arch_blk; -void arch_coredump_info_dump(const z_arch_esf_t *esf) +void arch_coredump_info_dump(const struct arch_esf *esf) { /* Target architecture information header */ /* Information just relevant to the python parser */ @@ -69,7 +69,7 @@ void arch_coredump_info_dump(const z_arch_esf_t *esf) /* * Copies the thread registers to a memory block that will be printed out - * The thread registers are already provided by structure z_arch_esf_t + * The thread registers are already provided by structure struct arch_esf */ arch_blk.r.x0 = esf->x0; arch_blk.r.x1 = esf->x1; diff --git a/arch/arm64/core/fatal.c b/arch/arm64/core/fatal.c index f921c4ccf5aa6..a02ae13acf89c 100644 --- a/arch/arm64/core/fatal.c +++ b/arch/arm64/core/fatal.c @@ -181,7 +181,7 @@ static void dump_esr(uint64_t esr, bool *dump_far) LOG_ERR(" ISS: 0x%llx", GET_ESR_ISS(esr)); } -static void esf_dump(const z_arch_esf_t *esf) +static void esf_dump(const struct arch_esf *esf) { LOG_ERR("x0: 0x%016llx x1: 0x%016llx", esf->x0, esf->x1); LOG_ERR("x2: 0x%016llx x3: 0x%016llx", esf->x2, esf->x3); @@ -196,7 +196,7 @@ static void esf_dump(const z_arch_esf_t *esf) } #ifdef CONFIG_EXCEPTION_STACK_TRACE -static void esf_unwind(const z_arch_esf_t *esf) +static void esf_unwind(const struct arch_esf *esf) { /* * For GCC: @@ -244,7 +244,7 @@ static void esf_unwind(const z_arch_esf_t *esf) #endif /* CONFIG_EXCEPTION_DEBUG */ #ifdef CONFIG_ARM64_STACK_PROTECTION -static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint64_t far) +static bool z_arm64_stack_corruption_check(struct arch_esf *esf, uint64_t esr, uint64_t far) { uint64_t sp, sp_limit, guard_start; /* 0x25 means data abort from current EL */ @@ -284,7 +284,7 @@ static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint } #endif -static bool is_recoverable(z_arch_esf_t *esf, uint64_t esr, uint64_t far, +static bool is_recoverable(struct arch_esf *esf, uint64_t esr, uint64_t far, uint64_t elr) { if (!esf) @@ -306,7 +306,7 @@ static bool is_recoverable(z_arch_esf_t *esf, uint64_t esr, uint64_t far, return false; } -void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf) +void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf) { uint64_t esr = 0; uint64_t elr = 0; @@ -379,7 +379,7 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf) * * @param esf exception frame */ -void z_arm64_do_kernel_oops(z_arch_esf_t *esf) +void z_arm64_do_kernel_oops(struct arch_esf *esf) { /* x8 holds the exception reason */ unsigned int reason = esf->x8; diff --git a/arch/arm64/core/fpu.c b/arch/arm64/core/fpu.c index 0133eed2dcaaf..a585165b94339 100644 --- a/arch/arm64/core/fpu.c +++ b/arch/arm64/core/fpu.c @@ -159,7 +159,7 @@ void z_arm64_fpu_enter_exc(void) * simulate them and leave the FPU access disabled. This also avoids the * need for disabling interrupts in syscalls and IRQ handlers as well. */ -static bool simulate_str_q_insn(z_arch_esf_t *esf) +static bool simulate_str_q_insn(struct arch_esf *esf) { /* * Support only the "FP in exception" cases for now. @@ -221,7 +221,7 @@ static bool simulate_str_q_insn(z_arch_esf_t *esf) * don't get interrupted that is. To ensure that we mask interrupts to * the triggering exception context. 
*/ -void z_arm64_fpu_trap(z_arch_esf_t *esf) +void z_arm64_fpu_trap(struct arch_esf *esf) { __ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled"); diff --git a/arch/arm64/core/irq_manage.c b/arch/arm64/core/irq_manage.c index 4e96ce77bfa20..6344d1e3696c0 100644 --- a/arch/arm64/core/irq_manage.c +++ b/arch/arm64/core/irq_manage.c @@ -18,7 +18,7 @@ #include #include -void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf); +void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf); #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER) /* diff --git a/arch/arm64/core/thread.c b/arch/arm64/core/thread.c index a0269501c19ac..18f49945eda49 100644 --- a/arch/arm64/core/thread.c +++ b/arch/arm64/core/thread.c @@ -87,7 +87,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void *p1, void *p2, void *p3) { extern void z_arm64_exit_exc(void); - z_arch_esf_t *pInitCtx; + struct arch_esf *pInitCtx; /* * Clean the thread->arch to avoid unexpected behavior because the @@ -102,7 +102,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, * dropping into EL0. */ - pInitCtx = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr); + pInitCtx = Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr); pInitCtx->x0 = (uint64_t)entry; pInitCtx->x1 = (uint64_t)p1; diff --git a/arch/arm64/include/kernel_arch_data.h b/arch/arm64/include/kernel_arch_data.h index ec781fc902dd5..8b607c1dbf47d 100644 --- a/arch/arm64/include/kernel_arch_data.h +++ b/arch/arm64/include/kernel_arch_data.h @@ -36,7 +36,7 @@ extern "C" { #endif -typedef struct __esf _esf_t; +typedef struct arch_esf _esf_t; typedef struct __basic_sf _basic_sf_t; #ifdef __cplusplus diff --git a/arch/arm64/include/kernel_arch_func.h b/arch/arm64/include/kernel_arch_func.h index a5c3d59d87a6f..cc91abc3b49f3 100644 --- a/arch/arm64/include/kernel_arch_func.h +++ b/arch/arm64/include/kernel_arch_func.h @@ -43,7 +43,7 @@ static inline void arch_switch(void *switch_to, void **switched_from) z_arm64_context_switch(new, old); } -extern void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf); +extern void z_arm64_fatal_error(unsigned int reason, struct arch_esf *esf); extern void z_arm64_set_ttbr0(uint64_t ttbr0); extern void z_arm64_mem_cfg_ipi(void); diff --git a/arch/mips/core/fatal.c b/arch/mips/core/fatal.c index 16011241666ab..a53e5bb0f5e6c 100644 --- a/arch/mips/core/fatal.c +++ b/arch/mips/core/fatal.c @@ -9,7 +9,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); FUNC_NORETURN void z_mips_fatal_error(unsigned int reason, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { #ifdef CONFIG_EXCEPTION_DEBUG if (esf != NULL) { @@ -84,7 +84,7 @@ static char *cause_str(unsigned long cause) } } -void _Fault(z_arch_esf_t *esf) +void _Fault(struct arch_esf *esf) { unsigned long cause; diff --git a/arch/mips/core/isr.S b/arch/mips/core/isr.S index 44babb2149be2..86d05d1983356 100644 --- a/arch/mips/core/isr.S +++ b/arch/mips/core/isr.S @@ -14,7 +14,7 @@ #include #include -#define ESF_O(FIELD) __z_arch_esf_t_##FIELD##_OFFSET +#define ESF_O(FIELD) __struct_arch_esf_##FIELD##_OFFSET #define THREAD_O(FIELD) _thread_offset_to_##FIELD /* Convenience macros for loading/storing register states. 
*/ @@ -58,12 +58,12 @@ op v1, ESF_O(v1)(sp) ; #define STORE_CALLER_SAVED() \ - addi sp, sp, -__z_arch_esf_t_SIZEOF ;\ + addi sp, sp, -__struct_arch_esf_SIZEOF ;\ DO_CALLER_SAVED(OP_STOREREG) ; #define LOAD_CALLER_SAVED() \ DO_CALLER_SAVED(OP_LOADREG) ;\ - addi sp, sp, __z_arch_esf_t_SIZEOF ; + addi sp, sp, __struct_arch_esf_SIZEOF ; /* imports */ GTEXT(_Fault) diff --git a/arch/mips/core/offsets/offsets.c b/arch/mips/core/offsets/offsets.c index 24b477e9558ea..c70ce3c39fc7e 100644 --- a/arch/mips/core/offsets/offsets.c +++ b/arch/mips/core/offsets/offsets.c @@ -23,32 +23,32 @@ GEN_OFFSET_SYM(_callee_saved_t, s6); GEN_OFFSET_SYM(_callee_saved_t, s7); GEN_OFFSET_SYM(_callee_saved_t, s8); -GEN_OFFSET_SYM(z_arch_esf_t, ra); -GEN_OFFSET_SYM(z_arch_esf_t, gp); -GEN_OFFSET_SYM(z_arch_esf_t, t0); -GEN_OFFSET_SYM(z_arch_esf_t, t1); -GEN_OFFSET_SYM(z_arch_esf_t, t2); -GEN_OFFSET_SYM(z_arch_esf_t, t3); -GEN_OFFSET_SYM(z_arch_esf_t, t4); -GEN_OFFSET_SYM(z_arch_esf_t, t5); -GEN_OFFSET_SYM(z_arch_esf_t, t6); -GEN_OFFSET_SYM(z_arch_esf_t, t7); -GEN_OFFSET_SYM(z_arch_esf_t, t8); -GEN_OFFSET_SYM(z_arch_esf_t, t9); -GEN_OFFSET_SYM(z_arch_esf_t, a0); -GEN_OFFSET_SYM(z_arch_esf_t, a1); -GEN_OFFSET_SYM(z_arch_esf_t, a2); -GEN_OFFSET_SYM(z_arch_esf_t, a3); -GEN_OFFSET_SYM(z_arch_esf_t, v0); -GEN_OFFSET_SYM(z_arch_esf_t, v1); -GEN_OFFSET_SYM(z_arch_esf_t, at); -GEN_OFFSET_SYM(z_arch_esf_t, epc); -GEN_OFFSET_SYM(z_arch_esf_t, badvaddr); -GEN_OFFSET_SYM(z_arch_esf_t, hi); -GEN_OFFSET_SYM(z_arch_esf_t, lo); -GEN_OFFSET_SYM(z_arch_esf_t, status); -GEN_OFFSET_SYM(z_arch_esf_t, cause); +GEN_OFFSET_STRUCT(arch_esf, ra); +GEN_OFFSET_STRUCT(arch_esf, gp); +GEN_OFFSET_STRUCT(arch_esf, t0); +GEN_OFFSET_STRUCT(arch_esf, t1); +GEN_OFFSET_STRUCT(arch_esf, t2); +GEN_OFFSET_STRUCT(arch_esf, t3); +GEN_OFFSET_STRUCT(arch_esf, t4); +GEN_OFFSET_STRUCT(arch_esf, t5); +GEN_OFFSET_STRUCT(arch_esf, t6); +GEN_OFFSET_STRUCT(arch_esf, t7); +GEN_OFFSET_STRUCT(arch_esf, t8); +GEN_OFFSET_STRUCT(arch_esf, t9); +GEN_OFFSET_STRUCT(arch_esf, a0); +GEN_OFFSET_STRUCT(arch_esf, a1); +GEN_OFFSET_STRUCT(arch_esf, a2); +GEN_OFFSET_STRUCT(arch_esf, a3); +GEN_OFFSET_STRUCT(arch_esf, v0); +GEN_OFFSET_STRUCT(arch_esf, v1); +GEN_OFFSET_STRUCT(arch_esf, at); +GEN_OFFSET_STRUCT(arch_esf, epc); +GEN_OFFSET_STRUCT(arch_esf, badvaddr); +GEN_OFFSET_STRUCT(arch_esf, hi); +GEN_OFFSET_STRUCT(arch_esf, lo); +GEN_OFFSET_STRUCT(arch_esf, status); +GEN_OFFSET_STRUCT(arch_esf, cause); -GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t))); +GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, STACK_ROUND_UP(sizeof(struct arch_esf))); GEN_ABS_SYM_END diff --git a/arch/mips/core/thread.c b/arch/mips/core/thread.c index e551674d5215d..7966ff462f5fd 100644 --- a/arch/mips/core/thread.c +++ b/arch/mips/core/thread.c @@ -19,11 +19,11 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr, k_thread_entry_t entry, void *p1, void *p2, void *p3) { - struct __esf *stack_init; + struct arch_esf *stack_init; /* Initial stack frame for thread */ - stack_init = (struct __esf *)Z_STACK_PTR_ALIGN( - Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr) + stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN( + Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr) ); /* Setup the initial stack frame */ diff --git a/arch/mips/include/kernel_arch_func.h b/arch/mips/include/kernel_arch_func.h index ad89f75dd7f14..b01cc1a4c65da 100644 --- a/arch/mips/include/kernel_arch_func.h +++ b/arch/mips/include/kernel_arch_func.h @@ -35,7 +35,7 @@ 
arch_thread_return_value_set(struct k_thread *thread, unsigned int value) } FUNC_NORETURN void z_mips_fatal_error(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); static inline bool arch_is_in_isr(void) { diff --git a/arch/nios2/core/exception.S b/arch/nios2/core/exception.S index 6b003262bb04d..ab2d3463dd437 100644 --- a/arch/nios2/core/exception.S +++ b/arch/nios2/core/exception.S @@ -35,35 +35,35 @@ GTEXT(_offload_routine) */ SECTION_FUNC(exception.entry, _exception) /* Reserve thread stack space for saving context */ - subi sp, sp, __z_arch_esf_t_SIZEOF + subi sp, sp, __struct_arch_esf_SIZEOF /* Preserve all caller-saved registers onto the thread's stack */ - stw ra, __z_arch_esf_t_ra_OFFSET(sp) - stw r1, __z_arch_esf_t_r1_OFFSET(sp) - stw r2, __z_arch_esf_t_r2_OFFSET(sp) - stw r3, __z_arch_esf_t_r3_OFFSET(sp) - stw r4, __z_arch_esf_t_r4_OFFSET(sp) - stw r5, __z_arch_esf_t_r5_OFFSET(sp) - stw r6, __z_arch_esf_t_r6_OFFSET(sp) - stw r7, __z_arch_esf_t_r7_OFFSET(sp) - stw r8, __z_arch_esf_t_r8_OFFSET(sp) - stw r9, __z_arch_esf_t_r9_OFFSET(sp) - stw r10, __z_arch_esf_t_r10_OFFSET(sp) - stw r11, __z_arch_esf_t_r11_OFFSET(sp) - stw r12, __z_arch_esf_t_r12_OFFSET(sp) - stw r13, __z_arch_esf_t_r13_OFFSET(sp) - stw r14, __z_arch_esf_t_r14_OFFSET(sp) - stw r15, __z_arch_esf_t_r15_OFFSET(sp) + stw ra, __struct_arch_esf_ra_OFFSET(sp) + stw r1, __struct_arch_esf_r1_OFFSET(sp) + stw r2, __struct_arch_esf_r2_OFFSET(sp) + stw r3, __struct_arch_esf_r3_OFFSET(sp) + stw r4, __struct_arch_esf_r4_OFFSET(sp) + stw r5, __struct_arch_esf_r5_OFFSET(sp) + stw r6, __struct_arch_esf_r6_OFFSET(sp) + stw r7, __struct_arch_esf_r7_OFFSET(sp) + stw r8, __struct_arch_esf_r8_OFFSET(sp) + stw r9, __struct_arch_esf_r9_OFFSET(sp) + stw r10, __struct_arch_esf_r10_OFFSET(sp) + stw r11, __struct_arch_esf_r11_OFFSET(sp) + stw r12, __struct_arch_esf_r12_OFFSET(sp) + stw r13, __struct_arch_esf_r13_OFFSET(sp) + stw r14, __struct_arch_esf_r14_OFFSET(sp) + stw r15, __struct_arch_esf_r15_OFFSET(sp) /* Store value of estatus control register */ rdctl et, estatus - stw et, __z_arch_esf_t_estatus_OFFSET(sp) + stw et, __struct_arch_esf_estatus_OFFSET(sp) /* ea-4 is the address of the instruction when the exception happened, * put this in the stack frame as well */ addi r15, ea, -4 - stw r15, __z_arch_esf_t_instr_OFFSET(sp) + stw r15, __struct_arch_esf_instr_OFFSET(sp) /* Figure out whether we are here because of an interrupt or an * exception. If an interrupt, switch stacks and enter IRQ handling @@ -157,7 +157,7 @@ not_interrupt: * * We earlier put ea - 4 in the stack frame, replace it with just ea */ - stw ea, __z_arch_esf_t_instr_OFFSET(sp) + stw ea, __struct_arch_esf_instr_OFFSET(sp) #ifdef CONFIG_IRQ_OFFLOAD /* Check the contents of _offload_routine. If non-NULL, jump into @@ -193,35 +193,35 @@ _exception_exit: * and return to the interrupted context */ /* Return address from the exception */ - ldw ea, __z_arch_esf_t_instr_OFFSET(sp) + ldw ea, __struct_arch_esf_instr_OFFSET(sp) /* Restore estatus * XXX is this right??? 
*/ - ldw r5, __z_arch_esf_t_estatus_OFFSET(sp) + ldw r5, __struct_arch_esf_estatus_OFFSET(sp) wrctl estatus, r5 /* Restore caller-saved registers */ - ldw ra, __z_arch_esf_t_ra_OFFSET(sp) - ldw r1, __z_arch_esf_t_r1_OFFSET(sp) - ldw r2, __z_arch_esf_t_r2_OFFSET(sp) - ldw r3, __z_arch_esf_t_r3_OFFSET(sp) - ldw r4, __z_arch_esf_t_r4_OFFSET(sp) - ldw r5, __z_arch_esf_t_r5_OFFSET(sp) - ldw r6, __z_arch_esf_t_r6_OFFSET(sp) - ldw r7, __z_arch_esf_t_r7_OFFSET(sp) - ldw r8, __z_arch_esf_t_r8_OFFSET(sp) - ldw r9, __z_arch_esf_t_r9_OFFSET(sp) - ldw r10, __z_arch_esf_t_r10_OFFSET(sp) - ldw r11, __z_arch_esf_t_r11_OFFSET(sp) - ldw r12, __z_arch_esf_t_r12_OFFSET(sp) - ldw r13, __z_arch_esf_t_r13_OFFSET(sp) - ldw r14, __z_arch_esf_t_r14_OFFSET(sp) - ldw r15, __z_arch_esf_t_r15_OFFSET(sp) + ldw ra, __struct_arch_esf_ra_OFFSET(sp) + ldw r1, __struct_arch_esf_r1_OFFSET(sp) + ldw r2, __struct_arch_esf_r2_OFFSET(sp) + ldw r3, __struct_arch_esf_r3_OFFSET(sp) + ldw r4, __struct_arch_esf_r4_OFFSET(sp) + ldw r5, __struct_arch_esf_r5_OFFSET(sp) + ldw r6, __struct_arch_esf_r6_OFFSET(sp) + ldw r7, __struct_arch_esf_r7_OFFSET(sp) + ldw r8, __struct_arch_esf_r8_OFFSET(sp) + ldw r9, __struct_arch_esf_r9_OFFSET(sp) + ldw r10, __struct_arch_esf_r10_OFFSET(sp) + ldw r11, __struct_arch_esf_r11_OFFSET(sp) + ldw r12, __struct_arch_esf_r12_OFFSET(sp) + ldw r13, __struct_arch_esf_r13_OFFSET(sp) + ldw r14, __struct_arch_esf_r14_OFFSET(sp) + ldw r15, __struct_arch_esf_r15_OFFSET(sp) /* Put the stack pointer back where it was when we entered * exception state */ - addi sp, sp, __z_arch_esf_t_SIZEOF + addi sp, sp, __struct_arch_esf_SIZEOF /* All done, copy estatus into status and transfer to ea */ eret diff --git a/arch/nios2/core/fatal.c b/arch/nios2/core/fatal.c index ac64b5bc30944..b531bb41e1789 100644 --- a/arch/nios2/core/fatal.c +++ b/arch/nios2/core/fatal.c @@ -12,7 +12,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { #if CONFIG_EXCEPTION_DEBUG if (esf != NULL) { @@ -102,7 +102,7 @@ static char *cause_str(uint32_t cause_code) } #endif -FUNC_NORETURN void _Fault(const z_arch_esf_t *esf) +FUNC_NORETURN void _Fault(const struct arch_esf *esf) { #if defined(CONFIG_PRINTK) || defined(CONFIG_LOG) /* Unfortunately, completely unavailable on Nios II/e cores */ diff --git a/arch/nios2/core/offsets/offsets.c b/arch/nios2/core/offsets/offsets.c index 8f3b3f748c1a1..9d381d87446c1 100644 --- a/arch/nios2/core/offsets/offsets.c +++ b/arch/nios2/core/offsets/offsets.c @@ -44,24 +44,24 @@ GEN_OFFSET_SYM(_callee_saved_t, sp); GEN_OFFSET_SYM(_callee_saved_t, key); GEN_OFFSET_SYM(_callee_saved_t, retval); -GEN_OFFSET_SYM(z_arch_esf_t, ra); -GEN_OFFSET_SYM(z_arch_esf_t, r1); -GEN_OFFSET_SYM(z_arch_esf_t, r2); -GEN_OFFSET_SYM(z_arch_esf_t, r3); -GEN_OFFSET_SYM(z_arch_esf_t, r4); -GEN_OFFSET_SYM(z_arch_esf_t, r5); -GEN_OFFSET_SYM(z_arch_esf_t, r6); -GEN_OFFSET_SYM(z_arch_esf_t, r7); -GEN_OFFSET_SYM(z_arch_esf_t, r8); -GEN_OFFSET_SYM(z_arch_esf_t, r9); -GEN_OFFSET_SYM(z_arch_esf_t, r10); -GEN_OFFSET_SYM(z_arch_esf_t, r11); -GEN_OFFSET_SYM(z_arch_esf_t, r12); -GEN_OFFSET_SYM(z_arch_esf_t, r13); -GEN_OFFSET_SYM(z_arch_esf_t, r14); -GEN_OFFSET_SYM(z_arch_esf_t, r15); -GEN_OFFSET_SYM(z_arch_esf_t, estatus); -GEN_OFFSET_SYM(z_arch_esf_t, instr); -GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t)); +GEN_OFFSET_STRUCT(arch_esf, ra); +GEN_OFFSET_STRUCT(arch_esf, r1); +GEN_OFFSET_STRUCT(arch_esf, r2); 
+GEN_OFFSET_STRUCT(arch_esf, r3); +GEN_OFFSET_STRUCT(arch_esf, r4); +GEN_OFFSET_STRUCT(arch_esf, r5); +GEN_OFFSET_STRUCT(arch_esf, r6); +GEN_OFFSET_STRUCT(arch_esf, r7); +GEN_OFFSET_STRUCT(arch_esf, r8); +GEN_OFFSET_STRUCT(arch_esf, r9); +GEN_OFFSET_STRUCT(arch_esf, r10); +GEN_OFFSET_STRUCT(arch_esf, r11); +GEN_OFFSET_STRUCT(arch_esf, r12); +GEN_OFFSET_STRUCT(arch_esf, r13); +GEN_OFFSET_STRUCT(arch_esf, r14); +GEN_OFFSET_STRUCT(arch_esf, r15); +GEN_OFFSET_STRUCT(arch_esf, estatus); +GEN_OFFSET_STRUCT(arch_esf, instr); +GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf)); GEN_ABS_SYM_END diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h index 2f2030c1c731c..2df268a1c6245 100644 --- a/arch/nios2/include/kernel_arch_func.h +++ b/arch/nios2/include/kernel_arch_func.h @@ -39,7 +39,7 @@ arch_thread_return_value_set(struct k_thread *thread, unsigned int value) } FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); static inline bool arch_is_in_isr(void) { diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 0993189b99cca..44f5d67d56cc3 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -88,7 +88,7 @@ config RISCV_SOC_HAS_ISR_STACKING guarded by !_ASMLANGUAGE. The ESF should be defined to account for the hardware stacked registers in the proper order as they are saved on the stack by the hardware, and the registers saved by the - software macros. The structure must be called '__esf'. + software macros. The structure must be called 'struct arch_esf'. config RISCV_SOC_HAS_CUSTOM_IRQ_HANDLING bool diff --git a/arch/riscv/core/coredump.c b/arch/riscv/core/coredump.c index f232816433a08..70d7a9976d4ad 100644 --- a/arch/riscv/core/coredump.c +++ b/arch/riscv/core/coredump.c @@ -67,7 +67,7 @@ struct riscv_arch_block { */ static struct riscv_arch_block arch_blk; -void arch_coredump_info_dump(const z_arch_esf_t *esf) +void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, diff --git a/arch/riscv/core/fatal.c b/arch/riscv/core/fatal.c index bd4ed3dca36bc..d6dd4bc38869a 100644 --- a/arch/riscv/core/fatal.c +++ b/arch/riscv/core/fatal.c @@ -30,15 +30,15 @@ static const struct z_exc_handle exceptions[] = { #endif /* Stack trace function */ -void z_riscv_unwind_stack(const z_arch_esf_t *esf); +void z_riscv_unwind_stack(const struct arch_esf *esf); -uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf) +uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf) { /* * Kernel stack pointer prior this exception i.e. before * storing the exception stack frame. 
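For context on the RISCV_SOC_HAS_ISR_STACKING help text above: an SoC that lets hardware stack part of the frame must provide its own struct arch_esf, laid out to match what the hardware pushes plus what the SOC_ISR_SW_STACKING macro saves. The sketch below only illustrates the shape implied by the offset symbols this patch generates for the generic RISC-V case (ra, t0-t6, a0-a7, mepc, mstatus, s0, optional sp); the member order, types, and the name riscv_esf_sketch are assumptions for illustration, not the definition shipped in the tree.

/* Illustrative only -- not the upstream struct arch_esf definition. */
struct riscv_esf_sketch {
	unsigned long ra;                      /* return address */
	unsigned long t0, t1, t2;              /* caller-saved temporaries */
	unsigned long a0, a1, a2, a3, a4, a5;  /* argument registers */
#if !defined(CONFIG_RISCV_ISA_RV32E)
	unsigned long t3, t4, t5, t6;          /* extra temporaries (absent on RV32E) */
	unsigned long a6, a7;                  /* extra argument registers (absent on RV32E) */
#endif
	unsigned long mepc;                    /* saved machine exception PC */
	unsigned long mstatus;                 /* saved machine status */
	unsigned long s0;                      /* saved frame pointer */
#ifdef CONFIG_USERSPACE
	unsigned long sp;                      /* preserved stack pointer */
#endif
};

Whatever the final layout, the generated __struct_arch_esf_*_OFFSET and __struct_arch_esf_SIZEOF symbols are what the assembly in this patch indexes, which is why the .S files and offsets.c change together.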
*/ - uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t); + uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf); #ifdef CONFIG_USERSPACE if ((esf->mstatus & MSTATUS_MPP) == PRV_U) { @@ -54,12 +54,12 @@ uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf) } FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { z_riscv_fatal_error_csf(reason, esf, NULL); } -FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf_t *esf, +FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf, const _callee_saved_t *csf) { #ifdef CONFIG_EXCEPTION_DEBUG @@ -152,14 +152,14 @@ static char *cause_str(unsigned long cause) } } -static bool bad_stack_pointer(z_arch_esf_t *esf) +static bool bad_stack_pointer(struct arch_esf *esf) { #ifdef CONFIG_PMP_STACK_GUARD /* * Check if the kernel stack pointer prior this exception (before * storing the exception stack frame) was in the stack guard area. */ - uintptr_t sp = (uintptr_t)esf + sizeof(z_arch_esf_t); + uintptr_t sp = (uintptr_t)esf + sizeof(struct arch_esf); #ifdef CONFIG_USERSPACE if (_current->arch.priv_stack_start != 0 && @@ -197,7 +197,7 @@ static bool bad_stack_pointer(z_arch_esf_t *esf) return false; } -void _Fault(z_arch_esf_t *esf) +void _Fault(struct arch_esf *esf) { #ifdef CONFIG_USERSPACE /* @@ -249,7 +249,7 @@ FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr) void z_impl_user_fault(unsigned int reason) { - z_arch_esf_t *oops_esf = _current->syscall_frame; + struct arch_esf *oops_esf = _current->syscall_frame; if (((_current->base.user_options & K_USER) != 0) && reason != K_ERR_STACK_CHK_FAIL) { diff --git a/arch/riscv/core/fpu.c b/arch/riscv/core/fpu.c index da5d07b314640..318e97e0002a9 100644 --- a/arch/riscv/core/fpu.c +++ b/arch/riscv/core/fpu.c @@ -204,7 +204,7 @@ void z_riscv_fpu_enter_exc(void) * Note that the exception depth count was not incremented before this call * as no further exceptions are expected before returning to normal mode. */ -void z_riscv_fpu_trap(z_arch_esf_t *esf) +void z_riscv_fpu_trap(struct arch_esf *esf) { __ASSERT((esf->mstatus & MSTATUS_FS) == 0 && (csr_read(mstatus) & MSTATUS_FS) == 0, @@ -293,7 +293,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level) * This is called on every exception exit except for z_riscv_fpu_trap(). * In that case the exception level of interest is 1 (soon to be 0). */ -void z_riscv_fpu_exit_exc(z_arch_esf_t *esf) +void z_riscv_fpu_exit_exc(struct arch_esf *esf) { if (fpu_access_allowed(1)) { esf->mstatus &= ~MSTATUS_FS; diff --git a/arch/riscv/core/isr.S b/arch/riscv/core/isr.S index e9a3d523127a4..8a829fd66b17f 100644 --- a/arch/riscv/core/isr.S +++ b/arch/riscv/core/isr.S @@ -24,22 +24,22 @@ /* Convenience macro for loading/storing register states. 
*/ #define DO_CALLER_SAVED(op) \ - RV_E( op t0, __z_arch_esf_t_t0_OFFSET(sp) );\ - RV_E( op t1, __z_arch_esf_t_t1_OFFSET(sp) );\ - RV_E( op t2, __z_arch_esf_t_t2_OFFSET(sp) );\ - RV_I( op t3, __z_arch_esf_t_t3_OFFSET(sp) );\ - RV_I( op t4, __z_arch_esf_t_t4_OFFSET(sp) );\ - RV_I( op t5, __z_arch_esf_t_t5_OFFSET(sp) );\ - RV_I( op t6, __z_arch_esf_t_t6_OFFSET(sp) );\ - RV_E( op a0, __z_arch_esf_t_a0_OFFSET(sp) );\ - RV_E( op a1, __z_arch_esf_t_a1_OFFSET(sp) );\ - RV_E( op a2, __z_arch_esf_t_a2_OFFSET(sp) );\ - RV_E( op a3, __z_arch_esf_t_a3_OFFSET(sp) );\ - RV_E( op a4, __z_arch_esf_t_a4_OFFSET(sp) );\ - RV_E( op a5, __z_arch_esf_t_a5_OFFSET(sp) );\ - RV_I( op a6, __z_arch_esf_t_a6_OFFSET(sp) );\ - RV_I( op a7, __z_arch_esf_t_a7_OFFSET(sp) );\ - RV_E( op ra, __z_arch_esf_t_ra_OFFSET(sp) ) + RV_E( op t0, __struct_arch_esf_t0_OFFSET(sp) );\ + RV_E( op t1, __struct_arch_esf_t1_OFFSET(sp) );\ + RV_E( op t2, __struct_arch_esf_t2_OFFSET(sp) );\ + RV_I( op t3, __struct_arch_esf_t3_OFFSET(sp) );\ + RV_I( op t4, __struct_arch_esf_t4_OFFSET(sp) );\ + RV_I( op t5, __struct_arch_esf_t5_OFFSET(sp) );\ + RV_I( op t6, __struct_arch_esf_t6_OFFSET(sp) );\ + RV_E( op a0, __struct_arch_esf_a0_OFFSET(sp) );\ + RV_E( op a1, __struct_arch_esf_a1_OFFSET(sp) );\ + RV_E( op a2, __struct_arch_esf_a2_OFFSET(sp) );\ + RV_E( op a3, __struct_arch_esf_a3_OFFSET(sp) );\ + RV_E( op a4, __struct_arch_esf_a4_OFFSET(sp) );\ + RV_E( op a5, __struct_arch_esf_a5_OFFSET(sp) );\ + RV_I( op a6, __struct_arch_esf_a6_OFFSET(sp) );\ + RV_I( op a7, __struct_arch_esf_a7_OFFSET(sp) );\ + RV_E( op ra, __struct_arch_esf_ra_OFFSET(sp) ) #ifdef CONFIG_EXCEPTION_DEBUG /* Convenience macro for storing callee saved register [s0 - s11] states. */ @@ -157,7 +157,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper) /* Save user stack value. Coming from user space, we know this * can't overflow the privileged stack. The esf will be allocated * later but it is safe to store our saved user sp here. */ - sr t0, (-__z_arch_esf_t_SIZEOF + __z_arch_esf_t_sp_OFFSET)(sp) + sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp) /* Make sure tls pointer is sane */ lr t0, ___cpu_t_current_OFFSET(s0) @@ -180,21 +180,21 @@ SECTION_FUNC(exception.entry, _isr_wrapper) SOC_ISR_SW_STACKING #else /* Save caller-saved registers on current thread stack. */ - addi sp, sp, -__z_arch_esf_t_SIZEOF + addi sp, sp, -__struct_arch_esf_SIZEOF DO_CALLER_SAVED(sr) ; #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */ /* Save s0 in the esf and load it with &_current_cpu. */ - sr s0, __z_arch_esf_t_s0_OFFSET(sp) + sr s0, __struct_arch_esf_s0_OFFSET(sp) get_current_cpu s0 /* Save MEPC register */ csrr t0, mepc - sr t0, __z_arch_esf_t_mepc_OFFSET(sp) + sr t0, __struct_arch_esf_mepc_OFFSET(sp) /* Save MSTATUS register */ csrr t2, mstatus - sr t2, __z_arch_esf_t_mstatus_OFFSET(sp) + sr t2, __struct_arch_esf_mstatus_OFFSET(sp) #if defined(CONFIG_FPU_SHARING) /* determine if FPU access was disabled */ @@ -301,7 +301,7 @@ no_fp: /* increment _current->arch.exception_depth */ #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE /* Handle context saving at SOC level. */ - addi a0, sp, __z_arch_esf_t_soc_context_OFFSET + addi a0, sp, __struct_arch_esf_soc_context_OFFSET jal ra, __soc_save_context #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */ @@ -351,7 +351,7 @@ no_fp: /* increment _current->arch.exception_depth */ /* * Call _Fault to handle exception. 
- * Stack pointer is pointing to a z_arch_esf_t structure, pass it + * Stack pointer is pointing to a struct_arch_esf structure, pass it * to _Fault (via register a0). * If _Fault shall return, set return address to * no_reschedule to restore stack. @@ -370,9 +370,9 @@ is_kernel_syscall: * It's safe to always increment by 4, even with compressed * instructions, because the ecall instruction is always 4 bytes. */ - lr t0, __z_arch_esf_t_mepc_OFFSET(sp) + lr t0, __struct_arch_esf_mepc_OFFSET(sp) addi t0, t0, 4 - sr t0, __z_arch_esf_t_mepc_OFFSET(sp) + sr t0, __struct_arch_esf_mepc_OFFSET(sp) #ifdef CONFIG_PMP_STACK_GUARD /* Re-activate PMP for m-mode */ @@ -383,7 +383,7 @@ is_kernel_syscall: #endif /* Determine what to do. Operation code is in t0. */ - lr t0, __z_arch_esf_t_t0_OFFSET(sp) + lr t0, __struct_arch_esf_t0_OFFSET(sp) .if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif beqz t0, do_fault @@ -396,8 +396,8 @@ is_kernel_syscall: #ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL li t1, RV_ECALL_SCHEDULE bne t0, t1, skip_schedule - lr a0, __z_arch_esf_t_a0_OFFSET(sp) - lr a1, __z_arch_esf_t_a1_OFFSET(sp) + lr a0, __struct_arch_esf_a0_OFFSET(sp) + lr a1, __struct_arch_esf_a1_OFFSET(sp) j reschedule skip_schedule: #endif @@ -408,7 +408,7 @@ skip_schedule: do_fault: /* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in A1. */ - lr a0, __z_arch_esf_t_a0_OFFSET(sp) + lr a0, __struct_arch_esf_a0_OFFSET(sp) 1: mv a1, sp #ifdef CONFIG_EXCEPTION_DEBUG @@ -431,8 +431,8 @@ do_irq_offload: * Routine pointer is in saved a0, argument in saved a1 * so we load them with a1/a0 (reversed). */ - lr a1, __z_arch_esf_t_a0_OFFSET(sp) - lr a0, __z_arch_esf_t_a1_OFFSET(sp) + lr a1, __struct_arch_esf_a0_OFFSET(sp) + lr a0, __struct_arch_esf_a1_OFFSET(sp) /* Increment _current_cpu->nested */ lw t1, ___cpu_t_nested_OFFSET(s0) @@ -474,18 +474,18 @@ is_user_syscall: * Same as for is_kernel_syscall: increment saved MEPC by 4 to * prevent triggering the same ecall again upon exiting the ISR. */ - lr t1, __z_arch_esf_t_mepc_OFFSET(sp) + lr t1, __struct_arch_esf_mepc_OFFSET(sp) addi t1, t1, 4 - sr t1, __z_arch_esf_t_mepc_OFFSET(sp) + sr t1, __struct_arch_esf_mepc_OFFSET(sp) /* Restore argument registers from user stack */ - lr a0, __z_arch_esf_t_a0_OFFSET(sp) - lr a1, __z_arch_esf_t_a1_OFFSET(sp) - lr a2, __z_arch_esf_t_a2_OFFSET(sp) - lr a3, __z_arch_esf_t_a3_OFFSET(sp) - lr a4, __z_arch_esf_t_a4_OFFSET(sp) - lr a5, __z_arch_esf_t_a5_OFFSET(sp) - lr t0, __z_arch_esf_t_t0_OFFSET(sp) + lr a0, __struct_arch_esf_a0_OFFSET(sp) + lr a1, __struct_arch_esf_a1_OFFSET(sp) + lr a2, __struct_arch_esf_a2_OFFSET(sp) + lr a3, __struct_arch_esf_a3_OFFSET(sp) + lr a4, __struct_arch_esf_a4_OFFSET(sp) + lr a5, __struct_arch_esf_a5_OFFSET(sp) + lr t0, __struct_arch_esf_t0_OFFSET(sp) #if defined(CONFIG_RISCV_ISA_RV32E) /* Stack alignment for RV32E is 4 bytes */ addi sp, sp, -4 @@ -519,7 +519,7 @@ valid_syscall_id: #endif /* CONFIG_RISCV_ISA_RV32E */ /* Update a0 (return value) on the stack */ - sr a0, __z_arch_esf_t_a0_OFFSET(sp) + sr a0, __struct_arch_esf_a0_OFFSET(sp) /* Disable IRQs again before leaving */ csrc mstatus, MSTATUS_IEN @@ -534,7 +534,7 @@ is_interrupt: * If we came from userspace then we need to reconfigure the * PMP for kernel mode stack guard. 
*/ - lr t0, __z_arch_esf_t_mstatus_OFFSET(sp) + lr t0, __struct_arch_esf_mstatus_OFFSET(sp) li t1, MSTATUS_MPP and t0, t0, t1 bnez t0, 1f @@ -665,7 +665,7 @@ no_reschedule: #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE /* Restore context at SOC level */ - addi a0, sp, __z_arch_esf_t_soc_context_OFFSET + addi a0, sp, __struct_arch_esf_soc_context_OFFSET jal ra, __soc_restore_context #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */ @@ -683,8 +683,8 @@ fp_trap_exit: #endif /* Restore MEPC and MSTATUS registers */ - lr t0, __z_arch_esf_t_mepc_OFFSET(sp) - lr t2, __z_arch_esf_t_mstatus_OFFSET(sp) + lr t0, __struct_arch_esf_mepc_OFFSET(sp) + lr t2, __struct_arch_esf_mstatus_OFFSET(sp) csrw mepc, t0 csrw mstatus, t2 @@ -711,7 +711,7 @@ fp_trap_exit: sb t1, %tprel_lo(is_user_mode)(t0) /* preserve stack pointer for next exception entry */ - add t0, sp, __z_arch_esf_t_SIZEOF + add t0, sp, __struct_arch_esf_SIZEOF sr t0, _curr_cpu_arch_user_exc_sp(s0) j 2f @@ -720,13 +720,13 @@ fp_trap_exit: * We are returning to kernel mode. Store the stack pointer to * be re-loaded further down. */ - addi t0, sp, __z_arch_esf_t_SIZEOF - sr t0, __z_arch_esf_t_sp_OFFSET(sp) + addi t0, sp, __struct_arch_esf_SIZEOF + sr t0, __struct_arch_esf_sp_OFFSET(sp) 2: #endif /* Restore s0 (it is no longer ours) */ - lr s0, __z_arch_esf_t_s0_OFFSET(sp) + lr s0, __struct_arch_esf_s0_OFFSET(sp) #ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING SOC_ISR_SW_UNSTACKING @@ -736,10 +736,10 @@ fp_trap_exit: #ifdef CONFIG_USERSPACE /* retrieve saved stack pointer */ - lr sp, __z_arch_esf_t_sp_OFFSET(sp) + lr sp, __struct_arch_esf_sp_OFFSET(sp) #else /* remove esf from the stack */ - addi sp, sp, __z_arch_esf_t_SIZEOF + addi sp, sp, __struct_arch_esf_SIZEOF #endif #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */ diff --git a/arch/riscv/core/offsets/offsets.c b/arch/riscv/core/offsets/offsets.c index 9bc9306c2e9d1..7b2d55953b29b 100644 --- a/arch/riscv/core/offsets/offsets.c +++ b/arch/riscv/core/offsets/offsets.c @@ -13,6 +13,7 @@ * structures. 
*/ +#include #include #include #include @@ -88,43 +89,43 @@ GEN_OFFSET_SYM(_thread_arch_t, exception_depth); #endif /* CONFIG_FPU_SHARING */ /* esf member offsets */ -GEN_OFFSET_SYM(z_arch_esf_t, ra); -GEN_OFFSET_SYM(z_arch_esf_t, t0); -GEN_OFFSET_SYM(z_arch_esf_t, t1); -GEN_OFFSET_SYM(z_arch_esf_t, t2); -GEN_OFFSET_SYM(z_arch_esf_t, a0); -GEN_OFFSET_SYM(z_arch_esf_t, a1); -GEN_OFFSET_SYM(z_arch_esf_t, a2); -GEN_OFFSET_SYM(z_arch_esf_t, a3); -GEN_OFFSET_SYM(z_arch_esf_t, a4); -GEN_OFFSET_SYM(z_arch_esf_t, a5); +GEN_OFFSET_STRUCT(arch_esf, ra); +GEN_OFFSET_STRUCT(arch_esf, t0); +GEN_OFFSET_STRUCT(arch_esf, t1); +GEN_OFFSET_STRUCT(arch_esf, t2); +GEN_OFFSET_STRUCT(arch_esf, a0); +GEN_OFFSET_STRUCT(arch_esf, a1); +GEN_OFFSET_STRUCT(arch_esf, a2); +GEN_OFFSET_STRUCT(arch_esf, a3); +GEN_OFFSET_STRUCT(arch_esf, a4); +GEN_OFFSET_STRUCT(arch_esf, a5); #if !defined(CONFIG_RISCV_ISA_RV32E) -GEN_OFFSET_SYM(z_arch_esf_t, t3); -GEN_OFFSET_SYM(z_arch_esf_t, t4); -GEN_OFFSET_SYM(z_arch_esf_t, t5); -GEN_OFFSET_SYM(z_arch_esf_t, t6); -GEN_OFFSET_SYM(z_arch_esf_t, a6); -GEN_OFFSET_SYM(z_arch_esf_t, a7); +GEN_OFFSET_STRUCT(arch_esf, t3); +GEN_OFFSET_STRUCT(arch_esf, t4); +GEN_OFFSET_STRUCT(arch_esf, t5); +GEN_OFFSET_STRUCT(arch_esf, t6); +GEN_OFFSET_STRUCT(arch_esf, a6); +GEN_OFFSET_STRUCT(arch_esf, a7); #endif /* !CONFIG_RISCV_ISA_RV32E */ -GEN_OFFSET_SYM(z_arch_esf_t, mepc); -GEN_OFFSET_SYM(z_arch_esf_t, mstatus); +GEN_OFFSET_STRUCT(arch_esf, mepc); +GEN_OFFSET_STRUCT(arch_esf, mstatus); -GEN_OFFSET_SYM(z_arch_esf_t, s0); +GEN_OFFSET_STRUCT(arch_esf, s0); #ifdef CONFIG_USERSPACE -GEN_OFFSET_SYM(z_arch_esf_t, sp); +GEN_OFFSET_STRUCT(arch_esf, sp); #endif #if defined(CONFIG_RISCV_SOC_CONTEXT_SAVE) -GEN_OFFSET_SYM(z_arch_esf_t, soc_context); +GEN_OFFSET_STRUCT(arch_esf, soc_context); #endif #if defined(CONFIG_RISCV_SOC_OFFSETS) GEN_SOC_OFFSET_SYMS(); #endif -GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t)); +GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf)); #ifdef CONFIG_EXCEPTION_DEBUG GEN_ABSOLUTE_SYM(__callee_saved_t_SIZEOF, ROUND_UP(sizeof(_callee_saved_t), ARCH_STACK_PTR_ALIGN)); diff --git a/arch/riscv/core/stacktrace.c b/arch/riscv/core/stacktrace.c index 7dbcd8067cc34..cda6748c36371 100644 --- a/arch/riscv/core/stacktrace.c +++ b/arch/riscv/core/stacktrace.c @@ -12,7 +12,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); -uintptr_t z_riscv_get_sp_before_exc(const z_arch_esf_t *esf); +uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf); #if __riscv_xlen == 32 #define PR_REG "%08" PRIxPTR @@ -42,7 +42,7 @@ struct stackframe { LOG_ERR(" %2d: " SFP_FMT PR_REG " ra: " PR_REG, idx, sfp, ra) #endif -static bool in_stack_bound(uintptr_t addr, const z_arch_esf_t *esf) +static bool in_stack_bound(uintptr_t addr, const struct arch_esf *esf) { #ifdef CONFIG_THREAD_STACK_INFO uintptr_t start, end; @@ -86,7 +86,7 @@ static inline bool in_text_region(uintptr_t addr) } #ifdef CONFIG_FRAME_POINTER -void z_riscv_unwind_stack(const z_arch_esf_t *esf) +void z_riscv_unwind_stack(const struct arch_esf *esf) { uintptr_t fp = esf->s0; uintptr_t ra; @@ -115,7 +115,7 @@ void z_riscv_unwind_stack(const z_arch_esf_t *esf) LOG_ERR(""); } #else /* !CONFIG_FRAME_POINTER */ -void z_riscv_unwind_stack(const z_arch_esf_t *esf) +void z_riscv_unwind_stack(const struct arch_esf *esf) { uintptr_t sp = z_riscv_get_sp_before_exc(esf); uintptr_t ra; diff --git a/arch/riscv/core/thread.c b/arch/riscv/core/thread.c index 38d5dbde092c6..60f73a5f531e4 100644 --- a/arch/riscv/core/thread.c +++ 
b/arch/riscv/core/thread.c @@ -23,15 +23,15 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void *p1, void *p2, void *p3) { extern void z_riscv_thread_start(void); - struct __esf *stack_init; + struct arch_esf *stack_init; #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE const struct soc_esf soc_esf_init = {SOC_ESF_INIT}; #endif /* Initial stack frame for thread */ - stack_init = (struct __esf *)Z_STACK_PTR_ALIGN( - Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr) + stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN( + Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr) ); /* Setup the initial stack frame */ diff --git a/arch/riscv/include/kernel_arch_func.h b/arch/riscv/include/kernel_arch_func.h index bdfc0527b9502..c5ed6ff3f7f42 100644 --- a/arch/riscv/include/kernel_arch_func.h +++ b/arch/riscv/include/kernel_arch_func.h @@ -71,9 +71,9 @@ arch_switch(void *switch_to, void **switched_from) /* Thin wrapper around z_riscv_fatal_error_csf */ FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); -FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const z_arch_esf_t *esf, +FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arch_esf *esf, const _callee_saved_t *csf); static inline bool arch_is_in_isr(void) diff --git a/arch/sparc/core/fatal.c b/arch/sparc/core/fatal.c index 55100606b9242..40fd9d16792bc 100644 --- a/arch/sparc/core/fatal.c +++ b/arch/sparc/core/fatal.c @@ -122,7 +122,7 @@ static const struct { { .tt = 0x0A, .desc = "tag_overflow", }, }; -static void print_trap_type(const z_arch_esf_t *esf) +static void print_trap_type(const struct arch_esf *esf) { const int tt = (esf->tbr & TBR_TT) >> TBR_TT_BIT; const char *desc = "unknown"; @@ -142,7 +142,7 @@ static void print_trap_type(const z_arch_esf_t *esf) LOG_ERR("tt = 0x%02X, %s", tt, desc); } -static void print_integer_registers(const z_arch_esf_t *esf) +static void print_integer_registers(const struct arch_esf *esf) { const struct savearea *flushed = (struct savearea *) esf->out[6]; @@ -159,7 +159,7 @@ static void print_integer_registers(const z_arch_esf_t *esf) } } -static void print_special_registers(const z_arch_esf_t *esf) +static void print_special_registers(const struct arch_esf *esf) { LOG_ERR( "psr: %08x wim: %08x tbr: %08x y: %08x", @@ -168,7 +168,7 @@ static void print_special_registers(const z_arch_esf_t *esf) LOG_ERR(" pc: %08x npc: %08x", esf->pc, esf->npc); } -static void print_backtrace(const z_arch_esf_t *esf) +static void print_backtrace(const struct arch_esf *esf) { const int MAX_LOGLINES = 40; const struct savearea *s = (struct savearea *) esf->out[6]; @@ -190,7 +190,7 @@ static void print_backtrace(const z_arch_esf_t *esf) } } -static void print_all(const z_arch_esf_t *esf) +static void print_all(const struct arch_esf *esf) { LOG_ERR(""); print_trap_type(esf); @@ -205,7 +205,7 @@ static void print_all(const z_arch_esf_t *esf) #endif /* CONFIG_EXCEPTION_DEBUG */ FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { #if CONFIG_EXCEPTION_DEBUG if (esf != NULL) { diff --git a/arch/sparc/core/fault_trap.S b/arch/sparc/core/fault_trap.S index c1a8977ba233c..53b3d9f0b98cd 100644 --- a/arch/sparc/core/fault_trap.S +++ b/arch/sparc/core/fault_trap.S @@ -72,7 +72,7 @@ SECTION_FUNC(TEXT, __sparc_trap_except_reason) mov %l5, %g3 /* Allocate an ABI stack frame and exception stack frame */ - sub %fp, 96 + __z_arch_esf_t_SIZEOF, %sp + sub %fp, 96 + 
__struct_arch_esf_SIZEOF, %sp /* * %fp: %sp of interrupted task * %sp: %sp of interrupted task - ABI_frame - esf @@ -81,19 +81,19 @@ SECTION_FUNC(TEXT, __sparc_trap_except_reason) mov %l7, %o0 /* Fill in the content of the exception stack frame */ #if defined(CONFIG_EXTRA_EXCEPTION_INFO) - std %i0, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x00] - std %i2, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x08] - std %i4, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x10] - std %i6, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x18] - std %g0, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x00] - std %g2, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x08] - std %g4, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x10] - std %g6, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x18] + std %i0, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x00] + std %i2, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x08] + std %i4, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x10] + std %i6, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x18] + std %g0, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x00] + std %g2, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x08] + std %g4, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x10] + std %g6, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x18] #endif - std %l0, [%sp + 96 + __z_arch_esf_t_psr_OFFSET] /* psr pc */ - std %l2, [%sp + 96 + __z_arch_esf_t_npc_OFFSET] /* npc wim */ + std %l0, [%sp + 96 + __struct_arch_esf_psr_OFFSET] /* psr pc */ + std %l2, [%sp + 96 + __struct_arch_esf_npc_OFFSET] /* npc wim */ rd %y, %l7 - std %l6, [%sp + 96 + __z_arch_esf_t_tbr_OFFSET] /* tbr y */ + std %l6, [%sp + 96 + __struct_arch_esf_tbr_OFFSET] /* tbr y */ /* Enable traps, raise PIL to mask all maskable interrupts. */ or %l0, PSR_PIL, %o2 diff --git a/arch/sparc/core/offsets/offsets.c b/arch/sparc/core/offsets/offsets.c index 3796117ac0945..023ef7452c443 100644 --- a/arch/sparc/core/offsets/offsets.c +++ b/arch/sparc/core/offsets/offsets.c @@ -31,11 +31,11 @@ GEN_OFFSET_SYM(_callee_saved_t, i6); GEN_OFFSET_SYM(_callee_saved_t, o6); /* esf member offsets */ -GEN_OFFSET_SYM(z_arch_esf_t, out); -GEN_OFFSET_SYM(z_arch_esf_t, global); -GEN_OFFSET_SYM(z_arch_esf_t, npc); -GEN_OFFSET_SYM(z_arch_esf_t, psr); -GEN_OFFSET_SYM(z_arch_esf_t, tbr); -GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t))); +GEN_OFFSET_STRUCT(arch_esf, out); +GEN_OFFSET_STRUCT(arch_esf, global); +GEN_OFFSET_STRUCT(arch_esf, npc); +GEN_OFFSET_STRUCT(arch_esf, psr); +GEN_OFFSET_STRUCT(arch_esf, tbr); +GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf)); GEN_ABS_SYM_END diff --git a/arch/sparc/include/kernel_arch_func.h b/arch/sparc/include/kernel_arch_func.h index 41f48ccc44a4e..8b79b130ad655 100644 --- a/arch/sparc/include/kernel_arch_func.h +++ b/arch/sparc/include/kernel_arch_func.h @@ -43,7 +43,7 @@ static inline void arch_switch(void *switch_to, void **switched_from) } FUNC_NORETURN void z_sparc_fatal_error(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); static inline bool arch_is_in_isr(void) { diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index 5db20eb5deb27..1561b51b84d54 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -35,7 +35,7 @@ FUNC_NORETURN void arch_system_halt(unsigned int reason) #ifdef CONFIG_THREAD_STACK_INFO -static inline uintptr_t esf_get_sp(const z_arch_esf_t *esf) +static inline uintptr_t esf_get_sp(const struct arch_esf *esf) { #ifdef CONFIG_X86_64 return esf->rsp; @@ -122,7 +122,7 @@ bool z_x86_check_guard_page(uintptr_t addr) 
#ifdef CONFIG_EXCEPTION_DEBUG -static inline uintptr_t esf_get_code(const z_arch_esf_t *esf) +static inline uintptr_t esf_get_code(const struct arch_esf *esf) { #ifdef CONFIG_X86_64 return esf->code; @@ -188,7 +188,7 @@ static void unwind_stack(uintptr_t base_ptr, uint16_t cs) } #endif /* CONFIG_EXCEPTION_STACK_TRACE */ -static inline uintptr_t get_cr3(const z_arch_esf_t *esf) +static inline uintptr_t get_cr3(const struct arch_esf *esf) { #if defined(CONFIG_USERSPACE) && defined(CONFIG_X86_KPTI) /* If the interrupted thread was in user mode, we did a page table @@ -206,14 +206,14 @@ static inline uintptr_t get_cr3(const z_arch_esf_t *esf) return z_x86_cr3_get(); } -static inline pentry_t *get_ptables(const z_arch_esf_t *esf) +static inline pentry_t *get_ptables(const struct arch_esf *esf) { return z_mem_virt_addr(get_cr3(esf)); } #ifdef CONFIG_X86_64 __pinned_func -static void dump_regs(const z_arch_esf_t *esf) +static void dump_regs(const struct arch_esf *esf) { LOG_ERR("RAX: 0x%016lx RBX: 0x%016lx RCX: 0x%016lx RDX: 0x%016lx", esf->rax, esf->rbx, esf->rcx, esf->rdx); @@ -236,7 +236,7 @@ static void dump_regs(const z_arch_esf_t *esf) } #else /* 32-bit */ __pinned_func -static void dump_regs(const z_arch_esf_t *esf) +static void dump_regs(const struct arch_esf *esf) { LOG_ERR("EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x", esf->eax, esf->ebx, esf->ecx, esf->edx); @@ -327,7 +327,7 @@ static void log_exception(uintptr_t vector, uintptr_t code) } __pinned_func -static void dump_page_fault(z_arch_esf_t *esf) +static void dump_page_fault(struct arch_esf *esf) { uintptr_t err; void *cr2; @@ -362,7 +362,7 @@ static void dump_page_fault(z_arch_esf_t *esf) __pinned_func FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { if (esf != NULL) { #ifdef CONFIG_EXCEPTION_DEBUG @@ -385,7 +385,7 @@ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, __pinned_func FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { #ifdef CONFIG_EXCEPTION_DEBUG log_exception(vector, esf_get_code(esf)); @@ -404,7 +404,7 @@ static const struct z_exc_handle exceptions[] = { #endif __pinned_func -void z_x86_page_fault_handler(z_arch_esf_t *esf) +void z_x86_page_fault_handler(struct arch_esf *esf) { #ifdef CONFIG_DEMAND_PAGING if ((esf->errorCode & PF_P) == 0) { @@ -488,7 +488,7 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf) } __pinned_func -void z_x86_do_kernel_oops(const z_arch_esf_t *esf) +void z_x86_do_kernel_oops(const struct arch_esf *esf) { uintptr_t reason; diff --git a/arch/x86/core/ia32/coredump.c b/arch/x86/core/ia32/coredump.c index b49373aab771e..fb7d0fcfd8cc5 100644 --- a/arch/x86/core/ia32/coredump.c +++ b/arch/x86/core/ia32/coredump.c @@ -34,7 +34,7 @@ struct x86_arch_block { */ static struct x86_arch_block arch_blk; -void arch_coredump_info_dump(const z_arch_esf_t *esf) +void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, diff --git a/arch/x86/core/ia32/excstub.S b/arch/x86/core/ia32/excstub.S index 9c5f3f0319152..6c0a13a37cde3 100644 --- a/arch/x86/core/ia32/excstub.S +++ b/arch/x86/core/ia32/excstub.S @@ -161,12 +161,12 @@ SECTION_FUNC(PINNED_TEXT, _exception_enter) /* ESP is still pointing to the ESF at this point */ - testl $0x200, __z_arch_esf_t_eflags_OFFSET(%esp) + testl $0x200, __struct_arch_esf_eflags_OFFSET(%esp) je allDone sti allDone: - pushl %esp /* push z_arch_esf_t * parameter 
*/ + pushl %esp /* push struct_arch_esf * parameter */ call *%ecx /* call exception handler */ addl $0x4, %esp diff --git a/arch/x86/core/ia32/fatal.c b/arch/x86/core/ia32/fatal.c index 597f21a01adc6..e510d6b1a2dbc 100644 --- a/arch/x86/core/ia32/fatal.c +++ b/arch/x86/core/ia32/fatal.c @@ -27,10 +27,10 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); unsigned int z_x86_exception_vector; #endif -__weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); } +__weak void z_debug_fatal_hook(const struct arch_esf *esf) { ARG_UNUSED(esf); } __pinned_func -void z_x86_spurious_irq(const z_arch_esf_t *esf) +void z_x86_spurious_irq(const struct arch_esf *esf) { int vector = z_irq_controller_isr_vector_get(); @@ -46,7 +46,7 @@ void arch_syscall_oops(void *ssf) { struct _x86_syscall_stack_frame *ssf_ptr = (struct _x86_syscall_stack_frame *)ssf; - z_arch_esf_t oops = { + struct arch_esf oops = { .eip = ssf_ptr->eip, .cs = ssf_ptr->cs, .eflags = ssf_ptr->eflags @@ -66,7 +66,7 @@ NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ, #if CONFIG_EXCEPTION_DEBUG __pinned_func FUNC_NORETURN static void generic_exc_handle(unsigned int vector, - const z_arch_esf_t *pEsf) + const struct arch_esf *pEsf) { #ifdef CONFIG_DEBUG_COREDUMP z_x86_exception_vector = vector; @@ -77,7 +77,7 @@ FUNC_NORETURN static void generic_exc_handle(unsigned int vector, #define _EXC_FUNC(vector) \ __pinned_func \ -FUNC_NORETURN __used static void handle_exc_##vector(const z_arch_esf_t *pEsf) \ +FUNC_NORETURN __used static void handle_exc_##vector(const struct arch_esf *pEsf) \ { \ generic_exc_handle(vector, pEsf); \ } @@ -120,7 +120,7 @@ EXC_FUNC_NOCODE(IV_MACHINE_CHECK, 0); _EXCEPTION_CONNECT_CODE(z_x86_page_fault_handler, IV_PAGE_FAULT, 0); #ifdef CONFIG_X86_ENABLE_TSS -static __pinned_noinit volatile z_arch_esf_t _df_esf; +static __pinned_noinit volatile struct arch_esf _df_esf; /* Very tiny stack; just enough for the bogus error code pushed by the CPU * and a frame pointer push by the compiler. All df_handler_top does is @@ -182,14 +182,14 @@ static __used void df_handler_bottom(void) reason = K_ERR_STACK_CHK_FAIL; } #endif - z_x86_fatal_error(reason, (z_arch_esf_t *)&_df_esf); + z_x86_fatal_error(reason, (struct arch_esf *)&_df_esf); } __pinned_func static FUNC_NORETURN __used void df_handler_top(void) { /* State of the system when the double-fault forced a task switch - * will be in _main_tss. Set up a z_arch_esf_t and copy system state into + * will be in _main_tss. Set up a struct arch_esf and copy system state into * it */ _df_esf.esp = _main_tss.esp; diff --git a/arch/x86/core/ia32/float.c b/arch/x86/core/ia32/float.c index a33a40a0a7832..c89bf7accd5a1 100644 --- a/arch/x86/core/ia32/float.c +++ b/arch/x86/core/ia32/float.c @@ -302,7 +302,7 @@ int z_float_disable(struct k_thread *thread) * instruction is executed while CR0[TS]=1. The handler then enables the * current thread to use all supported floating point registers. */ -void _FpNotAvailableExcHandler(z_arch_esf_t *pEsf) +void _FpNotAvailableExcHandler(struct arch_esf *pEsf) { ARG_UNUSED(pEsf); diff --git a/arch/x86/core/ia32/gdbstub.c b/arch/x86/core/ia32/gdbstub.c index 692ea78baf41c..252f15d79ffeb 100644 --- a/arch/x86/core/ia32/gdbstub.c +++ b/arch/x86/core/ia32/gdbstub.c @@ -78,7 +78,7 @@ static unsigned int get_exception(unsigned int vector) /* * Debug exception handler. 
*/ -static void z_gdb_interrupt(unsigned int vector, z_arch_esf_t *esf) +static void z_gdb_interrupt(unsigned int vector, struct arch_esf *esf) { debug_ctx.exception = get_exception(vector); @@ -212,7 +212,7 @@ size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen, return ret; } -static __used void z_gdb_debug_isr(z_arch_esf_t *esf) +static __used void z_gdb_debug_isr(struct arch_esf *esf) { #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:enter %s (IV_DEBUG)\n", __func__); @@ -225,7 +225,7 @@ static __used void z_gdb_debug_isr(z_arch_esf_t *esf) #endif } -static __used void z_gdb_break_isr(z_arch_esf_t *esf) +static __used void z_gdb_break_isr(struct arch_esf *esf) { #ifdef CONFIG_GDBSTUB_TRACE printk("gdbstub:enter %s (IV_BREAKPOINT)\n", __func__); diff --git a/arch/x86/core/intel64/coredump.c b/arch/x86/core/intel64/coredump.c index f1c1a15eaff37..65a9306ca07da 100644 --- a/arch/x86/core/intel64/coredump.c +++ b/arch/x86/core/intel64/coredump.c @@ -46,7 +46,7 @@ struct x86_64_arch_block { */ static struct x86_64_arch_block arch_blk; -void arch_coredump_info_dump(const z_arch_esf_t *esf) +void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, diff --git a/arch/x86/core/intel64/fatal.c b/arch/x86/core/intel64/fatal.c index 9dd97614dc1e3..9eed95bfaa3ad 100644 --- a/arch/x86/core/intel64/fatal.c +++ b/arch/x86/core/intel64/fatal.c @@ -13,14 +13,14 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); /* NMI handlers should override weak implementation * return true if NMI is handled, false otherwise */ -__weak bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf) +__weak bool z_x86_do_kernel_nmi(const struct arch_esf *esf) { ARG_UNUSED(esf); return false; } -void z_x86_exception(z_arch_esf_t *esf) +void z_x86_exception(struct arch_esf *esf) { switch (esf->vector) { case Z_X86_OOPS_VECTOR: diff --git a/arch/x86/core/offsets/ia32_offsets.c b/arch/x86/core/offsets/ia32_offsets.c index 61a7f25bb2a40..2dfbb5c38ef28 100644 --- a/arch/x86/core/offsets/ia32_offsets.c +++ b/arch/x86/core/offsets/ia32_offsets.c @@ -52,7 +52,6 @@ GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF, GEN_OFFSET_SYM(_callee_saved_t, esp); -/* z_arch_esf_t structure member offsets */ - -GEN_OFFSET_SYM(z_arch_esf_t, eflags); +/* struct arch_esf structure member offsets */ +GEN_OFFSET_STRUCT(arch_esf, eflags); #endif /* _X86_OFFSETS_INC_ */ diff --git a/arch/x86/include/ia32/exception.h b/arch/x86/include/ia32/exception.h index 27119709c2ade..1b0ce9ee3b52d 100644 --- a/arch/x86/include/ia32/exception.h +++ b/arch/x86/include/ia32/exception.h @@ -62,7 +62,7 @@ * Assign an exception handler to a particular vector in the IDT. 
* * @param handler A handler function of the prototype - * void handler(const z_arch_esf_t *esf) + * void handler(const struct arch_esf *esf) * @param vector Vector index in the IDT */ #define _EXCEPTION_CONNECT_NOCODE(handler, vector, dpl) \ @@ -75,7 +75,7 @@ * The error code will be accessible in esf->errorCode * * @param handler A handler function of the prototype - * void handler(const z_arch_esf_t *esf) + * void handler(const struct arch_esf *esf) * @param vector Vector index in the IDT */ #define _EXCEPTION_CONNECT_CODE(handler, vector, dpl) \ diff --git a/arch/x86/include/intel64/kernel_arch_func.h b/arch/x86/include/intel64/kernel_arch_func.h index a749a9b9af178..abf022fe5fd55 100644 --- a/arch/x86/include/intel64/kernel_arch_func.h +++ b/arch/x86/include/intel64/kernel_arch_func.h @@ -36,7 +36,7 @@ void x86_sse_init(struct k_thread *thread); void z_x86_syscall_entry_stub(void); -bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf); +bool z_x86_do_kernel_nmi(const struct arch_esf *esf); #endif /* _ASMLANGUAGE */ diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h index 00b411978ec65..9bc7cfe4212f8 100644 --- a/arch/x86/include/kernel_arch_func.h +++ b/arch/x86/include/kernel_arch_func.h @@ -49,16 +49,16 @@ void z_x86_early_serial_init(void); * interesting info and call z_x86_fatal_error() */ FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector, - const z_arch_esf_t *esf); + const struct arch_esf *esf); /* Called upon unrecoverable error; dump registers and transfer control to * kernel via z_fatal_error() */ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); /* Common handling for page fault exceptions */ -void z_x86_page_fault_handler(z_arch_esf_t *esf); +void z_x86_page_fault_handler(struct arch_esf *esf); #ifdef CONFIG_THREAD_STACK_INFO /** @@ -90,7 +90,7 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread); #endif /* CONFIG_USERSPACE */ -void z_x86_do_kernel_oops(const z_arch_esf_t *esf); +void z_x86_do_kernel_oops(const struct arch_esf *esf); /* * Find a free IRQ vector at the specified priority, or return -1 if none left. 
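The x86 hunks above rename only the ESF type used in handler prototypes; the register layout and the calling convention are unchanged. Below is a minimal sketch of how an out-of-tree IA-32 exception handler migrates, assuming the field names (eip, cs, eflags, errorCode) now come from the relocated definition in include/zephyr/arch/x86/ia32/exception.h; the handler body mirrors the static_idt test updated later in this patch and is illustrative only, not part of the change itself.

/*
 * Old prototype:  void exc_divide_error_handler(z_arch_esf_t *p_esf);
 * New prototype below. z_arch_esf_t still compiles for now through the
 * __deprecated typedef added in arch_interface.h, but using it will
 * trigger a deprecation warning.
 */
#include <zephyr/kernel.h>
#include <zephyr/arch/exception.h>

void exc_divide_error_handler(struct arch_esf *p_esf)
{
	/* Advance past the faulting instruction to show the handler ran,
	 * exactly as tests/arch/x86/static_idt does.
	 */
	p_esf->eip += 2;
}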
diff --git a/arch/xtensa/core/coredump.c b/arch/xtensa/core/coredump.c index 7f010eb1954a7..0ee1f8992a6b7 100644 --- a/arch/xtensa/core/coredump.c +++ b/arch/xtensa/core/coredump.c @@ -91,7 +91,7 @@ struct xtensa_arch_block { */ static struct xtensa_arch_block arch_blk; -void arch_coredump_info_dump(const z_arch_esf_t *esf) +void arch_coredump_info_dump(const struct arch_esf *esf) { struct coredump_arch_hdr_t hdr = { .id = COREDUMP_ARCH_HDR_ID, diff --git a/arch/xtensa/core/fatal.c b/arch/xtensa/core/fatal.c index 0d5da1ca8179d..39e84b86f6b13 100644 --- a/arch/xtensa/core/fatal.c +++ b/arch/xtensa/core/fatal.c @@ -84,7 +84,7 @@ char *xtensa_exccause(unsigned int cause_code) #endif } -void xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf) +void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf) { #ifdef CONFIG_EXCEPTION_DEBUG if (esf) { diff --git a/arch/xtensa/core/gdbstub.c b/arch/xtensa/core/gdbstub.c index 4df72f0d355c7..0ebc9cc68ccd5 100644 --- a/arch/xtensa/core/gdbstub.c +++ b/arch/xtensa/core/gdbstub.c @@ -422,7 +422,7 @@ static unsigned int get_gdb_exception_reason(unsigned int reason) * @param ctx GDB context * @param stack Pointer to the stack frame */ -static void copy_to_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack) +static void copy_to_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack) { struct xtensa_register *reg; int idx, num_laddr_regs; @@ -513,7 +513,7 @@ static void copy_to_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack) * @param ctx GDB context * @param stack Pointer to the stack frame */ -static void restore_from_ctx(struct gdb_ctx *ctx, const z_arch_esf_t *stack) +static void restore_from_ctx(struct gdb_ctx *ctx, const struct arch_esf *stack) { struct xtensa_register *reg; int idx, num_laddr_regs; @@ -913,7 +913,7 @@ int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type, return ret; } -void z_gdb_isr(z_arch_esf_t *esf) +void z_gdb_isr(struct arch_esf *esf) { uint32_t reg; diff --git a/arch/xtensa/core/vector_handlers.c b/arch/xtensa/core/vector_handlers.c index dd3c0c00f5246..471a67c086304 100644 --- a/arch/xtensa/core/vector_handlers.c +++ b/arch/xtensa/core/vector_handlers.c @@ -37,7 +37,7 @@ static const struct z_exc_handle exceptions[] = { }; #endif /* CONFIG_USERSPACE */ -void xtensa_dump_stack(const z_arch_esf_t *stack) +void xtensa_dump_stack(const void *stack) { _xtensa_irq_stack_frame_raw_t *frame = (void *)stack; _xtensa_irq_bsa_t *bsa = frame->ptr_to_bsa; @@ -218,9 +218,10 @@ static inline DEF_INT_C_HANDLER(1) * different because exceptions and interrupts land at the same * vector; other interrupt levels have their own vectors. */ -void *xtensa_excint1_c(int *interrupted_stack) +void *xtensa_excint1_c(void *esf) { int cause; + int *interrupted_stack = &((struct arch_esf *)esf)->dummy; _xtensa_irq_bsa_t *bsa = (void *)*(int **)interrupted_stack; bool is_fatal_error = false; bool is_dblexc = false; @@ -385,7 +386,7 @@ void *xtensa_excint1_c(int *interrupted_stack) #if defined(CONFIG_GDBSTUB) void *xtensa_debugint_c(int *interrupted_stack) { - extern void z_gdb_isr(z_arch_esf_t *esf); + extern void z_gdb_isr(struct arch_esf *esf); z_gdb_isr((void *)interrupted_stack); diff --git a/arch/xtensa/include/xtensa_internal.h b/arch/xtensa/include/xtensa_internal.h index 60b512ab57135..f3e1ab4f44e20 100644 --- a/arch/xtensa/include/xtensa_internal.h +++ b/arch/xtensa/include/xtensa_internal.h @@ -25,7 +25,7 @@ * * @param stack Pointer to stack frame. 
*/ -void xtensa_dump_stack(const z_arch_esf_t *stack); +void xtensa_dump_stack(const void *stack); /** * @brief Get string description from an exception code. @@ -43,7 +43,7 @@ char *xtensa_exccause(unsigned int cause_code); * @param esf Exception context, with details and partial or full register * state when the error occurred. May in some cases be NULL. */ -void xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +void xtensa_fatal_error(unsigned int reason, const struct arch_esf *esf); /** * @brief Perform a one-way transition from supervisor to user mode. diff --git a/doc/releases/migration-guide-3.7.rst b/doc/releases/migration-guide-3.7.rst index 0fa08a6af9db0..b7f2b3630eeb7 100644 --- a/doc/releases/migration-guide-3.7.rst +++ b/doc/releases/migration-guide-3.7.rst @@ -59,6 +59,11 @@ Build System Kernel ****** +* All architectures are now required to define the new ``struct arch_esf``, which describes the members + of a stack frame. This new struct replaces the named struct ``z_arch_esf_t``. (:github:`73593`) + +* The named struct ``z_arch_esf_t`` is now deprecated. Use ``struct arch_esf`` instead. (:github:`73593`) + Boards ****** diff --git a/drivers/edac/edac_ibecc.c b/drivers/edac/edac_ibecc.c index 06033a2bbb407..f8ea556f434ba 100644 --- a/drivers/edac/edac_ibecc.c +++ b/drivers/edac/edac_ibecc.c @@ -387,7 +387,7 @@ static bool handle_nmi(void) return true; } -bool z_x86_do_kernel_nmi(const z_arch_esf_t *esf) +bool z_x86_do_kernel_nmi(const struct arch_esf *esf) { const struct device *const dev = DEVICE_DT_GET(DEVICE_NODE); struct ibecc_data *data = dev->data; diff --git a/include/zephyr/arch/arc/v2/exception.h b/include/zephyr/arch/arc/v2/exception.h index 553024fa3a8c6..f606cf1ea9a7c 100644 --- a/include/zephyr/arch/arc/v2/exception.h +++ b/include/zephyr/arch/arc/v2/exception.h @@ -18,11 +18,6 @@ extern "C" { #endif -#ifdef _ASMLANGUAGE -#else -typedef struct _irq_stack_frame z_arch_esf_t; -#endif - #ifdef __cplusplus } #endif diff --git a/include/zephyr/arch/arch_inlines.h b/include/zephyr/arch/arch_inlines.h index 4e1cd149dfb4d..0f32159e2f1bf 100644 --- a/include/zephyr/arch/arch_inlines.h +++ b/include/zephyr/arch/arch_inlines.h @@ -12,7 +12,7 @@ #ifndef ZEPHYR_INCLUDE_ARCH_INLINES_H_ #define ZEPHYR_INCLUDE_ARCH_INLINES_H_ -#if defined(CONFIG_X86) || defined(CONFIG_X86_64) +#if defined(CONFIG_X86) #include #elif defined(CONFIG_ARM) #include @@ -32,8 +32,6 @@ #include #elif defined(CONFIG_SPARC) #include -#else -#error "Unknown Architecture" #endif #endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */ diff --git a/include/zephyr/arch/arch_interface.h b/include/zephyr/arch/arch_interface.h index 04aef8157558b..797a60bbaa58c 100644 --- a/include/zephyr/arch/arch_interface.h +++ b/include/zephyr/arch/arch_interface.h @@ -39,6 +39,7 @@ extern "C" { #endif /* NOTE: We cannot pull in kernel.h here, need some forward declarations */ +struct arch_esf; struct k_thread; struct k_mem_domain; @@ -46,6 +47,8 @@ typedef struct z_thread_stack_element k_thread_stack_t; typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3); +__deprecated typedef struct arch_esf z_arch_esf_t; + /** * @defgroup arch-timing Architecture timing APIs * @ingroup arch-interface diff --git a/include/zephyr/arch/arm/cortex_a_r/exception.h b/include/zephyr/arch/arm/cortex_a_r/exception.h index 3bef647566d3c..cd8377bc3a70c 100644 --- a/include/zephyr/arch/arm/cortex_a_r/exception.h +++ b/include/zephyr/arch/arm/cortex_a_r/exception.h @@ -54,7 +54,7 @@ struct __extra_esf_info { }; #endif /* 
CONFIG_EXTRA_EXCEPTION_INFO */ -struct __esf { +struct arch_esf { #if defined(CONFIG_EXTRA_EXCEPTION_INFO) struct __extra_esf_info extra_info; #endif @@ -75,8 +75,6 @@ struct __esf { extern uint32_t z_arm_coredump_fault_sp; -typedef struct __esf z_arch_esf_t; - extern void z_arm_exc_exit(bool fatal); #ifdef __cplusplus diff --git a/include/zephyr/arch/arm/cortex_m/exception.h b/include/zephyr/arch/arm/cortex_m/exception.h index a9896cea1e4ed..2deed9bdf832d 100644 --- a/include/zephyr/arch/arm/cortex_m/exception.h +++ b/include/zephyr/arch/arm/cortex_m/exception.h @@ -98,7 +98,7 @@ struct __extra_esf_info { }; #endif /* CONFIG_EXTRA_EXCEPTION_INFO */ -struct __esf { +struct arch_esf { struct __basic_sf { sys_define_gpr_with_alias(a1, r0); sys_define_gpr_with_alias(a2, r1); @@ -119,8 +119,6 @@ struct __esf { extern uint32_t z_arm_coredump_fault_sp; -typedef struct __esf z_arch_esf_t; - extern void z_arm_exc_exit(void); #ifdef __cplusplus diff --git a/include/zephyr/arch/arm/gdbstub.h b/include/zephyr/arch/arm/gdbstub.h index e8e606d7def80..55fceff173d10 100644 --- a/include/zephyr/arch/arm/gdbstub.h +++ b/include/zephyr/arch/arm/gdbstub.h @@ -64,7 +64,7 @@ struct gdb_ctx { unsigned int registers[GDB_NUM_REGS]; }; -void z_gdb_entry(z_arch_esf_t *esf, unsigned int exc_cause); +void z_gdb_entry(struct arch_esf *esf, unsigned int exc_cause); #endif diff --git a/include/zephyr/arch/arm64/exception.h b/include/zephyr/arch/arm64/exception.h index 05257c087feda..a1348f608e3b9 100644 --- a/include/zephyr/arch/arm64/exception.h +++ b/include/zephyr/arch/arm64/exception.h @@ -24,7 +24,7 @@ extern "C" { #endif -struct __esf { +struct arch_esf { uint64_t x0; uint64_t x1; uint64_t x2; @@ -55,8 +55,6 @@ struct __esf { #endif } __aligned(16); -typedef struct __esf z_arch_esf_t; - #ifdef __cplusplus } #endif diff --git a/include/zephyr/arch/cpu.h b/include/zephyr/arch/cpu.h index 11abe8df8c0b5..1e107512fa2a4 100644 --- a/include/zephyr/arch/cpu.h +++ b/include/zephyr/arch/cpu.h @@ -31,8 +31,6 @@ #include #elif defined(CONFIG_SPARC) #include -#else -#error "Unknown Architecture" #endif #endif /* ZEPHYR_INCLUDE_ARCH_CPU_H_ */ diff --git a/include/zephyr/arch/exception.h b/include/zephyr/arch/exception.h new file mode 100644 index 0000000000000..074a5e0b0d71c --- /dev/null +++ b/include/zephyr/arch/exception.h @@ -0,0 +1,35 @@ +/* exception.h - automatically selects the correct exception.h file to include */ + +/* + * Copyright (c) 2024 Meta Platforms + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_ +#define ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_ + +#if defined(CONFIG_X86_64) +#include +#elif defined(CONFIG_X86) +#include +#elif defined(CONFIG_ARM64) +#include +#elif defined(CONFIG_ARM) +#include +#elif defined(CONFIG_ARC) +#include +#elif defined(CONFIG_NIOS2) +#include +#elif defined(CONFIG_RISCV) +#include +#elif defined(CONFIG_XTENSA) +#include +#elif defined(CONFIG_MIPS) +#include +#elif defined(CONFIG_ARCH_POSIX) +#include +#elif defined(CONFIG_SPARC) +#include +#endif + +#endif /* ZEPHYR_INCLUDE_ARCH_EXCEPTION_H_ */ diff --git a/include/zephyr/arch/mips/exception.h b/include/zephyr/arch/mips/exception.h index d4403f1d5995e..f33af4c4387d8 100644 --- a/include/zephyr/arch/mips/exception.h +++ b/include/zephyr/arch/mips/exception.h @@ -17,7 +17,7 @@ extern "C" { #endif -struct __esf { +struct arch_esf { unsigned long ra; /* return address */ unsigned long gp; /* global pointer */ @@ -50,8 +50,6 @@ struct __esf { unsigned long cause; }; -typedef struct __esf z_arch_esf_t; - 
#ifdef __cplusplus } #endif diff --git a/include/zephyr/arch/nios2/arch.h b/include/zephyr/arch/nios2/arch.h index 7df7ae9a91aa8..5369f690b5d37 100644 --- a/include/zephyr/arch/nios2/arch.h +++ b/include/zephyr/arch/nios2/arch.h @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -99,34 +100,11 @@ static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key) void arch_irq_enable(unsigned int irq); void arch_irq_disable(unsigned int irq); -struct __esf { - uint32_t ra; /* return address r31 */ - uint32_t r1; /* at */ - uint32_t r2; /* return value */ - uint32_t r3; /* return value */ - uint32_t r4; /* register args */ - uint32_t r5; /* register args */ - uint32_t r6; /* register args */ - uint32_t r7; /* register args */ - uint32_t r8; /* Caller-saved general purpose */ - uint32_t r9; /* Caller-saved general purpose */ - uint32_t r10; /* Caller-saved general purpose */ - uint32_t r11; /* Caller-saved general purpose */ - uint32_t r12; /* Caller-saved general purpose */ - uint32_t r13; /* Caller-saved general purpose */ - uint32_t r14; /* Caller-saved general purpose */ - uint32_t r15; /* Caller-saved general purpose */ - uint32_t estatus; - uint32_t instr; /* Instruction being executed when exc occurred */ -}; - -typedef struct __esf z_arch_esf_t; - FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason, - const z_arch_esf_t *esf); + const struct arch_esf *esf); enum nios2_exception_cause { NIOS2_EXCEPTION_UNKNOWN = -1, diff --git a/include/zephyr/arch/nios2/exception.h b/include/zephyr/arch/nios2/exception.h new file mode 100644 index 0000000000000..223fa583114e6 --- /dev/null +++ b/include/zephyr/arch/nios2/exception.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_NIOS2_EXPCEPTION_H_ +#define ZEPHYR_INCLUDE_ARCH_NIOS2_EXPCEPTION_H_ + +#ifndef _ASMLANGUAGE +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct arch_esf { + uint32_t ra; /* return address r31 */ + uint32_t r1; /* at */ + uint32_t r2; /* return value */ + uint32_t r3; /* return value */ + uint32_t r4; /* register args */ + uint32_t r5; /* register args */ + uint32_t r6; /* register args */ + uint32_t r7; /* register args */ + uint32_t r8; /* Caller-saved general purpose */ + uint32_t r9; /* Caller-saved general purpose */ + uint32_t r10; /* Caller-saved general purpose */ + uint32_t r11; /* Caller-saved general purpose */ + uint32_t r12; /* Caller-saved general purpose */ + uint32_t r13; /* Caller-saved general purpose */ + uint32_t r14; /* Caller-saved general purpose */ + uint32_t r15; /* Caller-saved general purpose */ + uint32_t estatus; + uint32_t instr; /* Instruction being executed when exc occurred */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_NIOS2_EXPCEPTION_H_ */ diff --git a/include/zephyr/arch/posix/arch.h b/include/zephyr/arch/posix/arch.h index 83aceb1c14ba1..7dbfb6b386450 100644 --- a/include/zephyr/arch/posix/arch.h +++ b/include/zephyr/arch/posix/arch.h @@ -22,6 +22,7 @@ #include #include +#include #include #include #include /* Each board must define this */ @@ -38,12 +39,6 @@ extern "C" { #define ARCH_STACK_PTR_ALIGN 4 #endif -struct __esf { - uint32_t dummy; /*maybe we will want to add something someday*/ -}; - -typedef struct __esf z_arch_esf_t; - extern uint32_t sys_clock_cycle_get_32(void); 
static inline uint32_t arch_k_cycle_get_32(void) diff --git a/include/zephyr/arch/posix/exception.h b/include/zephyr/arch/posix/exception.h new file mode 100644 index 0000000000000..6c7962aa05799 --- /dev/null +++ b/include/zephyr/arch/posix/exception.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2010-2014 Wind River Systems, Inc. + * Copyright (c) 2017 Oticon A/S + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_POSIX_EXPCEPTION_H_ +#define ZEPHYR_INCLUDE_ARCH_POSIX_EXPCEPTION_H_ + +#ifndef _ASMLANGUAGE +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct arch_esf { + uint32_t dummy; /*maybe we will want to add something someday*/ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_POSIX_EXPCEPTION_H_ */ diff --git a/include/zephyr/arch/riscv/arch.h b/include/zephyr/arch/riscv/arch.h index e7dcfbef3aed1..4cdedb700d326 100644 --- a/include/zephyr/arch/riscv/arch.h +++ b/include/zephyr/arch/riscv/arch.h @@ -48,12 +48,12 @@ */ #ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT #define Z_RISCV_STACK_GUARD_SIZE \ - Z_POW2_CEIL(MAX(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \ + Z_POW2_CEIL(MAX(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \ Z_RISCV_STACK_PMP_ALIGN)) #define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_GUARD_SIZE #else #define Z_RISCV_STACK_GUARD_SIZE \ - ROUND_UP(sizeof(z_arch_esf_t) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \ + ROUND_UP(sizeof(struct arch_esf) + CONFIG_PMP_STACK_GUARD_MIN_SIZE, \ Z_RISCV_STACK_PMP_ALIGN) #define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_STACK_PMP_ALIGN #endif diff --git a/include/zephyr/arch/riscv/exception.h b/include/zephyr/arch/riscv/exception.h index 644df2cd1fbf9..097776227bb47 100644 --- a/include/zephyr/arch/riscv/exception.h +++ b/include/zephyr/arch/riscv/exception.h @@ -48,7 +48,7 @@ struct soc_esf { #if defined(CONFIG_RISCV_SOC_HAS_ISR_STACKING) SOC_ISR_STACKING_ESF_DECLARE; #else -struct __esf { +struct arch_esf { unsigned long ra; /* return address */ unsigned long t0; /* Caller-saved temporary register */ @@ -87,7 +87,6 @@ struct __esf { } __aligned(16); #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */ -typedef struct __esf z_arch_esf_t; #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE typedef struct soc_esf soc_esf_t; #endif diff --git a/include/zephyr/arch/sparc/arch.h b/include/zephyr/arch/sparc/arch.h index df3c40e4ba4a3..c1d87682f39bb 100644 --- a/include/zephyr/arch/sparc/arch.h +++ b/include/zephyr/arch/sparc/arch.h @@ -14,6 +14,7 @@ #ifndef ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_ #define ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_ +#include #include #include #include @@ -107,19 +108,6 @@ static inline uint64_t arch_k_cycle_get_64(void) return sys_clock_cycle_get_64(); } -struct __esf { - uint32_t out[8]; - uint32_t global[8]; - uint32_t psr; - uint32_t pc; - uint32_t npc; - uint32_t wim; - uint32_t tbr; - uint32_t y; -}; - -typedef struct __esf z_arch_esf_t; - #define ARCH_EXCEPT(reason_p) \ do { \ register uint32_t _g1 __asm__("g1") = reason_p; \ diff --git a/include/zephyr/arch/sparc/exception.h b/include/zephyr/arch/sparc/exception.h new file mode 100644 index 0000000000000..a2d3fae52e203 --- /dev/null +++ b/include/zephyr/arch/sparc/exception.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019-2020 Cobham Gaisler AB + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_SPARC_EXPCEPTION_H_ +#define ZEPHYR_INCLUDE_ARCH_SPARC_EXPCEPTION_H_ + +#ifndef _ASMLANGUAGE +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct arch_esf { + uint32_t 
out[8]; + uint32_t global[8]; + uint32_t psr; + uint32_t pc; + uint32_t npc; + uint32_t wim; + uint32_t tbr; + uint32_t y; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_SPARC_EXPCEPTION_H_ */ diff --git a/include/zephyr/arch/syscall.h b/include/zephyr/arch/syscall.h index 5b41561b68190..966f6003098ba 100644 --- a/include/zephyr/arch/syscall.h +++ b/include/zephyr/arch/syscall.h @@ -9,12 +9,10 @@ #ifndef ZEPHYR_INCLUDE_ARCH_SYSCALL_H_ #define ZEPHYR_INCLUDE_ARCH_SYSCALL_H_ -#if defined(CONFIG_X86) #if defined(CONFIG_X86_64) #include -#else +#elif defined(CONFIG_X86) #include -#endif #elif defined(CONFIG_ARM64) #include #elif defined(CONFIG_ARM) diff --git a/include/zephyr/arch/x86/ia32/arch.h b/include/zephyr/arch/x86/ia32/arch.h index 490dfdb40f334..b82e0db0f1733 100644 --- a/include/zephyr/arch/x86/ia32/arch.h +++ b/include/zephyr/arch/x86/ia32/arch.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -333,53 +334,6 @@ static inline void arch_isr_direct_footer(int swap) static inline int name##_body(void) #endif /* !CONFIG_X86_KPTI */ -/** - * @brief Exception Stack Frame - * - * A pointer to an "exception stack frame" (ESF) is passed as an argument - * to exception handlers registered via nanoCpuExcConnect(). As the system - * always operates at ring 0, only the EIP, CS and EFLAGS registers are pushed - * onto the stack when an exception occurs. - * - * The exception stack frame includes the volatile registers (EAX, ECX, and - * EDX) as well as the 5 non-volatile registers (EDI, ESI, EBX, EBP and ESP). - * Those registers are pushed onto the stack by _ExcEnt(). - */ - -typedef struct nanoEsf { -#ifdef CONFIG_GDBSTUB - unsigned int ss; - unsigned int gs; - unsigned int fs; - unsigned int es; - unsigned int ds; -#endif - unsigned int esp; - unsigned int ebp; - unsigned int ebx; - unsigned int esi; - unsigned int edi; - unsigned int edx; - unsigned int eax; - unsigned int ecx; - unsigned int errorCode; - unsigned int eip; - unsigned int cs; - unsigned int eflags; -} z_arch_esf_t; - -extern unsigned int z_x86_exception_vector; - -struct _x86_syscall_stack_frame { - uint32_t eip; - uint32_t cs; - uint32_t eflags; - - /* These are only present if cs = USER_CODE_SEG */ - uint32_t esp; - uint32_t ss; -}; - static ALWAYS_INLINE unsigned int arch_irq_lock(void) { unsigned int key; diff --git a/include/zephyr/arch/x86/ia32/exception.h b/include/zephyr/arch/x86/ia32/exception.h new file mode 100644 index 0000000000000..de618f4e01d43 --- /dev/null +++ b/include/zephyr/arch/x86/ia32/exception.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2010-2014 Wind River Systems, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_EXPCEPTION_H_ +#define ZEPHYR_INCLUDE_ARCH_X86_IA32_EXPCEPTION_H_ + +#ifndef _ASMLANGUAGE +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Exception Stack Frame + * + * A pointer to an "exception stack frame" (ESF) is passed as an argument + * to exception handlers registered via nanoCpuExcConnect(). As the system + * always operates at ring 0, only the EIP, CS and EFLAGS registers are pushed + * onto the stack when an exception occurs. + * + * The exception stack frame includes the volatile registers (EAX, ECX, and + * EDX) as well as the 5 non-volatile registers (EDI, ESI, EBX, EBP and ESP). + * Those registers are pushed onto the stack by _ExcEnt(). 
+ */ + +struct arch_esf { +#ifdef CONFIG_GDBSTUB + unsigned int ss; + unsigned int gs; + unsigned int fs; + unsigned int es; + unsigned int ds; +#endif + unsigned int esp; + unsigned int ebp; + unsigned int ebx; + unsigned int esi; + unsigned int edi; + unsigned int edx; + unsigned int eax; + unsigned int ecx; + unsigned int errorCode; + unsigned int eip; + unsigned int cs; + unsigned int eflags; +}; + +extern unsigned int z_x86_exception_vector; + +struct _x86_syscall_stack_frame { + uint32_t eip; + uint32_t cs; + uint32_t eflags; + + /* These are only present if cs = USER_CODE_SEG */ + uint32_t esp; + uint32_t ss; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_EXPCEPTION_H_ */ diff --git a/include/zephyr/arch/x86/intel64/arch.h b/include/zephyr/arch/x86/intel64/arch.h index c176e4e0bb0ba..86de01297f472 100644 --- a/include/zephyr/arch/x86/intel64/arch.h +++ b/include/zephyr/arch/x86/intel64/arch.h @@ -6,6 +6,7 @@ #ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_ #define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_ +#include #include #include #if defined(CONFIG_PCIE) && !defined(_ASMLANGUAGE) @@ -52,61 +53,6 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void) return (unsigned int) key; } -/* - * the exception stack frame - */ - -struct x86_esf { -#ifdef CONFIG_EXCEPTION_DEBUG - /* callee-saved */ - unsigned long rbx; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; -#endif /* CONFIG_EXCEPTION_DEBUG */ - unsigned long rbp; - - /* Caller-saved regs */ - unsigned long rax; - unsigned long rcx; - unsigned long rdx; - unsigned long rsi; - unsigned long rdi; - unsigned long r8; - unsigned long r9; - unsigned long r10; - /* Must be aligned 16 bytes from the end of this struct due to - * requirements of 'fxsave (%rsp)' - */ - char fxsave[X86_FXSAVE_SIZE]; - unsigned long r11; - - /* Pushed by CPU or assembly stub */ - unsigned long vector; - unsigned long code; - unsigned long rip; - unsigned long cs; - unsigned long rflags; - unsigned long rsp; - unsigned long ss; -}; - -typedef struct x86_esf z_arch_esf_t; - -struct x86_ssf { - unsigned long rip; - unsigned long rflags; - unsigned long r10; - unsigned long r9; - unsigned long r8; - unsigned long rdx; - unsigned long rsi; - char fxsave[X86_FXSAVE_SIZE]; - unsigned long rdi; - unsigned long rsp; -}; - #define ARCH_EXCEPT(reason_p) do { \ __asm__ volatile( \ "movq %[reason], %%rax\n\t" \ diff --git a/include/zephyr/arch/x86/intel64/exception.h b/include/zephyr/arch/x86/intel64/exception.h new file mode 100644 index 0000000000000..55c7cc2b4ee84 --- /dev/null +++ b/include/zephyr/arch/x86/intel64/exception.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2019 Intel Corp. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_ARCH_X86_INTEL64_EXPCEPTION_H_ +#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_EXPCEPTION_H_ + +#ifndef _ASMLANGUAGE +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * the exception stack frame + */ + +struct arch_esf { +#ifdef CONFIG_EXCEPTION_DEBUG + /* callee-saved */ + unsigned long rbx; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; +#endif /* CONFIG_EXCEPTION_DEBUG */ + unsigned long rbp; + + /* Caller-saved regs */ + unsigned long rax; + unsigned long rcx; + unsigned long rdx; + unsigned long rsi; + unsigned long rdi; + unsigned long r8; + unsigned long r9; + unsigned long r10; + /* Must be aligned 16 bytes from the end of this struct due to + * requirements of 'fxsave (%rsp)' + */ + char fxsave[X86_FXSAVE_SIZE]; + unsigned long r11; + + /* Pushed by CPU or assembly stub */ + unsigned long vector; + unsigned long code; + unsigned long rip; + unsigned long cs; + unsigned long rflags; + unsigned long rsp; + unsigned long ss; +}; + +struct x86_ssf { + unsigned long rip; + unsigned long rflags; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long rdx; + unsigned long rsi; + char fxsave[X86_FXSAVE_SIZE]; + unsigned long rdi; + unsigned long rsp; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _ASMLANGUAGE */ + +#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_EXPCEPTION_H_ */ diff --git a/include/zephyr/arch/xtensa/exception.h b/include/zephyr/arch/xtensa/exception.h index 51a5d5aef9036..acc6d4a30413b 100644 --- a/include/zephyr/arch/xtensa/exception.h +++ b/include/zephyr/arch/xtensa/exception.h @@ -25,7 +25,9 @@ extern "C" { * register windows are in use. This isn't a struct type, it just * matches the register/stack-unit width. 
*/ -typedef int z_arch_esf_t; +struct arch_esf { + int dummy; +}; #endif diff --git a/include/zephyr/bluetooth/hci_vs.h b/include/zephyr/bluetooth/hci_vs.h index 3561e20e9aa58..dabbe2902eaf9 100644 --- a/include/zephyr/bluetooth/hci_vs.h +++ b/include/zephyr/bluetooth/hci_vs.h @@ -451,7 +451,7 @@ struct bt_hci_evt_mesh_scanning_report { struct bt_hci_evt_mesh_scan_report reports[0]; } __packed; -struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const z_arch_esf_t *esf); +struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf); struct net_buf *hci_vs_err_trace(const char *file, uint32_t line, uint64_t pc); struct net_buf *hci_vs_err_assert(const char *file, uint32_t line); diff --git a/include/zephyr/debug/coredump.h b/include/zephyr/debug/coredump.h index 9d4f37a4f870f..f61f4e94a38b7 100644 --- a/include/zephyr/debug/coredump.h +++ b/include/zephyr/debug/coredump.h @@ -232,7 +232,7 @@ struct coredump_backend_api { coredump_backend_cmd_t cmd; }; -void coredump(unsigned int reason, const z_arch_esf_t *esf, +void coredump(unsigned int reason, const struct arch_esf *esf, struct k_thread *thread); void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr); void coredump_buffer_output(uint8_t *buf, size_t buflen); @@ -242,7 +242,7 @@ int coredump_cmd(enum coredump_cmd_id cmd_id, void *arg); #else -static inline void coredump(unsigned int reason, const z_arch_esf_t *esf, +static inline void coredump(unsigned int reason, const struct arch_esf *esf, struct k_thread *thread) { ARG_UNUSED(reason); @@ -279,7 +279,7 @@ static inline int coredump_cmd(enum coredump_cmd_id query_id, void *arg) #endif /* CONFIG_DEBUG_COREDUMP */ /** - * @fn void coredump(unsigned int reason, const z_arch_esf_t *esf, struct k_thread *thread); + * @fn void coredump(unsigned int reason, const struct arch_esf *esf, struct k_thread *thread); * @brief Perform coredump. * * Normally, this is called inside z_fatal_error() to generate coredump diff --git a/include/zephyr/fatal.h b/include/zephyr/fatal.h index be3dd2078c5cd..0fa1e93363ed4 100644 --- a/include/zephyr/fatal.h +++ b/include/zephyr/fatal.h @@ -12,6 +12,7 @@ #define ZEPHYR_INCLUDE_FATAL_H #include +#include #include #include @@ -64,7 +65,7 @@ FUNC_NORETURN void k_fatal_halt(unsigned int reason); * @param esf Exception context, with details and partial or full register * state when the error occurred. May in some cases be NULL. */ -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf); +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf); /** * Called by architecture code upon a fatal error. @@ -80,7 +81,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf); * @param esf Exception context, with details and partial or full register * state when the error occurred. May in some cases be NULL. 
*/ -void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf); +void z_fatal_error(unsigned int reason, const struct arch_esf *esf); /** @} */ diff --git a/kernel/fatal.c b/kernel/fatal.c index caee224bf5f63..06966066e3020 100644 --- a/kernel/fatal.c +++ b/kernel/fatal.c @@ -35,7 +35,7 @@ FUNC_NORETURN __weak void arch_system_halt(unsigned int reason) /* LCOV_EXCL_START */ __weak void k_sys_fatal_error_handler(unsigned int reason, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { ARG_UNUSED(esf); @@ -82,7 +82,7 @@ FUNC_NORETURN void k_fatal_halt(unsigned int reason) } /* LCOV_EXCL_STOP */ -void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf) +void z_fatal_error(unsigned int reason, const struct arch_esf *esf) { /* We can't allow this code to be preempted, but don't need to * synchronize between CPUs, so an arch-layer lock is diff --git a/kernel/include/gen_offset.h b/kernel/include/gen_offset.h index bdc9f785809a6..46a651cf28da7 100644 --- a/kernel/include/gen_offset.h +++ b/kernel/include/gen_offset.h @@ -84,7 +84,13 @@ #define GEN_OFFSET_SYM(S, M) \ GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M)) +#define GEN_OFFSET_STRUCT(S, M) \ + GEN_ABSOLUTE_SYM(__struct_##S##_##M##_##OFFSET, offsetof(struct S, M)) + #define GEN_NAMED_OFFSET_SYM(S, M, N) \ GEN_ABSOLUTE_SYM(__##S##_##N##_##OFFSET, offsetof(S, M)) +#define GEN_NAMED_OFFSET_STRUCT(S, M, N) \ + GEN_ABSOLUTE_SYM(__struct_##S##_##N##_##OFFSET, offsetof(struct S, M)) + #endif /* ZEPHYR_KERNEL_INCLUDE_GEN_OFFSET_H_ */ diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h index 9860070542edf..f12a9d3577b20 100644 --- a/kernel/include/kernel_arch_interface.h +++ b/kernel/include/kernel_arch_interface.h @@ -583,7 +583,7 @@ static inline void arch_nop(void); * * @param esf Exception Stack Frame (arch-specific) */ -void arch_coredump_info_dump(const z_arch_esf_t *esf); +void arch_coredump_info_dump(const struct arch_esf *esf); /** * @brief Get the target code specified by the architecture. 
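The gen_offset.h hunk above is the mechanical core of this rename for assembly consumers: GEN_OFFSET_SYM() derives symbol names from the typedef, yielding __z_arch_esf_t_<member>_OFFSET, while the new GEN_OFFSET_STRUCT() derives them from the struct tag, yielding __struct_arch_esf_<member>_OFFSET. That is why the SPARC trap code, the IA-32 exception stub, and the Nordic VPR ISR stacking macros all switch to the __struct_arch_esf_* spelling together with the per-arch offsets.c files. Below is a minimal sketch of an offsets.c fragment using the RISC-V mepc member as the example; the include paths and the GEN_ABS_SYM_BEGIN/GEN_ABS_SYM_END bracketing follow the existing offsets files and should be treated as assumptions here, not as text from this patch.

/* Built only so the absolute symbols can be extracted at build time;
 * this object is never linked into the final image.
 */
#include <gen_offset.h>
#include <zephyr/arch/exception.h>	/* struct arch_esf for the selected arch */

GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)

/* Expands through GEN_ABSOLUTE_SYM() to __struct_arch_esf_mepc_OFFSET with
 * the value offsetof(struct arch_esf, mepc), so assembly can then do e.g.
 *	lr t0, __struct_arch_esf_mepc_OFFSET(sp)
 */
GEN_OFFSET_STRUCT(arch_esf, mepc);

/* Frame size symbol; some architectures round this up to the stack alignment. */
GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));

GEN_ABS_SYM_END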
diff --git a/samples/bluetooth/hci_ipc/src/main.c b/samples/bluetooth/hci_ipc/src/main.c index 814001646e46c..6e1f275ffd03c 100644 --- a/samples/bluetooth/hci_ipc/src/main.c +++ b/samples/bluetooth/hci_ipc/src/main.c @@ -320,7 +320,7 @@ void bt_ctlr_assert_handle(char *file, uint32_t line) #endif /* CONFIG_BT_CTLR_ASSERT_HANDLER */ #if defined(CONFIG_BT_HCI_VS_FATAL_ERROR) -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf) { /* Disable interrupts, this is unrecoverable */ (void)irq_lock(); diff --git a/samples/subsys/llext/edk/app/src/main.c b/samples/subsys/llext/edk/app/src/main.c index 502901d59bdd0..78f1a664661a0 100644 --- a/samples/subsys/llext/edk/app/src/main.c +++ b/samples/subsys/llext/edk/app/src/main.c @@ -100,7 +100,7 @@ static void user_function(void *p1, void *p2, void *p3) printk("[app]Thread %p done\n", k_current_get()); } -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf) +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf) { int i; diff --git a/scripts/coredump/gdbstubs/arch/arm64.py b/scripts/coredump/gdbstubs/arch/arm64.py index 008fb88c1b963..7f4c03eb375ea 100644 --- a/scripts/coredump/gdbstubs/arch/arm64.py +++ b/scripts/coredump/gdbstubs/arch/arm64.py @@ -95,7 +95,7 @@ def parse_arch_data_block(self): self.registers[RegNum.X17] = tu[17] self.registers[RegNum.X18] = tu[18] - # Callee saved registers are not provided in __esf structure + # Callee saved registers are not provided in arch_esf structure # So they will be omitted (set to undefined) when stub generates the # packet in handle_register_group_read_packet. diff --git a/soc/nordic/common/vpr/soc_isr_stacking.h b/soc/nordic/common/vpr/soc_isr_stacking.h index ff34f9d4e09a6..8d2f64ad7291f 100644 --- a/soc/nordic/common/vpr/soc_isr_stacking.h +++ b/soc/nordic/common/vpr/soc_isr_stacking.h @@ -17,7 +17,7 @@ #if DT_PROP(VPR_CPU, nordic_bus_width) == 64 #define SOC_ISR_STACKING_ESF_DECLARE \ - struct __esf { \ + struct arch_esf { \ unsigned long s0; \ unsigned long mstatus; \ unsigned long tp; \ @@ -40,7 +40,7 @@ #else /* DT_PROP(VPR_CPU, nordic_bus_width) == 32 */ #define SOC_ISR_STACKING_ESF_DECLARE \ - struct __esf { \ + struct arch_esf { \ unsigned long s0; \ unsigned long mstatus; \ unsigned long tp; \ @@ -73,7 +73,7 @@ /* * Size of the SW managed part of the ESF in case of exception */ -#define ESF_SW_EXC_SIZEOF (__z_arch_esf_t_SIZEOF - ESF_HW_SIZEOF) +#define ESF_SW_EXC_SIZEOF (__struct_arch_esf_SIZEOF - ESF_HW_SIZEOF) /* * Size of the SW managed part of the ESF in case of interrupt @@ -90,17 +90,17 @@ #define MEPC_SP_ALIGN_BIT_MASK (0x1UL) #define STORE_SP_ALIGN_BIT_FROM_MEPC \ - addi t1, sp, __z_arch_esf_t_soc_context_OFFSET; \ - lr t0, __z_arch_esf_t_mepc_OFFSET(sp); \ + addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \ + lr t0, __struct_arch_esf_mepc_OFFSET(sp); \ andi t0, t0, MEPC_SP_ALIGN_BIT_MASK; \ sr t0, __soc_esf_t_sp_align_OFFSET(t1) #define RESTORE_SP_ALIGN_BIT_TO_MEPC \ - addi t1, sp, __z_arch_esf_t_soc_context_OFFSET; \ + addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \ lr t0, __soc_esf_t_sp_align_OFFSET(t1); \ - lr t1, __z_arch_esf_t_mepc_OFFSET(sp); \ + lr t1, __struct_arch_esf_mepc_OFFSET(sp); \ or t2, t1, t0; \ - sr t2, __z_arch_esf_t_mepc_OFFSET(sp) + sr t2, __struct_arch_esf_mepc_OFFSET(sp) #define SOC_ISR_SW_STACKING \ csrw mscratch, t0; \ diff --git a/submanifests/optional.yaml b/submanifests/optional.yaml index 
db5f6c392b8f1..8da441bf05ab0 100644 --- a/submanifests/optional.yaml +++ b/submanifests/optional.yaml @@ -34,7 +34,7 @@ manifest: groups: - optional - name: sof - revision: c11a3185afbc8e1b2a79916de3dfefaf326d9ad1 + revision: a44758883f3f6cfb6c67b19bc76fcb01f77ca50b path: modules/audio/sof remote: upstream groups: diff --git a/subsys/bluetooth/controller/hci/hci.c b/subsys/bluetooth/controller/hci/hci.c index 0edc10310cb0f..ac7c55613ad1f 100644 --- a/subsys/bluetooth/controller/hci/hci.c +++ b/subsys/bluetooth/controller/hci/hci.c @@ -5001,7 +5001,7 @@ NET_BUF_POOL_FIXED_DEFINE(vs_err_tx_pool, 1, BT_BUF_EVT_RX_SIZE, typedef struct bt_hci_vs_fata_error_cpu_data_cortex_m bt_hci_vs_fatal_error_cpu_data; static void vs_err_fatal_cpu_data_fill(bt_hci_vs_fatal_error_cpu_data *cpu_data, - const z_arch_esf_t *esf) + const struct arch_esf *esf) { cpu_data->a1 = sys_cpu_to_le32(esf->basic.a1); cpu_data->a2 = sys_cpu_to_le32(esf->basic.a2); @@ -5036,7 +5036,7 @@ static struct net_buf *vs_err_evt_create(uint8_t subevt, uint8_t len) return buf; } -struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const z_arch_esf_t *esf) +struct net_buf *hci_vs_err_stack_frame(unsigned int reason, const struct arch_esf *esf) { /* Prepare vendor specific HCI Fatal Error event */ struct bt_hci_vs_fatal_error_stack_frame *sf; diff --git a/subsys/debug/coredump/coredump_core.c b/subsys/debug/coredump/coredump_core.c index 71a3880006a92..e939a81ea29b5 100644 --- a/subsys/debug/coredump/coredump_core.c +++ b/subsys/debug/coredump/coredump_core.c @@ -117,7 +117,7 @@ void process_memory_region_list(void) #endif } -void coredump(unsigned int reason, const z_arch_esf_t *esf, +void coredump(unsigned int reason, const struct arch_esf *esf, struct k_thread *thread) { z_coredump_start(); diff --git a/subsys/testsuite/ztest/include/zephyr/ztest.h b/subsys/testsuite/ztest/include/zephyr/ztest.h index 52d686756c64e..1c58b2daec664 100644 --- a/subsys/testsuite/ztest/include/zephyr/ztest.h +++ b/subsys/testsuite/ztest/include/zephyr/ztest.h @@ -28,8 +28,7 @@ #ifdef __cplusplus extern "C" { #endif -struct esf; -typedef struct esf z_arch_esf_t; +struct arch_esf; #ifdef __cplusplus } #endif diff --git a/subsys/testsuite/ztest/include/zephyr/ztest_error_hook.h b/subsys/testsuite/ztest/include/zephyr/ztest_error_hook.h index 5d6ac64a9c1bd..e4a5937d13bd3 100644 --- a/subsys/testsuite/ztest/include/zephyr/ztest_error_hook.h +++ b/subsys/testsuite/ztest/include/zephyr/ztest_error_hook.h @@ -38,7 +38,7 @@ __syscall void ztest_set_fault_valid(bool valid); * By default, it will do nothing before leaving error handler. 
*/ void ztest_post_fatal_error_hook(unsigned int reason, - const z_arch_esf_t *pEsf); + const struct arch_esf *pEsf); #endif diff --git a/subsys/testsuite/ztest/src/ztest_error_hook.c b/subsys/testsuite/ztest/src/ztest_error_hook.c index 6982e96c7c552..e910dc33c3151 100644 --- a/subsys/testsuite/ztest/src/ztest_error_hook.c +++ b/subsys/testsuite/ztest/src/ztest_error_hook.c @@ -42,11 +42,11 @@ static inline void z_vrfy_ztest_set_fault_valid(bool valid) #endif __weak void ztest_post_fatal_error_hook(unsigned int reason, - const z_arch_esf_t *pEsf) + const struct arch_esf *pEsf) { } -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf) { k_tid_t curr_tid = k_current_get(); bool valid_fault = (curr_tid == valid_fault_tid) || fault_in_isr; diff --git a/tests/arch/arm/arm_hardfault_validation/src/arm_hardfault.c b/tests/arch/arm/arm_hardfault_validation/src/arm_hardfault.c index 1e7ac1ef71f84..d7b4b509f1721 100644 --- a/tests/arch/arm/arm_hardfault_validation/src/arm_hardfault.c +++ b/tests/arch/arm/arm_hardfault_validation/src/arm_hardfault.c @@ -13,7 +13,7 @@ static volatile int expected_reason = -1; -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf) { static bool triggered_synchronous_svc; diff --git a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c index b569bbe1678a3..3891c9b1d41c2 100644 --- a/tests/arch/arm/arm_interrupt/src/arm_interrupt.c +++ b/tests/arch/arm/arm_interrupt/src/arm_interrupt.c @@ -24,7 +24,7 @@ static struct k_thread esf_collection_thread; /** * Validates that pEsf matches state from set_regs_with_known_pattern() */ -static int check_esf_matches_expectations(const z_arch_esf_t *pEsf) +static int check_esf_matches_expectations(const struct arch_esf *pEsf) { const uint16_t expected_fault_instruction = 0xde5a; /* udf #90 */ const bool caller_regs_match_expected = @@ -74,7 +74,7 @@ static int check_esf_matches_expectations(const z_arch_esf_t *pEsf) * is overwritten in fault.c) */ if (memcmp((void *)callee_regs->psp, pEsf, - offsetof(struct __esf, basic.xpsr)) != 0) { + offsetof(struct arch_esf, basic.xpsr)) != 0) { printk("psp does not match __basic_sf provided\n"); return -1; } @@ -88,7 +88,7 @@ static int check_esf_matches_expectations(const z_arch_esf_t *pEsf) return 0; } -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf) { TC_PRINT("Caught system error -- reason %d\n", reason); diff --git a/tests/arch/arm/arm_no_multithreading/src/main.c b/tests/arch/arm/arm_no_multithreading/src/main.c index a1b5c67a28f43..c9e4cdd02f329 100644 --- a/tests/arch/arm/arm_no_multithreading/src/main.c +++ b/tests/arch/arm/arm_no_multithreading/src/main.c @@ -36,7 +36,7 @@ void arm_isr_handler(const void *args) test_flag++; } -void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf) +void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf) { printk("Caught system error -- reason %d\n", reason); diff --git a/tests/arch/x86/nmi/src/main.c b/tests/arch/x86/nmi/src/main.c index 852e949c1150f..55a5bc9973400 100644 --- a/tests/arch/x86/nmi/src/main.c +++ b/tests/arch/x86/nmi/src/main.c @@ -35,7 +35,7 @@ uint8_t *nmi_stacks[] = { #endif }; -bool z_x86_do_kernel_nmi(const 
z_arch_esf_t *esf)
+bool z_x86_do_kernel_nmi(const struct arch_esf *esf)
 {
 	uint64_t stack;
diff --git a/tests/arch/x86/static_idt/src/main.c b/tests/arch/x86/static_idt/src/main.c
index 79a25d1886672..b260b0bbe7ed0 100644
--- a/tests/arch/x86/static_idt/src/main.c
+++ b/tests/arch/x86/static_idt/src/main.c
@@ -47,7 +47,7 @@ static volatile int int_handler_executed;
 /* Assume the spurious interrupt handler will execute and abort the task */
 static volatile int spur_handler_aborted_thread = 1;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
 {
 	if (reason != K_ERR_SPURIOUS_IRQ) {
 		printk("wrong error reason\n");
@@ -89,7 +89,7 @@ void isr_handler(void)
  *
  */
-void exc_divide_error_handler(z_arch_esf_t *p_esf)
+void exc_divide_error_handler(struct arch_esf *p_esf)
 {
 	p_esf->eip += 2;
 	/* provide evidence that the handler executed */
diff --git a/tests/bluetooth/audio/mocks/src/fatal.c b/tests/bluetooth/audio/mocks/src/fatal.c
index f36f518b34573..0793de62267d1 100644
--- a/tests/bluetooth/audio/mocks/src/fatal.c
+++ b/tests/bluetooth/audio/mocks/src/fatal.c
@@ -7,7 +7,7 @@
 #include
 #include
-void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+void z_fatal_error(unsigned int reason, const struct arch_esf *esf)
 {
 	ztest_test_fail();
 }
diff --git a/tests/drivers/coredump/coredump_api/src/main.c b/tests/drivers/coredump/coredump_api/src/main.c
index ddac6d0458527..66797add42b28 100644
--- a/tests/drivers/coredump/coredump_api/src/main.c
+++ b/tests/drivers/coredump/coredump_api/src/main.c
@@ -32,7 +32,7 @@ static struct coredump_mem_region_node dump_region0 = {
 	.size = sizeof(values_to_dump)
 };
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	ARG_UNUSED(pEsf);
diff --git a/tests/kernel/fatal/exception/src/main.c b/tests/kernel/fatal/exception/src/main.c
index 8d2cff2b1d9ef..a946d02de7f9e 100644
--- a/tests/kernel/fatal/exception/src/main.c
+++ b/tests/kernel/fatal/exception/src/main.c
@@ -50,7 +50,7 @@ volatile int rv;
 static ZTEST_DMEM volatile int expected_reason = -1;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	TC_PRINT("Caught system error -- reason %d\n", reason);
diff --git a/tests/kernel/fatal/message_capture/src/main.c b/tests/kernel/fatal/message_capture/src/main.c
index 820a28216f9f8..514f3ea6e55c9 100644
--- a/tests/kernel/fatal/message_capture/src/main.c
+++ b/tests/kernel/fatal/message_capture/src/main.c
@@ -12,7 +12,7 @@ static volatile int expected_reason = -1;
 void z_thread_essential_clear(struct k_thread *thread);
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
diff --git a/tests/kernel/fatal/no-multithreading/src/main.c b/tests/kernel/fatal/no-multithreading/src/main.c
index 22f2987c29180..c48ef3d9fd535 100644
--- a/tests/kernel/fatal/no-multithreading/src/main.c
+++ b/tests/kernel/fatal/no-multithreading/src/main.c
@@ -13,7 +13,7 @@
 static ZTEST_DMEM volatile int expected_reason = -1;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	int rv = TC_PASS;
diff --git a/tests/kernel/mem_protect/demand_paging/src/main.c b/tests/kernel/mem_protect/demand_paging/src/main.c
index fab82b68f7da8..5df3442b8f979 100644
--- a/tests/kernel/mem_protect/demand_paging/src/main.c
+++ b/tests/kernel/mem_protect/demand_paging/src/main.c
@@ -64,7 +64,7 @@ __pinned_bss
 static bool expect_fault;
 __pinned_func
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
diff --git a/tests/kernel/mem_protect/mem_map/src/main.c b/tests/kernel/mem_protect/mem_map/src/main.c
index 46fa5d4543d51..3bbc1cd6add57 100644
--- a/tests/kernel/mem_protect/mem_map/src/main.c
+++ b/tests/kernel/mem_protect/mem_map/src/main.c
@@ -33,7 +33,7 @@ volatile bool expect_fault;
 __pinned_noinit
 static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE) test_page[TEST_PAGE_SZ];
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
diff --git a/tests/kernel/mem_protect/mem_protect/src/common.c b/tests/kernel/mem_protect/mem_protect/src/common.c
index a0addcbda55b0..93febc8b9a1a4 100644
--- a/tests/kernel/mem_protect/mem_protect/src/common.c
+++ b/tests/kernel/mem_protect/mem_protect/src/common.c
@@ -8,7 +8,7 @@
 ZTEST_BMEM volatile bool valid_fault;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d %d\n", reason, valid_fault);
 	if (valid_fault) {
diff --git a/tests/kernel/mem_protect/protection/src/main.c b/tests/kernel/mem_protect/protection/src/main.c
index 2f66edcdb90a9..7c72700d98754 100644
--- a/tests/kernel/mem_protect/protection/src/main.c
+++ b/tests/kernel/mem_protect/protection/src/main.c
@@ -28,7 +28,7 @@
 #define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	INFO("Caught system error -- reason %d\n", reason);
 	ztest_test_pass();
diff --git a/tests/kernel/mem_protect/stackprot/src/main.c b/tests/kernel/mem_protect/stackprot/src/main.c
index cb927670eaa1f..6ebbe2deb8980 100644
--- a/tests/kernel/mem_protect/stackprot/src/main.c
+++ b/tests/kernel/mem_protect/stackprot/src/main.c
@@ -15,7 +15,7 @@ ZTEST_BMEM static int count;
 ZTEST_BMEM static int ret = TC_PASS;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
 {
 	if (reason != K_ERR_STACK_CHK_FAIL) {
 		printk("wrong error type\n");
diff --git a/tests/kernel/mem_protect/sys_sem/src/main.c b/tests/kernel/mem_protect/sys_sem/src/main.c
index f7bcbfd21d0c4..c5aeeee51fc46 100644
--- a/tests/kernel/mem_protect/sys_sem/src/main.c
+++ b/tests/kernel/mem_protect/sys_sem/src/main.c
@@ -568,7 +568,7 @@ ZTEST_USER(sys_sem_1cpu, test_sem_multiple_threads_wait)
  * @}
  */
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
 	printk("Unexpected fault during test\n");
diff --git a/tests/kernel/mem_protect/syscalls/src/main.c b/tests/kernel/mem_protect/syscalls/src/main.c
index 7d2b0568f96d0..2f92257f70eeb 100644
--- a/tests/kernel/mem_protect/syscalls/src/main.c
+++ b/tests/kernel/mem_protect/syscalls/src/main.c
@@ -36,7 +36,7 @@ char kernel_string[BUF_SIZE];
 char kernel_buf[BUF_SIZE];
 ZTEST_BMEM char user_string[BUF_SIZE];
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
 	printk("Unexpected fault during test\n");
diff --git a/tests/kernel/mem_protect/userspace/src/main.c b/tests/kernel/mem_protect/userspace/src/main.c
index 98510461e220c..4c88d6e90d54d 100644
--- a/tests/kernel/mem_protect/userspace/src/main.c
+++ b/tests/kernel/mem_protect/userspace/src/main.c
@@ -76,7 +76,7 @@ static void set_fault(unsigned int reason)
 	compiler_barrier();
 }
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	INFO("Caught system error -- reason %d\n", reason);
diff --git a/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c b/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c
index d1ba055772770..ea4210ff3d129 100644
--- a/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c
+++ b/tests/kernel/mutex/mutex_error_case/src/test_mutex_error.c
@@ -38,7 +38,7 @@ extern struct k_sem offload_sem;
 /* A call back function which is hooked in default assert handler.
  */
 void ztest_post_fatal_error_hook(unsigned int reason,
-			const z_arch_esf_t *pEsf)
+			const struct arch_esf *pEsf)
 {
 	/* check if expected error */
diff --git a/tests/kernel/pipe/pipe/src/test_pipe.c b/tests/kernel/pipe/pipe/src/test_pipe.c
index 04d1f7ec14e1d..4ad9ee230a457 100644
--- a/tests/kernel/pipe/pipe/src/test_pipe.c
+++ b/tests/kernel/pipe/pipe/src/test_pipe.c
@@ -674,7 +674,7 @@ void pipe_put_get_timeout(void)
 /******************************************************************************/
 ZTEST_BMEM bool valid_fault;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
 	if (valid_fault) {
diff --git a/tests/kernel/smp/src/main.c b/tests/kernel/smp/src/main.c
index 776b5b6beb690..f73a1dfdbbb6e 100644
--- a/tests/kernel/smp/src/main.c
+++ b/tests/kernel/smp/src/main.c
@@ -757,7 +757,7 @@ ZTEST(smp, test_smp_ipi)
 }
 #endif
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
 {
 	static int trigger;
diff --git a/tests/kernel/threads/dynamic_thread/src/main.c b/tests/kernel/threads/dynamic_thread/src/main.c
index 0fa88caee8cc0..e456e8db13304 100644
--- a/tests/kernel/threads/dynamic_thread/src/main.c
+++ b/tests/kernel/threads/dynamic_thread/src/main.c
@@ -16,7 +16,7 @@ static K_SEM_DEFINE(end_sem, 0, 1);
 static ZTEST_BMEM struct k_thread *dyn_thread;
 static struct k_thread *dynamic_threads[CONFIG_MAX_THREAD_BYTES * 8];
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *esf)
 {
 	if (reason != K_ERR_KERNEL_OOPS) {
 		printk("wrong error reason\n");
diff --git a/tests/kernel/threads/dynamic_thread_stack/src/main.c b/tests/kernel/threads/dynamic_thread_stack/src/main.c
index a833197d36126..cb6fba523cddb 100644
--- a/tests/kernel/threads/dynamic_thread_stack/src/main.c
+++ b/tests/kernel/threads/dynamic_thread_stack/src/main.c
@@ -179,7 +179,7 @@ static void set_fault(unsigned int reason)
 	compiler_barrier();
 }
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	if (expect_fault) {
 		if (expected_reason == reason) {
diff --git a/tests/kernel/threads/thread_apis/src/test_essential_thread.c b/tests/kernel/threads/thread_apis/src/test_essential_thread.c
index 1814346932301..082765bd14816 100644
--- a/tests/kernel/threads/thread_apis/src/test_essential_thread.c
+++ b/tests/kernel/threads/thread_apis/src/test_essential_thread.c
@@ -61,7 +61,7 @@ ZTEST(threads_lifecycle, test_essential_thread_operation)
 }
 void k_sys_fatal_error_handler(unsigned int reason,
-			const z_arch_esf_t *esf)
+			const struct arch_esf *esf)
 {
 	ARG_UNUSED(esf);
 	ARG_UNUSED(reason);
diff --git a/tests/lib/mem_blocks/src/main.c b/tests/lib/mem_blocks/src/main.c
index 4641a6db643ef..52232b007efbf 100644
--- a/tests/lib/mem_blocks/src/main.c
+++ b/tests/lib/mem_blocks/src/main.c
@@ -25,7 +25,7 @@ static sys_multi_mem_blocks_t alloc_group;
 static ZTEST_DMEM volatile int expected_reason = -1;
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	printk("Caught system error -- reason %d\n", reason);
diff --git a/tests/subsys/debug/coredump/src/main.c b/tests/subsys/debug/coredump/src/main.c
index fac5395971cf6..f97494df1712d 100644
--- a/tests/subsys/debug/coredump/src/main.c
+++ b/tests/subsys/debug/coredump/src/main.c
@@ -13,7 +13,7 @@
 #endif
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	ARG_UNUSED(pEsf);
diff --git a/tests/subsys/debug/coredump_backends/src/main.c b/tests/subsys/debug/coredump_backends/src/main.c
index 8b7636edbd9f6..0678bff306207 100644
--- a/tests/subsys/debug/coredump_backends/src/main.c
+++ b/tests/subsys/debug/coredump_backends/src/main.c
@@ -19,7 +19,7 @@ static struct k_thread dump_thread;
 static K_THREAD_STACK_DEFINE(dump_stack, STACK_SIZE);
-void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
+void k_sys_fatal_error_handler(unsigned int reason, const struct arch_esf *pEsf)
 {
 	ARG_UNUSED(reason);
 	ARG_UNUSED(pEsf);
diff --git a/tests/ztest/error_hook/src/main.c b/tests/ztest/error_hook/src/main.c
index dfa998e0075d8..060b856e156d1 100644
--- a/tests/ztest/error_hook/src/main.c
+++ b/tests/ztest/error_hook/src/main.c
@@ -153,7 +153,7 @@ static void release_offload_sem(void)
  * default one.
  */
 void ztest_post_fatal_error_hook(unsigned int reason,
-			const z_arch_esf_t *pEsf)
+			const struct arch_esf *pEsf)
 {
 	switch (case_type) {
 	case ZTEST_CATCH_FATAL_ACCESS: