@@ -146,7 +146,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
  */
 
 #if (CONFIG_FAULT_DUMP == 1)
-static void fault_show(const z_arch_esf_t *esf, int fault)
+static void fault_show(const struct arch_esf *esf, int fault)
 {
 	PR_EXC("Fault! EXC #%d", fault);
 
@@ -165,7 +165,7 @@ static void fault_show(const z_arch_esf_t *esf, int fault)
  *
  * For Dump level 0, no information needs to be generated.
  */
-static void fault_show(const z_arch_esf_t *esf, int fault)
+static void fault_show(const struct arch_esf *esf, int fault)
 {
 	(void)esf;
 	(void)fault;
@@ -185,7 +185,7 @@ static const struct z_exc_handle exceptions[] = {
  *
  * @return true if error is recoverable, otherwise return false.
  */
-static bool memory_fault_recoverable(z_arch_esf_t *esf, bool synchronous)
+static bool memory_fault_recoverable(struct arch_esf *esf, bool synchronous)
 {
 #ifdef CONFIG_USERSPACE
 	for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@@ -228,7 +228,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr,
  *
  * @return error code to identify the fatal error reason
  */
-static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
+static uint32_t mem_manage_fault(struct arch_esf *esf, int from_hard_fault,
 				 bool *recoverable)
 {
 	uint32_t reason = K_ERR_ARM_MEM_GENERIC;
@@ -387,7 +387,7 @@ static uint32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
  * @return error code to identify the fatal error reason.
  *
  */
-static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
+static int bus_fault(struct arch_esf *esf, int from_hard_fault, bool *recoverable)
 {
 	uint32_t reason = K_ERR_ARM_BUS_GENERIC;
 
@@ -549,7 +549,7 @@ static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
  *
  * @return error code to identify the fatal error reason
  */
-static uint32_t usage_fault(const z_arch_esf_t *esf)
+static uint32_t usage_fault(const struct arch_esf *esf)
 {
 	uint32_t reason = K_ERR_ARM_USAGE_GENERIC;
 
@@ -612,7 +612,7 @@ static uint32_t usage_fault(const z_arch_esf_t *esf)
  *
  * @return error code to identify the fatal error reason
  */
-static uint32_t secure_fault(const z_arch_esf_t *esf)
+static uint32_t secure_fault(const struct arch_esf *esf)
 {
 	uint32_t reason = K_ERR_ARM_SECURE_GENERIC;
 
@@ -661,7 +661,7 @@ static uint32_t secure_fault(const z_arch_esf_t *esf)
  * See z_arm_fault_dump() for example.
  *
  */
-static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
+static void debug_monitor(struct arch_esf *esf, bool *recoverable)
 {
 	*recoverable = false;
 
@@ -687,7 +687,7 @@ static void debug_monitor(z_arch_esf_t *esf, bool *recoverable)
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
 
-static inline bool z_arm_is_synchronous_svc(z_arch_esf_t *esf)
+static inline bool z_arm_is_synchronous_svc(struct arch_esf *esf)
 {
 	uint16_t *ret_addr = (uint16_t *)esf->basic.pc;
 	/* SVC is a 16-bit instruction. On a synchronous SVC
@@ -762,7 +762,7 @@ static inline bool z_arm_is_pc_valid(uintptr_t pc)
  *
  * @return error code to identify the fatal error reason
  */
-static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
+static uint32_t hard_fault(struct arch_esf *esf, bool *recoverable)
 {
 	uint32_t reason = K_ERR_CPU_EXCEPTION;
 
@@ -829,7 +829,7 @@ static uint32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
  * See z_arm_fault_dump() for example.
  *
  */
-static void reserved_exception(const z_arch_esf_t *esf, int fault)
+static void reserved_exception(const struct arch_esf *esf, int fault)
 {
 	ARG_UNUSED(esf);
 
@@ -839,7 +839,7 @@ static void reserved_exception(const z_arch_esf_t *esf, int fault)
 }
 
 /* Handler function for ARM fault conditions. */
-static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
+static uint32_t fault_handle(struct arch_esf *esf, int fault, bool *recoverable)
 {
 	uint32_t reason = K_ERR_CPU_EXCEPTION;
 
@@ -893,7 +893,7 @@ static uint32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
  *
  * @param secure_esf Pointer to the secure stack frame.
  */
-static void secure_stack_dump(const z_arch_esf_t *secure_esf)
+static void secure_stack_dump(const struct arch_esf *secure_esf)
 {
 	/*
 	 * In case a Non-Secure exception interrupted the Secure
@@ -918,7 +918,7 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
 		 * Non-Secure exception entry.
 		 */
 		top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
-		secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
+		secure_esf = (const struct arch_esf *)top_of_sec_stack;
 		sec_ret_addr = secure_esf->basic.pc;
 	} else {
 		/* Exception during Non-Secure function call.
@@ -947,11 +947,11 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
  *
  * @return ESF pointer on success, otherwise return NULL
  */
-static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
+static inline struct arch_esf *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_return,
 				     bool *nested_exc)
 {
 	bool alternative_state_exc = false;
-	z_arch_esf_t *ptr_esf = NULL;
+	struct arch_esf *ptr_esf = NULL;
 
 	*nested_exc = false;
 
@@ -979,14 +979,14 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 		alternative_state_exc = true;
 
 		/* Dump the Secure stack before handling the actual fault. */
-		z_arch_esf_t *secure_esf;
+		struct arch_esf *secure_esf;
 
 		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
 			/* Secure stack pointed by PSP */
-			secure_esf = (z_arch_esf_t *)psp;
+			secure_esf = (struct arch_esf *)psp;
 		} else {
 			/* Secure stack pointed by MSP */
-			secure_esf = (z_arch_esf_t *)msp;
+			secure_esf = (struct arch_esf *)msp;
 			*nested_exc = true;
 		}
 
@@ -997,9 +997,9 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 		 * and supply it to the fault handing function.
 		 */
 		if (exc_return & EXC_RETURN_MODE_THREAD) {
-			ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
+			ptr_esf = (struct arch_esf *)__TZ_get_PSP_NS();
 		} else {
-			ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
+			ptr_esf = (struct arch_esf *)__TZ_get_MSP_NS();
 		}
 	}
 #elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
@@ -1024,10 +1024,10 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 
 		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
 			/* Non-Secure stack frame on PSP */
-			ptr_esf = (z_arch_esf_t *)psp;
+			ptr_esf = (struct arch_esf *)psp;
 		} else {
 			/* Non-Secure stack frame on MSP */
-			ptr_esf = (z_arch_esf_t *)msp;
+			ptr_esf = (struct arch_esf *)msp;
 		}
 	} else {
 		/* Exception entry occurred in Non-Secure stack. */
@@ -1046,11 +1046,11 @@ static inline z_arch_esf_t *get_esf(uint32_t msp, uint32_t psp, uint32_t exc_ret
 	if (!alternative_state_exc) {
 		if (exc_return & EXC_RETURN_MODE_THREAD) {
 			/* Returning to thread mode */
-			ptr_esf = (z_arch_esf_t *)psp;
+			ptr_esf = (struct arch_esf *)psp;
 
 		} else {
 			/* Returning to handler mode */
-			ptr_esf = (z_arch_esf_t *)msp;
+			ptr_esf = (struct arch_esf *)msp;
 			*nested_exc = true;
 		}
 	}
@@ -1095,12 +1095,12 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
 	uint32_t reason = K_ERR_CPU_EXCEPTION;
 	int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
 	bool recoverable, nested_exc;
-	z_arch_esf_t *esf;
+	struct arch_esf *esf;
 
 	/* Create a stack-ed copy of the ESF to be used during
 	 * the fault handling process.
 	 */
-	z_arch_esf_t esf_copy;
+	struct arch_esf esf_copy;
 
 	/* Force unlock interrupts */
 	arch_irq_unlock(0);
@@ -1123,13 +1123,13 @@ void z_arm_fault(uint32_t msp, uint32_t psp, uint32_t exc_return,
 
 	/* Copy ESF */
 #if !defined(CONFIG_EXTRA_EXCEPTION_INFO)
-	memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
+	memcpy(&esf_copy, esf, sizeof(struct arch_esf));
 	ARG_UNUSED(callee_regs);
 #else
 	/* the extra exception info is not present in the original esf
 	 * so we only copy the fields before those.
 	 */
-	memcpy(&esf_copy, esf, offsetof(z_arch_esf_t, extra_info));
+	memcpy(&esf_copy, esf, offsetof(struct arch_esf, extra_info));
 	esf_copy.extra_info = (struct __extra_esf_info) {
 		.callee = callee_regs,
 		.exc_return = exc_return,