@@ -35,7 +35,7 @@ static void DBG(char *msg, struct k_thread *th)
 	unsigned int v;
 
 	strcpy(buf, "CPU# exc# ");
-	buf[3] = '0' + _current_cpu->id;
+	buf[3] = '0' + arch_curr_cpu()->id;
 	buf[8] = '0' + _current->arch.exception_depth;
 	strcat(buf, _current->name);
 	strcat(buf, ": ");
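
Throughout this file the commit replaces the `_current_cpu` macro with a direct `arch_curr_cpu()` call. The two are closely related: in Zephyr's kernel headers, `_current_cpu` has (on SMP builds) expanded to `arch_curr_cpu()`, which returns the `struct _cpu` describing the CPU the caller is running on. A minimal sketch of the accessor being made explicit here, assuming only that the result is stable while migration is prevented (IRQs locked), as every call site in this file guarantees:

```c
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

/* Illustrative helper (not part of the commit): read the current
 * CPU's id via the explicit per-CPU accessor used throughout this
 * change.  The value is only meaningful while the caller cannot
 * migrate, i.e. with IRQs locked.
 */
static inline unsigned int my_cpu_id(void)
{
	return arch_curr_cpu()->id;
}
```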
@@ -70,7 +70,7 @@ static void z_riscv_fpu_disable(void)
 		csr_clear(mstatus, MSTATUS_FS);
 
 		/* remember its clean/dirty state */
-		_current_cpu->arch.fpu_state = (status & MSTATUS_FS);
+		arch_curr_cpu()->arch.fpu_state = (status & MSTATUS_FS);
 	}
 }
 
@@ -82,7 +82,7 @@ static void z_riscv_fpu_load(void)
 		 "must be called with FPU access disabled");
 
 	/* become new owner */
-	atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
+	atomic_ptr_set(&arch_curr_cpu()->arch.fpu_owner, _current);
 
 	/* restore our content */
 	csr_set(mstatus, MSTATUS_FS_INIT);
@@ -105,10 +105,10 @@ void arch_flush_local_fpu(void)
 	__ASSERT((csr_read(mstatus) & MSTATUS_FS) == 0,
 		 "must be called with FPU access disabled");
 
-	struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);
+	struct k_thread *owner = atomic_ptr_get(&arch_curr_cpu()->arch.fpu_owner);
 
 	if (owner != NULL) {
-		bool dirty = (_current_cpu->arch.fpu_state == MSTATUS_FS_DIRTY);
+		bool dirty = (arch_curr_cpu()->arch.fpu_state == MSTATUS_FS_DIRTY);
 
 		if (dirty) {
 			/* turn on FPU access */
@@ -124,7 +124,7 @@ void arch_flush_local_fpu(void)
 		csr_clear(mstatus, MSTATUS_FS);
 
 		/* release ownership */
-		atomic_ptr_clear(&_current_cpu->arch.fpu_owner);
+		atomic_ptr_clear(&arch_curr_cpu()->arch.fpu_owner);
 		DBG("disable", owner);
 	}
 }
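
For context, the flush path above follows the usual lazy-FPU pattern: the registers are written back only when the hardware marked them dirty, FPU access is then turned off so the next user traps, and ownership is dropped. A condensed sketch of that sequence, assuming the helpers (`z_riscv_fpu_save()`, the `MSTATUS_FS_*` values) behave as in the surrounding file; per the RISC-V privileged spec, `mstatus.FS` moves to Dirty whenever FP state is written, so a save is needed only in that case:

```c
/* Condensed sketch (not the kernel's exact code) of the flush logic,
 * with cpu/owner standing in for the values the real function reads.
 */
static void flush_sketch(struct _cpu *cpu, struct k_thread *owner)
{
	if (cpu->arch.fpu_state == MSTATUS_FS_DIRTY) {
		/* turn FPU access back on so we can read the registers */
		csr_set(mstatus, MSTATUS_FS_CLEAN);
		/* spill the owner's registers to its thread context */
		z_riscv_fpu_save(&owner->arch.saved_fp_context);
	}
	csr_clear(mstatus, MSTATUS_FS);		/* further FPU use traps */
	atomic_ptr_clear(&cpu->arch.fpu_owner);	/* nobody owns it now */
}
```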
@@ -147,7 +147,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 			continue;
 		}
 		/* we found it live on CPU i */
-		if (i == _current_cpu->id) {
+		if (i == arch_curr_cpu()->id) {
 			z_riscv_fpu_disable();
 			arch_flush_local_fpu();
 			break;
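
The hunk above is the local-CPU arm of a scan over all CPUs looking for the one whose FPU currently holds `thread`'s registers. A sketch of that scan, assuming the loop iterates over `_kernel.cpus[]` as the visible lines suggest; how the remote-owner case is handled is not shown in this diff and is elided here too:

```c
/* Illustrative sketch of the ownership scan around the hunk above,
 * not the file's exact code.
 */
unsigned int num_cpus = arch_num_cpus();

for (unsigned int i = 0; i < num_cpus; i++) {
	if (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) != thread) {
		continue;	/* CPU i doesn't hold this thread's FPU state */
	}
	if (i == arch_curr_cpu()->id) {
		/* it's live right here: flush synchronously */
		z_riscv_fpu_disable();
		arch_flush_local_fpu();
	}
	break;	/* a thread's state lives on at most one CPU */
}
```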
@@ -258,7 +258,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 
 	if (_current->arch.exception_depth == exc_update_level) {
 		/* We're about to execute non-exception code */
-		if (_current_cpu->arch.fpu_owner == _current) {
+		if (arch_curr_cpu()->arch.fpu_owner == _current) {
 			/* everything is already in place */
 			return true;
 		}
@@ -276,7 +276,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 			flush_owned_fpu(_current);
 #endif
 			z_riscv_fpu_load();
-			_current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;
+			arch_curr_cpu()->arch.fpu_state = MSTATUS_FS_CLEAN;
 			return true;
 		}
 		return false;
@@ -297,7 +297,7 @@ void z_riscv_fpu_exit_exc(struct arch_esf *esf)
 {
 	if (fpu_access_allowed(1)) {
 		esf->mstatus &= ~MSTATUS_FS;
-		esf->mstatus |= _current_cpu->arch.fpu_state;
+		esf->mstatus |= arch_curr_cpu()->arch.fpu_state;
 	} else {
 		esf->mstatus &= ~MSTATUS_FS;
 	}
@@ -313,7 +313,7 @@ void z_riscv_fpu_thread_context_switch(void)
 {
 	if (fpu_access_allowed(0)) {
 		csr_clear(mstatus, MSTATUS_FS);
-		csr_set(mstatus, _current_cpu->arch.fpu_state);
+		csr_set(mstatus, arch_curr_cpu()->arch.fpu_state);
 	} else {
 		z_riscv_fpu_disable();
 	}
@@ -327,7 +327,7 @@ int arch_float_disable(struct k_thread *thread)
 #ifdef CONFIG_SMP
 		flush_owned_fpu(thread);
 #else
-		if (thread == _current_cpu->arch.fpu_owner) {
+		if (thread == arch_curr_cpu()->arch.fpu_owner) {
 			z_riscv_fpu_disable();
 			arch_flush_local_fpu();
 		}
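
Finally, `arch_float_disable()` is the backend of Zephyr's public `k_float_disable()` API, so the per-CPU accessor change sits on a path reachable from ordinary application code. A usage sketch, with the caller and thread handle hypothetical:

```c
#include <zephyr/kernel.h>

/* Hypothetical caller: explicitly relinquish FPU ownership for a
 * worker thread via the public wrapper, which lands in
 * arch_float_disable() above.
 */
void drop_fpu_for(struct k_thread *worker)
{
	int ret = k_float_disable(worker);

	if (ret != 0) {
		/* nonzero return, e.g. unsupported on this configuration */
		printk("k_float_disable failed: %d\n", ret);
	}
}
```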