Commit f5b59ea

arch: replace _current_cpu with arch_curr_cpu()
Use `arch_curr_cpu()` directly instead of the `_current_cpu` abstraction. Guard the `z_smp_cpu_mobile()` usage validation with a new Kconfig option, `CONFIG_VALIDATE_ARCH_CURR_CPU`, and move it into the `arch_curr_cpu()` function.

Signed-off-by: Yong Cong Sin <[email protected]>
1 parent 049b243 commit f5b59ea
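For context, the effect of this change is that the cheap per-CPU pointer read and the migration-safety check are decoupled: the `z_smp_cpu_mobile()` assertion is compiled in only when `CONFIG_VALIDATE_ARCH_CURR_CPU` is enabled. Below is a minimal sketch of the resulting accessor shape, not code from this diff; it assumes Zephyr's usual `_kernel.cpus[]` layout, and the fixed index 0 stands in for each architecture's real CPU-index lookup.

/*
 * Sketch only, not the literal Zephyr implementation: the validation
 * described in the commit message, moved inside arch_curr_cpu() and
 * guarded by CONFIG_VALIDATE_ARCH_CURR_CPU.
 */
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
#ifdef CONFIG_VALIDATE_ARCH_CURR_CPU
	/* Reading the per-CPU pointer is only safe while the caller cannot
	 * migrate to another CPU (e.g. IRQs locked, in an ISR, or !SMP).
	 */
	__ASSERT_NO_MSG(!z_smp_cpu_mobile());
#endif
	return &_kernel.cpus[0]; /* placeholder for the arch-specific index */
}

The call sites in the hunks below then read fields such as `->id`, `->nested`, and `->arch.fpu_owner` through this accessor, paying for the validation only in configurations that opt in.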

50 files changed, with 188 additions and 140 deletions.


arch/arc/core/thread.c

Lines changed: 2 additions & 2 deletions
@@ -334,7 +334,7 @@ int arc_vpx_lock(k_timeout_t timeout)
 
 	key = k_spin_lock(&lock);
 
-	id = _current_cpu->id;
+	id = arch_curr_cpu()->id;
 #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
 	__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
 #endif
@@ -357,7 +357,7 @@ void arc_vpx_unlock(void)
 #if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
 	__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
 #endif
-	id = _current_cpu->id;
+	id = arch_curr_cpu()->id;
 	k_spin_unlock(&lock, key);
 
 	/*

arch/arm/core/cortex_a_r/fault.c

Lines changed: 5 additions & 5 deletions
@@ -153,21 +153,21 @@ bool z_arm_fault_undef_instruction_fp(void)
 
 	__set_FPEXC(FPEXC_EN);
 
-	if (_current_cpu->nested > 1) {
+	if (arch_curr_cpu()->nested > 1) {
 		/*
 		 * If the nested count is greater than 1, the undefined
 		 * instruction exception came from an irq/svc context. (The
 		 * irq/svc handler would have the nested count at 1 and then
 		 * the undef exception would increment it to 2).
 		 */
 		struct __fpu_sf *spill_esf =
-			(struct __fpu_sf *)_current_cpu->fp_ctx;
+			(struct __fpu_sf *)arch_curr_cpu()->fp_ctx;
 
 		if (spill_esf == NULL) {
 			return false;
 		}
 
-		_current_cpu->fp_ctx = NULL;
+		arch_curr_cpu()->fp_ctx = NULL;
 
 		/*
 		 * If the nested count is 2 and the current thread has used the
@@ -177,9 +177,9 @@ bool z_arm_fault_undef_instruction_fp(void)
 		 * saved exception stack frame, then save the floating point
 		 * context because it is about to be overwritten.
 		 */
-		if (((_current_cpu->nested == 2)
+		if (((arch_curr_cpu()->nested == 2)
 		     && (_current->base.user_options & K_FP_REGS))
-		    || ((_current_cpu->nested > 2)
+		    || ((arch_curr_cpu()->nested > 2)
 		        && (spill_esf->undefined & FPEXC_EN))) {
 			/*
 			 * Spill VFP registers to specified exception stack

arch/arm64/core/fpu.c

Lines changed: 9 additions & 9 deletions
@@ -34,7 +34,7 @@ static void DBG(char *msg, struct k_thread *th)
 	unsigned int v;
 
 	strcpy(buf, "CPU# exc# ");
-	buf[3] = '0' + _current_cpu->id;
+	buf[3] = '0' + arch_curr_cpu()->id;
 	buf[8] = '0' + arch_exception_depth();
 	strcat(buf, _current->name);
 	strcat(buf, ": ");
@@ -68,7 +68,7 @@ void arch_flush_local_fpu(void)
 {
 	__ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled");
 
-	struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);
+	struct k_thread *owner = atomic_ptr_get(&arch_curr_cpu()->arch.fpu_owner);
 
 	if (owner != NULL) {
 		uint64_t cpacr = read_cpacr_el1();
@@ -82,7 +82,7 @@ void arch_flush_local_fpu(void)
 		/* make sure content made it to memory before releasing */
 		barrier_dsync_fence_full();
 		/* release ownership */
-		atomic_ptr_clear(&_current_cpu->arch.fpu_owner);
+		atomic_ptr_clear(&arch_curr_cpu()->arch.fpu_owner);
 		DBG("disable", owner);
 
 		/* disable FPU access */
@@ -106,7 +106,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 			continue;
 		}
 		/* we found it live on CPU i */
-		if (i == _current_cpu->id) {
+		if (i == arch_curr_cpu()->id) {
 			arch_flush_local_fpu();
 		} else {
 			/* the FPU context is live on another CPU */
@@ -235,12 +235,12 @@ void z_arm64_fpu_trap(struct arch_esf *esf)
 	barrier_isync_fence_full();
 
 	/* save current owner's content if any */
-	struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);
+	struct k_thread *owner = atomic_ptr_get(&arch_curr_cpu()->arch.fpu_owner);
 
 	if (owner) {
 		z_arm64_fpu_save(&owner->arch.saved_fp_context);
 		barrier_dsync_fence_full();
-		atomic_ptr_clear(&_current_cpu->arch.fpu_owner);
+		atomic_ptr_clear(&arch_curr_cpu()->arch.fpu_owner);
 		DBG("save", owner);
 	}
 
@@ -264,7 +264,7 @@ void z_arm64_fpu_trap(struct arch_esf *esf)
 #endif
 
 	/* become new owner */
-	atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
+	atomic_ptr_set(&arch_curr_cpu()->arch.fpu_owner, _current);
 
 	/* restore our content */
 	z_arm64_fpu_restore(&_current->arch.saved_fp_context);
@@ -287,7 +287,7 @@ static void fpu_access_update(unsigned int exc_update_level)
 
 	if (arch_exception_depth() == exc_update_level) {
 		/* We're about to execute non-exception code */
-		if (atomic_ptr_get(&_current_cpu->arch.fpu_owner) == _current) {
+		if (atomic_ptr_get(&arch_curr_cpu()->arch.fpu_owner) == _current) {
 			/* turn on FPU access */
 			write_cpacr_el1(cpacr | CPACR_EL1_FPEN_NOTRAP);
 		} else {
@@ -333,7 +333,7 @@ int arch_float_disable(struct k_thread *thread)
 #ifdef CONFIG_SMP
 	flush_owned_fpu(thread);
 #else
-	if (thread == atomic_ptr_get(&_current_cpu->arch.fpu_owner)) {
+	if (thread == atomic_ptr_get(&arch_curr_cpu()->arch.fpu_owner)) {
 		arch_flush_local_fpu();
 	}
 #endif

arch/arm64/core/isr_wrapper.S

Lines changed: 2 additions & 2 deletions
@@ -32,7 +32,7 @@ GDATA(_sw_isr_table)
 GTEXT(_isr_wrapper)
 SECTION_FUNC(TEXT, _isr_wrapper)
 
-	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
+	/* ++arch_curr_cpu()->nested to be checked by arch_is_in_isr() */
 	get_cpu	x0
 	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
 	add	w2, w1, #1
@@ -113,7 +113,7 @@ spurious_continue:
 
 GTEXT(z_arm64_irq_done)
 z_arm64_irq_done:
-	/* if (--_current_cpu->nested != 0) exit */
+	/* if (--arch_curr_cpu()->nested != 0) exit */
 	get_cpu	x0
 	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
 	subs	w1, w1, #1

arch/arm64/core/smp.c

Lines changed: 1 addition & 1 deletion
@@ -283,7 +283,7 @@ void arch_spin_relax(void)
 		 * We may not be in IRQ context here hence cannot use
 		 * arch_flush_local_fpu() directly.
 		 */
-		arch_float_disable(_current_cpu->arch.fpu_owner);
+		arch_float_disable(arch_curr_cpu()->arch.fpu_owner);
 	}
 }
 #endif

arch/arm64/core/switch.S

Lines changed: 1 addition & 1 deletion
@@ -179,7 +179,7 @@ offload:
 	 */
 	ldp	x1, x0, [sp, ___esf_t_x0_x1_OFFSET]
 
-	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
+	/* ++arch_curr_cpu()->nested to be checked by arch_is_in_isr() */
 	get_cpu	x2
 	ldr	w3, [x2, #___cpu_t_nested_OFFSET]
 	add	w4, w3, #1

arch/mips/core/irq_manage.c

Lines changed: 2 additions & 2 deletions
@@ -62,7 +62,7 @@ int arch_irq_is_enabled(unsigned int irq)
 
 void z_mips_enter_irq(uint32_t ipending)
 {
-	_current_cpu->nested++;
+	arch_curr_cpu()->nested++;
 
 #ifdef CONFIG_IRQ_OFFLOAD
 	z_irq_do_offload();
@@ -88,7 +88,7 @@ void z_mips_enter_irq(uint32_t ipending)
 		}
 	}
 
-	_current_cpu->nested--;
+	arch_curr_cpu()->nested--;
 
 	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
 		z_check_stack_sentinel();

arch/mips/include/kernel_arch_func.h

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
 
 static inline bool arch_is_in_isr(void)
 {
-	return _current_cpu->nested != 0U;
+	return arch_curr_cpu()->nested != 0U;
 }
 
 #ifdef CONFIG_IRQ_OFFLOAD
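Taken together, the MIPS hunks above spell out in C the contract that the arm64 and RISC-V assembly in this commit maintains: interrupt entry increments the per-CPU `nested` counter, interrupt exit decrements it, and `arch_is_in_isr()` just tests it against zero. A compressed sketch of that contract, where the `irq_enter()`/`irq_exit()` names are illustrative rather than Zephyr APIs:

/* Illustrative only: the nested-counter contract, not real port code. */
static inline void irq_enter(void)
{
	arch_curr_cpu()->nested++;	/* arch_is_in_isr() now returns true */
}

static inline void irq_exit(void)
{
	arch_curr_cpu()->nested--;	/* thread context again once it reaches 0 */
}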

arch/riscv/core/fpu.c

Lines changed: 12 additions & 12 deletions
@@ -35,7 +35,7 @@ static void DBG(char *msg, struct k_thread *th)
 	unsigned int v;
 
 	strcpy(buf, "CPU# exc# ");
-	buf[3] = '0' + _current_cpu->id;
+	buf[3] = '0' + arch_curr_cpu()->id;
 	buf[8] = '0' + _current->arch.exception_depth;
 	strcat(buf, _current->name);
 	strcat(buf, ": ");
@@ -70,7 +70,7 @@ static void z_riscv_fpu_disable(void)
 		csr_clear(mstatus, MSTATUS_FS);
 
 		/* remember its clean/dirty state */
-		_current_cpu->arch.fpu_state = (status & MSTATUS_FS);
+		arch_curr_cpu()->arch.fpu_state = (status & MSTATUS_FS);
 	}
 }
 
@@ -82,7 +82,7 @@ static void z_riscv_fpu_load(void)
 		 "must be called with FPU access disabled");
 
 	/* become new owner */
-	atomic_ptr_set(&_current_cpu->arch.fpu_owner, _current);
+	atomic_ptr_set(&arch_curr_cpu()->arch.fpu_owner, _current);
 
 	/* restore our content */
 	csr_set(mstatus, MSTATUS_FS_INIT);
@@ -105,10 +105,10 @@ void arch_flush_local_fpu(void)
 	__ASSERT((csr_read(mstatus) & MSTATUS_FS) == 0,
 		 "must be called with FPU access disabled");
 
-	struct k_thread *owner = atomic_ptr_get(&_current_cpu->arch.fpu_owner);
+	struct k_thread *owner = atomic_ptr_get(&arch_curr_cpu()->arch.fpu_owner);
 
 	if (owner != NULL) {
-		bool dirty = (_current_cpu->arch.fpu_state == MSTATUS_FS_DIRTY);
+		bool dirty = (arch_curr_cpu()->arch.fpu_state == MSTATUS_FS_DIRTY);
 
 		if (dirty) {
 			/* turn on FPU access */
@@ -124,7 +124,7 @@ void arch_flush_local_fpu(void)
 		csr_clear(mstatus, MSTATUS_FS);
 
 		/* release ownership */
-		atomic_ptr_clear(&_current_cpu->arch.fpu_owner);
+		atomic_ptr_clear(&arch_curr_cpu()->arch.fpu_owner);
 		DBG("disable", owner);
 	}
 }
@@ -147,7 +147,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 			continue;
 		}
 		/* we found it live on CPU i */
-		if (i == _current_cpu->id) {
+		if (i == arch_curr_cpu()->id) {
 			z_riscv_fpu_disable();
 			arch_flush_local_fpu();
 			break;
@@ -258,7 +258,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 
 	if (_current->arch.exception_depth == exc_update_level) {
 		/* We're about to execute non-exception code */
-		if (_current_cpu->arch.fpu_owner == _current) {
+		if (arch_curr_cpu()->arch.fpu_owner == _current) {
 			/* everything is already in place */
 			return true;
 		}
@@ -276,7 +276,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 		flush_owned_fpu(_current);
 #endif
 		z_riscv_fpu_load();
-		_current_cpu->arch.fpu_state = MSTATUS_FS_CLEAN;
+		arch_curr_cpu()->arch.fpu_state = MSTATUS_FS_CLEAN;
 		return true;
 	}
 	return false;
@@ -297,7 +297,7 @@ void z_riscv_fpu_exit_exc(struct arch_esf *esf)
 {
 	if (fpu_access_allowed(1)) {
 		esf->mstatus &= ~MSTATUS_FS;
-		esf->mstatus |= _current_cpu->arch.fpu_state;
+		esf->mstatus |= arch_curr_cpu()->arch.fpu_state;
 	} else {
 		esf->mstatus &= ~MSTATUS_FS;
 	}
@@ -313,7 +313,7 @@ void z_riscv_fpu_thread_context_switch(void)
 {
 	if (fpu_access_allowed(0)) {
 		csr_clear(mstatus, MSTATUS_FS);
-		csr_set(mstatus, _current_cpu->arch.fpu_state);
+		csr_set(mstatus, arch_curr_cpu()->arch.fpu_state);
 	} else {
 		z_riscv_fpu_disable();
 	}
@@ -327,7 +327,7 @@ int arch_float_disable(struct k_thread *thread)
 #ifdef CONFIG_SMP
 	flush_owned_fpu(thread);
 #else
-	if (thread == _current_cpu->arch.fpu_owner) {
+	if (thread == arch_curr_cpu()->arch.fpu_owner) {
 		z_riscv_fpu_disable();
 		arch_flush_local_fpu();
 	}

arch/riscv/core/isr.S

Lines changed: 7 additions & 7 deletions
@@ -137,7 +137,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
 	.balign CONFIG_RISCV_TRAP_HANDLER_ALIGNMENT
 
 #ifdef CONFIG_USERSPACE
-	/* retrieve address of _current_cpu preserving s0 */
+	/* retrieve address of arch_curr_cpu() preserving s0 */
 	csrrw	s0, mscratch, s0
 
 	/* preserve t0 and t1 temporarily */
@@ -172,7 +172,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
 	lr	t0, _curr_cpu_arch_user_exc_tmp0(s0)
 	lr	t1, _curr_cpu_arch_user_exc_tmp1(s0)
 
-	/* retrieve original s0 and restore _current_cpu in mscratch */
+	/* retrieve original s0 and restore arch_curr_cpu() in mscratch */
 	csrrw	s0, mscratch, s0
 #endif
 
@@ -184,7 +184,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
 	DO_CALLER_SAVED(sr)	;
 #endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
 
-	/* Save s0 in the esf and load it with &_current_cpu. */
+	/* Save s0 in the esf and load it with &arch_curr_cpu(). */
 	sr	s0, __struct_arch_esf_s0_OFFSET(sp)
 	get_current_cpu	s0
 
@@ -476,7 +476,7 @@ do_irq_offload:
 	lr	a1, __struct_arch_esf_a0_OFFSET(sp)
 	lr	a0, __struct_arch_esf_a1_OFFSET(sp)
 
-	/* Increment _current_cpu->nested */
+	/* Increment arch_curr_cpu()->nested */
 	lw	t1, ___cpu_t_nested_OFFSET(s0)
 	addi	t2, t1, 1
 	sw	t2, ___cpu_t_nested_OFFSET(s0)
@@ -592,7 +592,7 @@ is_interrupt:
 2:
 #endif
 
-	/* Increment _current_cpu->nested */
+	/* Increment arch_curr_cpu()->nested */
 	lw	t1, ___cpu_t_nested_OFFSET(s0)
 	addi	t2, t1, 1
 	sw	t2, ___cpu_t_nested_OFFSET(s0)
@@ -654,7 +654,7 @@ on_irq_stack:
 #endif
 
 irq_done:
-	/* Decrement _current_cpu->nested */
+	/* Decrement arch_curr_cpu()->nested */
 	lw	t2, ___cpu_t_nested_OFFSET(s0)
 	addi	t2, t2, -1
 	sw	t2, ___cpu_t_nested_OFFSET(s0)
@@ -698,7 +698,7 @@ reschedule:
 
 z_riscv_thread_start:
might_have_rescheduled:
-	/* reload s0 with &_current_cpu as it might have changed or be unset */
+	/* reload s0 with &arch_curr_cpu() as it might have changed or be unset */
 	get_current_cpu	s0
 
 #endif /* CONFIG_MULTITHREADING */
