Skip to content

Commit b1def71

Browse files
ycsinnashif
authored and committed
arch: deprecate _current
`_current` is now functionally equal to `arch_current_thread()`; remove its usage in-tree and deprecate it instead of removing it outright, as it has been with us since forever. Signed-off-by: Yong Cong Sin <[email protected]> Signed-off-by: Yong Cong Sin <[email protected]>
1 parent 1a752e8 commit b1def71

File tree

107 files changed

+490
-479
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

107 files changed

+490
-479
lines changed

arch/arc/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ config ARC_CURRENT_THREAD_USE_NO_TLS
262262
RGF_NUM_BANKS the parameter is disabled by-default because banks syncronization
263263
requires significant time, and it slows down performance.
264264
ARCMWDT works with tls pointer in different way then GCC. Optimized access to
265-
TLS pointer via _current variable does not provide significant advantages
265+
TLS pointer via arch_current_thread() does not provide significant advantages
266266
in case of MetaWare.
267267

268268
config GEN_ISR_TABLES

arch/arc/core/fault.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
5555
{
5656
#if defined(CONFIG_MULTITHREADING)
5757
uint32_t guard_end, guard_start;
58-
const struct k_thread *thread = _current;
58+
const struct k_thread *thread = arch_current_thread();
5959

6060
if (!thread) {
6161
/* TODO: Under what circumstances could we get here ? */

arch/arc/core/irq_offload.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,8 +49,8 @@ void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
4949

5050
__asm__ volatile("sync");
5151

52-
/* If _current was aborted in the offload routine, we shouldn't be here */
53-
__ASSERT_NO_MSG((_current->base.thread_state & _THREAD_DEAD) == 0);
52+
/* If arch_current_thread() was aborted in the offload routine, we shouldn't be here */
53+
__ASSERT_NO_MSG((arch_current_thread()->base.thread_state & _THREAD_DEAD) == 0);
5454
}
5555

5656
/* need to be executed on every core in the system */

arch/arc/core/thread.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
210210
#ifdef CONFIG_MULTITHREADING
211211
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
212212
{
213-
*old_thread = _current;
213+
*old_thread = arch_current_thread();
214214

215215
return z_get_next_switch_handle(NULL);
216216
}
@@ -227,16 +227,16 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
227227
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
228228
void *p1, void *p2, void *p3)
229229
{
230-
setup_stack_vars(_current);
230+
setup_stack_vars(arch_current_thread());
231231

232232
/* possible optimizaiton: no need to load mem domain anymore */
233233
/* need to lock cpu here ? */
234-
configure_mpu_thread(_current);
234+
configure_mpu_thread(arch_current_thread());
235235

236236
z_arc_userspace_enter(user_entry, p1, p2, p3,
237-
(uint32_t)_current->stack_info.start,
238-
(_current->stack_info.size -
239-
_current->stack_info.delta), _current);
237+
(uint32_t)arch_current_thread()->stack_info.start,
238+
(arch_current_thread()->stack_info.size -
239+
arch_current_thread()->stack_info.delta), arch_current_thread());
240240
CODE_UNREACHABLE;
241241
}
242242
#endif
@@ -336,7 +336,7 @@ int arc_vpx_lock(k_timeout_t timeout)
336336

337337
id = _current_cpu->id;
338338
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
339-
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
339+
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
340340
#endif
341341
k_spin_unlock(&lock, key);
342342

@@ -355,7 +355,7 @@ void arc_vpx_unlock(void)
355355

356356
key = k_spin_lock(&lock);
357357
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
358-
__ASSERT(!arch_is_in_isr() && (_current->base.cpu_mask == BIT(id)), "");
358+
__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
359359
#endif
360360
id = _current_cpu->id;
361361
k_spin_unlock(&lock, key);

arch/arc/core/tls.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
2929

3030
void *_Preserve_flags _mwget_tls(void)
3131
{
32-
return (void *)(_current->tls);
32+
return (void *)(arch_current_thread()->tls);
3333
}
3434

3535
#else

arch/arm/core/cortex_a_r/fault.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ bool z_arm_fault_undef_instruction_fp(void)
178178
* context because it is about to be overwritten.
179179
*/
180180
if (((_current_cpu->nested == 2)
181-
&& (_current->base.user_options & K_FP_REGS))
181+
&& (arch_current_thread()->base.user_options & K_FP_REGS))
182182
|| ((_current_cpu->nested > 2)
183183
&& (spill_esf->undefined & FPEXC_EN))) {
184184
/*
@@ -196,7 +196,7 @@ bool z_arm_fault_undef_instruction_fp(void)
196196
* means that a thread that uses the VFP does not have to,
197197
* but should, set K_FP_REGS on thread creation.
198198
*/
199-
_current->base.user_options |= K_FP_REGS;
199+
arch_current_thread()->base.user_options |= K_FP_REGS;
200200
}
201201

202202
return false;

arch/arm/core/cortex_a_r/swap.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,14 +17,14 @@
1717
int arch_swap(unsigned int key)
1818
{
1919
/* store off key and return value */
20-
_current->arch.basepri = key;
21-
_current->arch.swap_return_value = -EAGAIN;
20+
arch_current_thread()->arch.basepri = key;
21+
arch_current_thread()->arch.swap_return_value = -EAGAIN;
2222

2323
z_arm_cortex_r_svc();
2424
irq_unlock(key);
2525

2626
/* Context switch is performed here. Returning implies the
2727
* thread has been context-switched-in again.
2828
*/
29-
return _current->arch.swap_return_value;
29+
return arch_current_thread()->arch.swap_return_value;
3030
}

arch/arm/core/cortex_a_r/swap_helper.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)
7070

7171
#if defined(CONFIG_FPU_SHARING)
7272
ldrb r0, [r2, #_thread_offset_to_user_options]
73-
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
73+
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
7474
beq out_fp_inactive
7575

7676
mov ip, #FPEXC_EN
@@ -152,7 +152,7 @@ out_fp_inactive:
152152

153153
#if defined(CONFIG_FPU_SHARING)
154154
ldrb r0, [r2, #_thread_offset_to_user_options]
155-
tst r0, #K_FP_REGS /* _current->base.user_options & K_FP_REGS */
155+
tst r0, #K_FP_REGS /* arch_current_thread()->base.user_options & K_FP_REGS */
156156
beq in_fp_inactive
157157

158158
mov r3, #FPEXC_EN

arch/arm/core/cortex_a_r/thread.c

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -198,8 +198,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
198198
{
199199

200200
/* Set up privileged stack before entering user mode */
201-
_current->arch.priv_stack_start =
202-
(uint32_t)z_priv_stack_find(_current->stack_obj);
201+
arch_current_thread()->arch.priv_stack_start =
202+
(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
203203
#if defined(CONFIG_MPU_STACK_GUARD)
204204
#if defined(CONFIG_THREAD_STACK_INFO)
205205
/* We're dropping to user mode which means the guard area is no
@@ -208,37 +208,37 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
208208
* which accounted for memory borrowed from the thread stack.
209209
*/
210210
#if FP_GUARD_EXTRA_SIZE > 0
211-
if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
212-
_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
213-
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
211+
if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
212+
arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
213+
arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
214214
}
215215
#endif /* FP_GUARD_EXTRA_SIZE */
216-
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
217-
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
216+
arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
217+
arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
218218
#endif /* CONFIG_THREAD_STACK_INFO */
219219

220220
/* Stack guard area reserved at the bottom of the thread's
221221
* privileged stack. Adjust the available (writable) stack
222222
* buffer area accordingly.
223223
*/
224224
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
225-
_current->arch.priv_stack_start +=
226-
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
225+
arch_current_thread()->arch.priv_stack_start +=
226+
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
227227
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
228228
#else
229-
_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
229+
arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
230230
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
231231
#endif /* CONFIG_MPU_STACK_GUARD */
232232

233233
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
234-
_current->arch.priv_stack_end =
235-
_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
234+
arch_current_thread()->arch.priv_stack_end =
235+
arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
236236
#endif
237237

238238
z_arm_userspace_enter(user_entry, p1, p2, p3,
239-
(uint32_t)_current->stack_info.start,
240-
_current->stack_info.size -
241-
_current->stack_info.delta);
239+
(uint32_t)arch_current_thread()->stack_info.start,
240+
arch_current_thread()->stack_info.size -
241+
arch_current_thread()->stack_info.delta);
242242
CODE_UNREACHABLE;
243243
}
244244

@@ -304,7 +304,7 @@ EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
304304
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
305305
{
306306
#if defined(CONFIG_MULTITHREADING)
307-
const struct k_thread *thread = _current;
307+
const struct k_thread *thread = arch_current_thread();
308308

309309
if (thread == NULL) {
310310
return 0;
@@ -314,7 +314,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
314314
#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
315315
defined(CONFIG_MPU_STACK_GUARD)
316316
uint32_t guard_len =
317-
((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
317+
((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
318318
MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
319319
#else
320320
/* If MPU_STACK_GUARD is not enabled, the guard length is
@@ -377,7 +377,7 @@ uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp
377377
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
378378
int arch_float_disable(struct k_thread *thread)
379379
{
380-
if (thread != _current) {
380+
if (thread != arch_current_thread()) {
381381
return -EINVAL;
382382
}
383383

arch/arm/core/cortex_m/swap.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,8 @@
3333
int arch_swap(unsigned int key)
3434
{
3535
/* store off key and return value */
36-
_current->arch.basepri = key;
37-
_current->arch.swap_return_value = -EAGAIN;
36+
arch_current_thread()->arch.basepri = key;
37+
arch_current_thread()->arch.swap_return_value = -EAGAIN;
3838

3939
/* set pending bit to make sure we will take a PendSV exception */
4040
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
@@ -45,5 +45,5 @@ int arch_swap(unsigned int key)
4545
/* Context switch is performed here. Returning implies the
4646
* thread has been context-switched-in again.
4747
*/
48-
return _current->arch.swap_return_value;
48+
return arch_current_thread()->arch.swap_return_value;
4949
}

0 commit comments

Comments
 (0)