
Commit eefd3da

Andy Ross authored and nashif committed
kernel/smp: arch/x86_64: Address race with CPU migration
Use of the _current_cpu pointer cannot be done safely in a preemptible context. If a thread is preempted and migrates to another CPU, the old CPU record will be wrong.

Add a validation assert to the expression that catches incorrect usages, and fix up the spots where it was wrong (most important being a few uses of _current outside of locks, and the arch_is_in_isr() implementation).

Note that the resulting _current expression now requires locking and is going to be somewhat slower. Longer term it's going to be better to augment the arch API to allow SMP architectures to implement a faster "get current thread pointer" action than this default.

Note also that this change means that "_current" is no longer expressible as an lvalue (long ago, it was just a static variable), so the places where it gets assigned now assign to _current_cpu->current instead.

Signed-off-by: Andy Ross <[email protected]>
1 parent e11262c commit eefd3da
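To make the race described in the commit message concrete, here is a minimal illustrative sketch (not Zephyr code; struct cpu_record, this_cpu(), and unsafe_current() are made-up names) of how a cached per-CPU pointer goes stale when the caller migrates:

/* Illustrative mock only; all names here are hypothetical. */
struct cpu_record {
	struct k_thread_mock *current;        /* thread running on this CPU */
};

struct cpu_record *this_cpu(void);            /* record of the caller's CPU */

struct k_thread_mock *unsafe_current(void)
{
	struct cpu_record *cpu = this_cpu();  /* say this reads CPU 0's record */

	/* If the caller is preempted here and migrated to CPU 1, "cpu"
	 * still points at CPU 0, so the read below returns whatever thread
	 * CPU 0 is now running, not the caller.
	 */
	return cpu->current;
}

The commit closes exactly this window: the SMP _current_cpu macro now asserts that the caller cannot migrate (in an ISR or with interrupts locked), and _current goes through k_current_get(), which locks interrupts around the read.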

7 files changed: +51 -12 lines changed


arch/arc/core/arc_smp.c

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ u64_t z_arc_smp_switch_in_isr(void)
 		_current_cpu->swap_ok = 0;
 		((struct k_thread *)new_thread)->base.cpu =
 			arch_curr_cpu()->id;
-		_current = (struct k_thread *) new_thread;
+		_current_cpu->current = (struct k_thread *) new_thread;
 		ret = new_thread | ((u64_t)(old_thread) << 32);
 	}

arch/x86/include/kernel_arch_func.h

Lines changed: 11 additions & 1 deletion
@@ -19,7 +19,17 @@
 static inline bool arch_is_in_isr(void)
 {
 #ifdef CONFIG_SMP
-	return arch_curr_cpu()->nested != 0;
+	/* On SMP, there is a race vs. the current CPU changing if we
+	 * are preempted. Need to mask interrupts while inspecting
+	 * (note deliberate lack of gcc size suffix on the
+	 * instructions, we need to work with both architectures here)
+	 */
+	bool ret;
+
+	__asm__ volatile ("pushf; cli");
+	ret = arch_curr_cpu()->nested != 0;
+	__asm__ volatile ("popf");
+	return ret;
 #else
 	return _kernel.nested != 0U;
 #endif
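The raw pushf/cli ... popf pair above deliberately avoids GCC size suffixes so the same header serves both 32-bit x86 and x86_64. For comparison only, a rough equivalent written against the portable arch_irq_lock()/arch_irq_unlock() pair would look like the sketch below; this is an illustration, not what the commit does:

/* Hypothetical portable variant, for illustration only. */
static inline bool arch_is_in_isr_sketch(void)
{
	unsigned int key = arch_irq_lock();       /* mask local interrupts */
	bool ret = arch_curr_cpu()->nested != 0;  /* safe: cannot migrate now */

	arch_irq_unlock(key);                     /* restore previous state */
	return ret;
}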

include/kernel_structs.h

Lines changed: 10 additions & 2 deletions
@@ -195,8 +195,16 @@ typedef struct z_kernel _kernel_t;
 extern struct z_kernel _kernel;
 
 #ifdef CONFIG_SMP
-#define _current_cpu (arch_curr_cpu())
-#define _current (arch_curr_cpu()->current)
+
+/* True if the current context can be preempted and migrated to
+ * another SMP CPU.
+ */
+bool z_smp_cpu_mobile(void);
+
+#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
+			arch_curr_cpu(); })
+#define _current k_current_get()
+
 #else
 #define _current_cpu (&_kernel.cpus[0])
 #define _current _kernel.current
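The new _current_cpu definition uses a GCC statement expression so that __ASSERT_NO_MSG(!z_smp_cpu_mobile()) runs on every expansion before the CPU pointer is produced. A standalone mock of that pattern (plain assert() and made-up names, GNU C required for the ({ ... }) extension) shows the mechanics:

/* Standalone mock of the statement-expression guard; not Zephyr code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_cpu { int id; };

static struct mock_cpu cpu0 = { .id = 0 };
static bool mobile;                       /* stands in for z_smp_cpu_mobile() */

static struct mock_cpu *mock_curr_cpu(void) { return &cpu0; }

/* Every use of MOCK_CURRENT_CPU checks that migration is impossible first. */
#define MOCK_CURRENT_CPU ({ assert(!mobile); mock_curr_cpu(); })

int main(void)
{
	mobile = false;                                  /* e.g. IRQs locked */
	printf("cpu id = %d\n", MOCK_CURRENT_CPU->id);   /* assert passes */

	mobile = true;                                   /* preemptible context */
	printf("cpu id = %d\n", MOCK_CURRENT_CPU->id);   /* assert fires here */
	return 0;
}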

kernel/include/kswap.h

Lines changed: 1 addition & 2 deletions
@@ -113,11 +113,10 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 		z_smp_release_global_lock(new_thread);
 	}
 #endif
-	_current = new_thread;
+	_current_cpu->current = new_thread;
 	wait_for_switch(new_thread);
 	arch_switch(new_thread->switch_handle,
 		    &old_thread->switch_handle);
-
 	sys_trace_thread_switched_in();
 }

kernel/init.c

Lines changed: 1 addition & 1 deletion
@@ -512,7 +512,7 @@ FUNC_NORETURN void z_cstart(void)
 # endif
 	};
 
-	_current = &dummy_thread;
+	_current_cpu->current = &dummy_thread;
 #endif
 
 #ifdef CONFIG_USERSPACE

kernel/sched.c

Lines changed: 18 additions & 5 deletions
@@ -720,10 +720,10 @@ void k_sched_lock(void)
 void k_sched_unlock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(_current->base.sched_locked != 0, "");
-	__ASSERT(!arch_is_in_isr(), "");
-
 	LOCKED(&sched_spinlock) {
+		__ASSERT(_current->base.sched_locked != 0, "");
+		__ASSERT(!arch_is_in_isr(), "");
+
 		++_current->base.sched_locked;
 		update_cache(0);
 	}
@@ -751,7 +751,7 @@ struct k_thread *z_get_next_ready_thread(void)
 /* Just a wrapper around _current = xxx with tracing */
 static inline void set_current(struct k_thread *new_thread)
 {
-	_current = new_thread;
+	_current_cpu->current = new_thread;
 }
 
 #ifdef CONFIG_USE_SWITCH
@@ -1270,7 +1270,20 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
 
 k_tid_t z_impl_k_current_get(void)
 {
-	return _current;
+#ifdef CONFIG_SMP
+	/* In SMP, _current is a field read from _current_cpu, which
+	 * can race with preemption before it is read. We must lock
+	 * local interrupts when reading it.
+	 */
+	unsigned int k = arch_irq_lock();
+#endif
+
+	k_tid_t ret = _current_cpu->current;
+
+#ifdef CONFIG_SMP
+	arch_irq_unlock(k);
+#endif
+	return ret;
 }
 
 #ifdef CONFIG_USERSPACE
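With _current mapping to k_current_get() under CONFIG_SMP, ordinary reads of the current thread from application code are unchanged; they simply pay the interrupt lock shown above. A typical use might look like the sketch below (includes assumed for the Zephyr tree of this era):

/* Ordinary application-level use of k_current_get(). */
#include <zephyr.h>
#include <sys/printk.h>

void report_self(void)
{
	k_tid_t me = k_current_get();   /* ID of the calling thread */

	printk("running thread: %p\n", (void *)me);
}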

kernel/smp.c

Lines changed: 9 additions & 0 deletions
@@ -111,4 +111,13 @@ void z_smp_init(void)
 	(void)atomic_set(&start_flag, 1);
 }
 
+bool z_smp_cpu_mobile(void)
+{
+	unsigned int k = arch_irq_lock();
+	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);
+
+	arch_irq_unlock(k);
+	return !pinned;
+}
+
 #endif /* CONFIG_SMP */
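z_smp_cpu_mobile() reports whether the caller could still be preempted and migrated: it is mobile only when it is neither in an ISR nor already running with interrupts locked. A standalone mock of that decision (arch hooks replaced with plain booleans, names hypothetical) behaves as follows:

/* Standalone mock of the z_smp_cpu_mobile() decision; not Zephyr code. */
#include <stdbool.h>
#include <stdio.h>

static bool in_isr;         /* stands in for arch_is_in_isr()       */
static bool irqs_unlocked;  /* stands in for arch_irq_unlocked(key) */

static bool mock_cpu_mobile(void)
{
	/* Pinned when in interrupt context or when the caller already
	 * had interrupts masked; mobile otherwise.
	 */
	bool pinned = in_isr || !irqs_unlocked;

	return !pinned;
}

int main(void)
{
	struct { bool isr; bool unlocked; } cases[] = {
		{ false, true  },  /* plain thread context -> mobile */
		{ false, false },  /* IRQs already locked  -> pinned */
		{ true,  true  },  /* ISR                  -> pinned */
	};

	for (int i = 0; i < 3; i++) {
		in_isr = cases[i].isr;
		irqs_unlocked = cases[i].unlocked;
		printf("case %d: mobile=%d\n", i, mock_cpu_mobile());
	}
	return 0;
}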
