Skip to content

Commit c9a1ff3

Browse files
Brian Gerst authored and suryasaimadhu committed
x86/stackprotector: Pre-initialize canary for secondary CPUs
The idle tasks created for each secondary CPU already have a random stack canary generated by fork(). Copy the canary to the percpu variable before starting the secondary CPU, which removes the need to call boot_init_stack_canary().

Signed-off-by: Brian Gerst <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent a5ce9f2 commit c9a1ff3

File tree

3 files changed

+14
-14
lines changed

3 files changed

+14
-14
lines changed

arch/x86/include/asm/stackprotector.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,15 @@ static __always_inline void boot_init_stack_canary(void)
9090
#endif
9191
}
9292

93+
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
94+
{
95+
#ifdef CONFIG_X86_64
96+
per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
97+
#else
98+
per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
99+
#endif
100+
}
101+
93102
static inline void setup_stack_canary_segment(int cpu)
94103
{
95104
#ifdef CONFIG_X86_32
@@ -119,6 +128,9 @@ static inline void load_stack_canary_segment(void)
119128
static inline void setup_stack_canary_segment(int cpu)
120129
{ }
121130

131+
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
132+
{ }
133+
122134
static inline void load_stack_canary_segment(void)
123135
{
124136
#ifdef CONFIG_X86_32

arch/x86/kernel/smpboot.c

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,6 @@
5151
#include <linux/err.h>
5252
#include <linux/nmi.h>
5353
#include <linux/tboot.h>
54-
#include <linux/stackprotector.h>
5554
#include <linux/gfp.h>
5655
#include <linux/cpuidle.h>
5756
#include <linux/numa.h>
@@ -80,6 +79,7 @@
8079
#include <asm/cpu_device_id.h>
8180
#include <asm/spec-ctrl.h>
8281
#include <asm/hw_irq.h>
82+
#include <asm/stackprotector.h>
8383

8484
/* representing HT siblings of each logical CPU */
8585
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -259,21 +259,10 @@ static void notrace start_secondary(void *unused)
259259
/* enable local interrupts */
260260
local_irq_enable();
261261

262-
/* to prevent fake stack check failure in clock setup */
263-
boot_init_stack_canary();
264-
265262
x86_cpuinit.setup_percpu_clockev();
266263

267264
wmb();
268265
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
269-
270-
/*
271-
* Prevent tail call to cpu_startup_entry() because the stack protector
272-
* guard has been changed a couple of function calls up, in
273-
* boot_init_stack_canary() and must not be checked before tail calling
274-
* another function.
275-
*/
276-
prevent_tail_call_optimization();
277266
}
278267

279268
/**
@@ -1011,6 +1000,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
10111000
alternatives_enable_smp();
10121001

10131002
per_cpu(current_task, cpu) = idle;
1003+
cpu_init_stack_canary(cpu, idle);
10141004

10151005
/* Initialize the interrupt stack(s) */
10161006
ret = irq_init_percpu_irqstack(cpu);

arch/x86/xen/smp_pv.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,9 +92,7 @@ static void cpu_bringup(void)
9292
asmlinkage __visible void cpu_bringup_and_idle(void)
9393
{
9494
cpu_bringup();
95-
boot_init_stack_canary();
9695
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
97-
prevent_tail_call_optimization();
9896
}
9997

10098
void xen_smp_intr_free_pv(unsigned int cpu)

0 commit comments

Comments (0)