
Commit b56e7d4

Author: Peter Zijlstra (Intel)
x86, sched: Don't enable static key when starting secondary CPUs
The static key arch_scale_freq_key only needs to be enabled once (at boot). This change fixes a bug by which the key was enabled every time cpu0 is started, even as a secondary CPU during cpu hotplug. Secondary CPUs are started from the idle thread: setting a static key from there means acquiring a lock and may result in sleeping in the idle task, causing CPU lockup.

Another consequence of this change is that init_counter_refs() is now called on each CPU correctly; previously the function on_each_cpu() was used, but it was called at boot when the only online cpu is cpu0.

[[email protected]: Tested and wrote changelog]
Fixes: 1567c3e ("x86, sched: Add support for frequency invariance")
Reported-by: Chris Wilson <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Giovanni Gherdovich <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Rafael J. Wysocki <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
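For reference, a condensed sketch of the pre-patch logic (taken from the removed lines in the diff below; the pr_debug() fallback branch is omitted and the comments are added here for illustration). It shows the two problems the changelog describes: the function also ran when cpu0 came up as a secondary CPU during hotplug, and on_each_cpu() ran at a point where only cpu0 was online.

/*
 * Pre-patch shape of init_freq_invariance(), condensed for illustration.
 */
static void init_freq_invariance(void)
{
	bool ret = false;

	/*
	 * Called from both native_smp_prepare_cpus() and smp_callin(), so
	 * it also ran whenever cpu0 was started as a secondary CPU during
	 * cpu hotplug, i.e. from the idle thread.
	 */
	if (smp_processor_id() != 0 || !boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		ret = intel_set_max_freq_ratio();

	if (ret) {
		/*
		 * At boot only cpu0 is online, so the other CPUs' reference
		 * counters were never initialized here; and enabling the
		 * static key acquires a lock that may sleep, which the idle
		 * task must not do.
		 */
		on_each_cpu(init_counter_refs, NULL, 1);
		static_branch_enable(&arch_scale_freq_key);
	}
}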
1 parent 23ccee2 commit b56e7d4


arch/x86/kernel/smpboot.c

Lines changed: 14 additions & 7 deletions
@@ -147,7 +147,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
 	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
-static void init_freq_invariance(void);
+static void init_freq_invariance(bool secondary);
 
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
@@ -185,7 +185,7 @@ static void smp_callin(void)
 	 */
 	set_cpu_sibling_map(raw_smp_processor_id());
 
-	init_freq_invariance();
+	init_freq_invariance(true);
 
 	/*
 	 * Get our bogomips.
@@ -1341,7 +1341,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	set_sched_topology(x86_topology);
 
 	set_cpu_sibling_map(0);
-	init_freq_invariance();
+	init_freq_invariance(false);
 	smp_sanity_check();
 
 	switch (apic_intr_mode) {
@@ -2005,7 +2005,7 @@ static bool intel_set_max_freq_ratio(void)
 	return true;
 }
 
-static void init_counter_refs(void *arg)
+static void init_counter_refs(void)
 {
 	u64 aperf, mperf;
 
@@ -2016,18 +2016,25 @@ static void init_counter_refs(void *arg)
 	this_cpu_write(arch_prev_mperf, mperf);
 }
 
-static void init_freq_invariance(void)
+static void init_freq_invariance(bool secondary)
 {
 	bool ret = false;
 
-	if (smp_processor_id() != 0 || !boot_cpu_has(X86_FEATURE_APERFMPERF))
+	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
 		return;
 
+	if (secondary) {
+		if (static_branch_likely(&arch_scale_freq_key)) {
+			init_counter_refs();
+		}
+		return;
+	}
+
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		ret = intel_set_max_freq_ratio();
 
 	if (ret) {
-		on_each_cpu(init_counter_refs, NULL, 1);
+		init_counter_refs();
 		static_branch_enable(&arch_scale_freq_key);
 	} else {
 		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
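Putting the hunks together, the function after this commit reads roughly as follows. This is a sketch reconstructed from the '+' and context lines above; the closing braces fall outside the last hunk, and the comments are added here for illustration.

static void init_freq_invariance(bool secondary)
{
	bool ret = false;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	if (secondary) {
		/*
		 * A CPU coming up after boot only initializes its own
		 * APERF/MPERF reference counters; the static key is never
		 * touched from the idle thread.
		 */
		if (static_branch_likely(&arch_scale_freq_key)) {
			init_counter_refs();
		}
		return;
	}

	/* Boot path: runs once, from native_smp_prepare_cpus(). */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		ret = intel_set_max_freq_ratio();

	if (ret) {
		init_counter_refs();
		static_branch_enable(&arch_scale_freq_key);
	} else {
		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
	}
}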
