Skip to content

Commit 05db498

Browse files
committed
Merge tag 'sched-urgent-2020-04-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - an uclamp accounting fix

   - three frequency invariance fixes and a readability improvement"

* tag 'sched-urgent-2020-04-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Fix reset-on-fork from RT with uclamp
  x86, sched: Move check for CPU type to caller function
  x86, sched: Don't enable static key when starting secondary CPUs
  x86, sched: Account for CPUs with less than 4 cores in freq. invariance
  x86, sched: Bail out of frequency invariance if base frequency is unknown
2 parents e185880 + eaf5a92 commit 05db498

File tree

2 files changed

+35
-21
lines changed

2 files changed

+35
-21
lines changed

arch/x86/kernel/smpboot.c

Lines changed: 33 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
 	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
-static void init_freq_invariance(void);
+static void init_freq_invariance(bool secondary);
 
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
@@ -185,7 +185,7 @@ static void smp_callin(void)
 	 */
 	set_cpu_sibling_map(raw_smp_processor_id());
 
-	init_freq_invariance();
+	init_freq_invariance(true);
 
 	/*
 	 * Get our bogomips.
@@ -1341,7 +1341,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	set_sched_topology(x86_topology);
 
 	set_cpu_sibling_map(0);
-	init_freq_invariance();
+	init_freq_invariance(false);
 	smp_sanity_check();
 
 	switch (apic_intr_mode) {
@@ -1877,9 +1877,6 @@ static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
 	int err, i;
 	u64 msr;
 
-	if (!x86_match_cpu(has_knl_turbo_ratio_limits))
-		return false;
-
 	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
 	if (err)
 		return false;
@@ -1945,18 +1942,23 @@ static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
 
 static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 {
+	u64 msr;
 	int err;
 
 	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
 	if (err)
 		return false;
 
-	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, turbo_freq);
+	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
 	if (err)
 		return false;
 
-	*base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
-	*turbo_freq = (*turbo_freq >> 24) & 0xFF;   /* 4C turbo    */
+	*base_freq = (*base_freq >> 8) & 0xFF;    /* max P state */
+	*turbo_freq = (msr >> 24) & 0xFF;         /* 4C turbo    */
+
+	/* The CPU may have less than 4 cores */
+	if (!*turbo_freq)
+		*turbo_freq = msr & 0xFF;         /* 1C turbo */
 
 	return true;
 }
@@ -1972,7 +1974,8 @@ static bool intel_set_max_freq_ratio(void)
 	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
 		goto out;
 
-	if (knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
+	if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
+	    knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
 		goto out;
 
 	if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
@@ -1985,13 +1988,22 @@ static bool intel_set_max_freq_ratio(void)
 	return false;
 
 out:
+	/*
+	 * Some hypervisors advertise X86_FEATURE_APERFMPERF
+	 * but then fill all MSR's with zeroes.
+	 */
+	if (!base_freq) {
+		pr_debug("Couldn't determine cpu base frequency, necessary for scale-invariant accounting.\n");
+		return false;
+	}
+
 	arch_turbo_freq_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE,
 					base_freq);
 	arch_set_max_freq_ratio(turbo_disabled());
 	return true;
 }
 
-static void init_counter_refs(void *arg)
+static void init_counter_refs(void)
 {
 	u64 aperf, mperf;
 
@@ -2002,18 +2014,25 @@ static void init_counter_refs(void)
 	this_cpu_write(arch_prev_mperf, mperf);
 }
 
-static void init_freq_invariance(void)
+static void init_freq_invariance(bool secondary)
 {
 	bool ret = false;
 
-	if (smp_processor_id() != 0 || !boot_cpu_has(X86_FEATURE_APERFMPERF))
+	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
 		return;
 
+	if (secondary) {
+		if (static_branch_likely(&arch_scale_freq_key)) {
+			init_counter_refs();
+		}
+		return;
+	}
+
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		ret = intel_set_max_freq_ratio();
 
 	if (ret) {
-		on_each_cpu(init_counter_refs, NULL, 1);
+		init_counter_refs();
 		static_branch_enable(&arch_scale_freq_key);
 	} else {
 		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");

kernel/sched/core.c

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1232,13 +1232,8 @@ static void uclamp_fork(struct task_struct *p)
 		return;
 
 	for_each_clamp_id(clamp_id) {
-		unsigned int clamp_value = uclamp_none(clamp_id);
-
-		/* By default, RT tasks always get 100% boost */
-		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
-			clamp_value = uclamp_none(UCLAMP_MAX);
-
-		uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
+		uclamp_se_set(&p->uclamp_req[clamp_id],
+			      uclamp_none(clamp_id), false);
 	}
 }
0 commit comments

Comments (0)