Skip to content

Commit 0e3f6c3

Browse files
kudureranganathPeter Zijlstra
authored and committed
sched/topology: Introduce sched_update_asym_prefer_cpu()
A subset of AMD Processors supporting Preferred Core Rankings also feature the ability to dynamically switch these rankings at runtime to bias load balancing towards or away from the LLC domain with larger cache. To support dynamically updating "sg->asym_prefer_cpu" without needing to rebuild the sched domain, introduce sched_update_asym_prefer_cpu() which recomputes the "asym_prefer_cpu" when the core-ranking of a CPU changes. sched_update_asym_prefer_cpu() swaps the "sg->asym_prefer_cpu" with the CPU whose ranking has changed if the new ranking is greater than that of the "asym_prefer_cpu". If the CPU whose ranking has changed is the current "asym_prefer_cpu", it scans the CPUs of the sched groups to find the new "asym_prefer_cpu" and sets it accordingly. get_group() for non-overlapping sched domains returns the sched group for the first CPU in the sched_group_span() which ensures all CPUs in the group see the updated value of "asym_prefer_cpu". Overlapping groups are allocated differently and will require moving the "asym_prefer_cpu" to "sg->sgc" but since the current implementations do not set "SD_ASYM_PACKING" at NUMA domains, skip additional indirection and place a SCHED_WARN_ON() to alert any future users. Signed-off-by: K Prateek Nayak <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 872aa4d commit 0e3f6c3

File tree

2 files changed

+64
-0
lines changed

2 files changed

+64
-0
lines changed

include/linux/sched/topology.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,8 @@ struct sched_domain_topology_level {
195195
};
196196

197197
extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
198+
extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
199+
198200

199201
# define SD_INIT_NAME(type) .name = #type
200202

@@ -223,6 +225,10 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu)
223225
return true;
224226
}
225227

228+
/* Without CONFIG_SMP there are no sched groups, so priority changes need no propagation. */
static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
{
}
231+
226232
#endif /* !CONFIG_SMP */
227233

228234
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)

kernel/sched/topology.c

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1333,6 +1333,64 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
13331333
update_group_capacity(sd, cpu);
13341334
}
13351335

1336+
#ifdef CONFIG_SMP

/*
 * Re-evaluate "sg->asym_prefer_cpu" after arch_asym_cpu_priority() has
 * changed for @cpu from @old_prio to @new_prio, walking every
 * SD_ASYM_PACKING domain that contains @cpu.
 */
void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
{
	struct sched_domain *sd;
	int best_cpu = cpu;

	guard(rcu)();

	for_each_domain(cpu, sd) {
		struct sched_group *sg;
		int scan_cpu;

		if (!(sd->flags & SD_ASYM_PACKING))
			continue;

		/*
		 * Overlapping (NUMA) domains replicate their groups per
		 * node, so each local copy would need this update.
		 *
		 * If this warning fires, consider moving
		 * "sg->asym_prefer_cpu" into "sg->sgc->asym_prefer_cpu",
		 * which all the overlapping groups share.
		 */
		WARN_ON_ONCE(sd->flags & SD_OVERLAP);

		sg = sd->groups;

		if (cpu == sg->asym_prefer_cpu) {
			/*
			 * Ranking has improved: @cpu remains this group's
			 * preferred CPU.
			 */
			if (new_prio >= old_prio)
				continue;

			/* Ranking dropped; rescan the group for the new best. */
			for_each_cpu(scan_cpu, sched_group_span(sg)) {
				if (sched_asym_prefer(scan_cpu, best_cpu))
					best_cpu = scan_cpu;
			}

			WRITE_ONCE(sg->asym_prefer_cpu, best_cpu);
			continue;
		}

		/*
		 * The parent domain is a superset of the current group: if
		 * @cpu does not win at this level, it cannot win at any
		 * higher level either.
		 */
		if (!sched_asym_prefer(cpu, sg->asym_prefer_cpu))
			return;

		WRITE_ONCE(sg->asym_prefer_cpu, cpu);
	}
}

#endif /* CONFIG_SMP */
1393+
13361394
/*
13371395
* Set of available CPUs grouped by their corresponding capacities
13381396
* Each list entry contains a CPU mask reflecting CPUs that share the same

0 commit comments

Comments
 (0)