Skip to content

Commit 1eec89a

Browse files
kudureranganath authored and Peter Zijlstra committed
sched/topology: Remove sched_domain_topology_level::flags
Support for overlapping domains added in commit e3589f6 ("sched: Allow for overlapping sched_domain spans") also allowed forcefully setting SD_OVERLAP for !NUMA domains via the FORCE_SD_OVERLAP sched_feat(). Since NUMA domains had to be presumed overlapping to ensure correct behavior, "sched_domain_topology_level::flags" was introduced. NUMA domains added the SDTL_OVERLAP flag, which would ensure SD_OVERLAP was always added during build_sched_domains() for these domains, even when FORCE_SD_OVERLAP was off. The condition for adding the SD_OVERLAP flag in the aforementioned commit was as follows: if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) sd->flags |= SD_OVERLAP; The FORCE_SD_OVERLAP debug feature was removed in commit af85596 ("sched/topology: Remove FORCE_SD_OVERLAP"), which left the NUMA domains as the exclusive users of the SDTL_OVERLAP, SD_OVERLAP, and SD_NUMA flags. Get rid of SDTL_OVERLAP and SD_OVERLAP as they have become redundant, and instead rely on SD_NUMA to detect the only overlapping domain currently supported. Since SDTL_OVERLAP was the only user of "tl->flags", get rid of "sched_domain_topology_level::flags" too. Signed-off-by: K Prateek Nayak <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent f79c9aa commit 1eec89a

File tree

4 files changed

+13
-23
lines changed

4 files changed

+13
-23
lines changed

include/linux/sched/sd_flags.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -153,14 +153,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
153153
*/
154154
SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
155155

156-
/*
157-
* sched_groups of this level overlap
158-
*
159-
* SHARED_PARENT: Set for all NUMA levels above NODE.
160-
* NEEDS_GROUPS: Overlaps can only exist with more than one group.
161-
*/
162-
SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
163-
164156
/*
165157
* Cross-node balancing
166158
*

include/linux/sched/topology.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -175,8 +175,6 @@ bool cpus_share_resources(int this_cpu, int that_cpu);
175175
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
176176
typedef int (*sched_domain_flags_f)(void);
177177

178-
#define SDTL_OVERLAP 0x01
179-
180178
struct sd_data {
181179
struct sched_domain *__percpu *sd;
182180
struct sched_domain_shared *__percpu *sds;
@@ -187,7 +185,6 @@ struct sd_data {
187185
struct sched_domain_topology_level {
188186
sched_domain_mask_f mask;
189187
sched_domain_flags_f sd_flags;
190-
int flags;
191188
int numa_level;
192189
struct sd_data data;
193190
char *name;

kernel/sched/fair.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9926,9 +9926,9 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
99269926
min_capacity = ULONG_MAX;
99279927
max_capacity = 0;
99289928

9929-
if (child->flags & SD_OVERLAP) {
9929+
if (child->flags & SD_NUMA) {
99309930
/*
9931-
* SD_OVERLAP domains cannot assume that child groups
9931+
* SD_NUMA domains cannot assume that child groups
99329932
* span the current group.
99339933
*/
99349934

@@ -9941,7 +9941,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
99419941
}
99429942
} else {
99439943
/*
9944-
* !SD_OVERLAP domains can assume that child groups
9944+
* !SD_NUMA domains can assume that child groups
99459945
* span the current group.
99469946
*/
99479947

kernel/sched/topology.c

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
8989
break;
9090
}
9191

92-
if (!(sd->flags & SD_OVERLAP) &&
92+
if (!(sd->flags & SD_NUMA) &&
9393
cpumask_intersects(groupmask, sched_group_span(group))) {
9494
printk(KERN_CONT "\n");
9595
printk(KERN_ERR "ERROR: repeated CPUs\n");
@@ -102,7 +102,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
102102
group->sgc->id,
103103
cpumask_pr_args(sched_group_span(group)));
104104

105-
if ((sd->flags & SD_OVERLAP) &&
105+
if ((sd->flags & SD_NUMA) &&
106106
!cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
107107
printk(KERN_CONT " mask=%*pbl",
108108
cpumask_pr_args(group_balance_mask(group)));
@@ -1344,7 +1344,7 @@ void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
13441344
* "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu"
13451345
* which is shared by all the overlapping groups.
13461346
*/
1347-
WARN_ON_ONCE(sd->flags & SD_OVERLAP);
1347+
WARN_ON_ONCE(sd->flags & SD_NUMA);
13481348

13491349
sg = sd->groups;
13501350
if (cpu != sg->asym_prefer_cpu) {
@@ -2016,7 +2016,6 @@ void sched_init_numa(int offline_node)
20162016
for (j = 1; j < nr_levels; i++, j++) {
20172017
tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
20182018
tl[i].numa_level = j;
2019-
tl[i].flags = SDTL_OVERLAP;
20202019
}
20212020

20222021
sched_domain_topology_saved = sched_domain_topology;
@@ -2327,7 +2326,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
23272326

23282327
if (sdd->sd) {
23292328
sd = *per_cpu_ptr(sdd->sd, j);
2330-
if (sd && (sd->flags & SD_OVERLAP))
2329+
if (sd && (sd->flags & SD_NUMA))
23312330
free_sched_groups(sd->groups, 0);
23322331
kfree(*per_cpu_ptr(sdd->sd, j));
23332332
}
@@ -2393,9 +2392,13 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
23932392
id_seen = sched_domains_tmpmask2;
23942393

23952394
for_each_sd_topology(tl) {
2395+
int tl_common_flags = 0;
2396+
2397+
if (tl->sd_flags)
2398+
tl_common_flags = (*tl->sd_flags)();
23962399

23972400
/* NUMA levels are allowed to overlap */
2398-
if (tl->flags & SDTL_OVERLAP)
2401+
if (tl_common_flags & SD_NUMA)
23992402
continue;
24002403

24012404
cpumask_clear(covered);
@@ -2466,8 +2469,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
24662469

24672470
if (tl == sched_domain_topology)
24682471
*per_cpu_ptr(d.sd, i) = sd;
2469-
if (tl->flags & SDTL_OVERLAP)
2470-
sd->flags |= SD_OVERLAP;
24712472
if (cpumask_equal(cpu_map, sched_domain_span(sd)))
24722473
break;
24732474
}
@@ -2480,7 +2481,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
24802481
for_each_cpu(i, cpu_map) {
24812482
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
24822483
sd->span_weight = cpumask_weight(sched_domain_span(sd));
2483-
if (sd->flags & SD_OVERLAP) {
2484+
if (sd->flags & SD_NUMA) {
24842485
if (build_overlap_sched_groups(sd, i))
24852486
goto error;
24862487
} else {

0 commit comments

Comments (0)