Skip to content

Commit 4475cd8

Browse files
author
Ingo Molnar
committed
sched/balancing: Simplify the sg_status bitmask and use separate ->overloaded and ->overutilized flags
SG_OVERLOADED and SG_OVERUTILIZED flags plus the sg_status bitmask are an
unnecessary complication that only make the code harder to read and slower.

We only ever set them separately:

  thule:~/tip> git grep SG_OVER kernel/sched/
  kernel/sched/fair.c:            set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
  kernel/sched/fair.c:                    *sg_status |= SG_OVERLOADED;
  kernel/sched/fair.c:                    *sg_status |= SG_OVERUTILIZED;
  kernel/sched/fair.c:                    *sg_status |= SG_OVERLOADED;
  kernel/sched/fair.c:            set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);
  kernel/sched/fair.c:                                       sg_status & SG_OVERUTILIZED);
  kernel/sched/fair.c:    } else if (sg_status & SG_OVERUTILIZED) {
  kernel/sched/fair.c:            set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
  kernel/sched/sched.h:#define SG_OVERLOADED   0x1 /* More than one runnable task on a CPU. */
  kernel/sched/sched.h:#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */
  kernel/sched/sched.h:            set_rd_overloaded(rq->rd, SG_OVERLOADED);

And use them separately, which results in suboptimal code:

  /* update overload indicator if we are at root domain */
  set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);

  /* Update over-utilization (tipping point, U >= 0) indicator */
  set_rd_overutilized_status(env->dst_rq->rd,

Introduce separate sg_overloaded and sg_overutilized flags in
update_sd_lb_stats() and its lower level functions, and change all of
them to 'bool'.

Remove the now unused SG_OVERLOADED and SG_OVERUTILIZED flags.

Signed-off-by: Ingo Molnar <[email protected]>
Acked-by: Shrikanth Hegde <[email protected]>
Tested-by: Shrikanth Hegde <[email protected]>
Cc: Qais Yousef <[email protected]>
Cc: Vincent Guittot <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Link: https://lore.kernel.org/r/ZgVPhODZ8/[email protected]
1 parent 4d0a63e commit 4475cd8

File tree

2 files changed

+24
-29
lines changed

2 files changed

+24
-29
lines changed

kernel/sched/fair.c

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6688,19 +6688,18 @@ static inline bool cpu_overutilized(int cpu)
66886688
/*
66896689
* overutilized value make sense only if EAS is enabled
66906690
*/
6691-
static inline int is_rd_overutilized(struct root_domain *rd)
6691+
static inline bool is_rd_overutilized(struct root_domain *rd)
66926692
{
66936693
return !sched_energy_enabled() || READ_ONCE(rd->overutilized);
66946694
}
66956695

6696-
static inline void set_rd_overutilized(struct root_domain *rd,
6697-
unsigned int status)
6696+
static inline void set_rd_overutilized(struct root_domain *rd, bool flag)
66986697
{
66996698
if (!sched_energy_enabled())
67006699
return;
67016700

6702-
WRITE_ONCE(rd->overutilized, status);
6703-
trace_sched_overutilized_tp(rd, !!status);
6701+
WRITE_ONCE(rd->overutilized, flag);
6702+
trace_sched_overutilized_tp(rd, flag);
67046703
}
67056704

67066705
static inline void check_update_overutilized_status(struct rq *rq)
@@ -6711,7 +6710,7 @@ static inline void check_update_overutilized_status(struct rq *rq)
67116710
*/
67126711

67136712
if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
6714-
set_rd_overutilized(rq->rd, SG_OVERUTILIZED);
6713+
set_rd_overutilized(rq->rd, 1);
67156714
}
67166715
#else
67176716
static inline void check_update_overutilized_status(struct rq *rq) { }
@@ -9934,13 +9933,15 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
99349933
* @sds: Load-balancing data with statistics of the local group.
99359934
* @group: sched_group whose statistics are to be updated.
99369935
* @sgs: variable to hold the statistics for this group.
9937-
* @sg_status: Holds flag indicating the status of the sched_group
9936+
* @sg_overloaded: sched_group is overloaded
9937+
* @sg_overutilized: sched_group is overutilized
99389938
*/
99399939
static inline void update_sg_lb_stats(struct lb_env *env,
99409940
struct sd_lb_stats *sds,
99419941
struct sched_group *group,
99429942
struct sg_lb_stats *sgs,
9943-
int *sg_status)
9943+
bool *sg_overloaded,
9944+
bool *sg_overutilized)
99449945
{
99459946
int i, nr_running, local_group;
99469947

@@ -9961,10 +9962,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
99619962
sgs->sum_nr_running += nr_running;
99629963

99639964
if (nr_running > 1)
9964-
*sg_status |= SG_OVERLOADED;
9965+
*sg_overloaded = 1;
99659966

99669967
if (cpu_overutilized(i))
9967-
*sg_status |= SG_OVERUTILIZED;
9968+
*sg_overutilized = 1;
99689969

99699970
#ifdef CONFIG_NUMA_BALANCING
99709971
sgs->nr_numa_running += rq->nr_numa_running;
@@ -9986,7 +9987,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
99869987
/* Check for a misfit task on the cpu */
99879988
if (sgs->group_misfit_task_load < rq->misfit_task_load) {
99889989
sgs->group_misfit_task_load = rq->misfit_task_load;
9989-
*sg_status |= SG_OVERLOADED;
9990+
*sg_overloaded = 1;
99909991
}
99919992
} else if (env->idle && sched_reduced_capacity(rq, env->sd)) {
99929993
/* Check for a task running on a CPU with reduced capacity */
@@ -10612,7 +10613,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
1061210613
struct sg_lb_stats *local = &sds->local_stat;
1061310614
struct sg_lb_stats tmp_sgs;
1061410615
unsigned long sum_util = 0;
10615-
int sg_status = 0;
10616+
bool sg_overloaded = 0, sg_overutilized = 0;
1061610617

1061710618
do {
1061810619
struct sg_lb_stats *sgs = &tmp_sgs;
@@ -10628,7 +10629,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
1062810629
update_group_capacity(env->sd, env->dst_cpu);
1062910630
}
1063010631

10631-
update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
10632+
update_sg_lb_stats(env, sds, sg, sgs, &sg_overloaded, &sg_overutilized);
1063210633

1063310634
if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
1063410635
sds->busiest = sg;
@@ -10657,13 +10658,12 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
1065710658

1065810659
if (!env->sd->parent) {
1065910660
/* update overload indicator if we are at root domain */
10660-
set_rd_overloaded(env->dst_rq->rd, sg_status & SG_OVERLOADED);
10661+
set_rd_overloaded(env->dst_rq->rd, sg_overloaded);
1066110662

1066210663
/* Update over-utilization (tipping point, U >= 0) indicator */
10663-
set_rd_overutilized(env->dst_rq->rd,
10664-
sg_status & SG_OVERUTILIZED);
10665-
} else if (sg_status & SG_OVERUTILIZED) {
10666-
set_rd_overutilized(env->dst_rq->rd, SG_OVERUTILIZED);
10664+
set_rd_overutilized(env->dst_rq->rd, sg_overutilized);
10665+
} else if (sg_overutilized) {
10666+
set_rd_overutilized(env->dst_rq->rd, sg_overutilized);
1066710667
}
1066810668

1066910669
update_idle_cpu_scan(env, sum_util);

kernel/sched/sched.h

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -713,7 +713,7 @@ struct rt_rq {
713713
} highest_prio;
714714
#endif
715715
#ifdef CONFIG_SMP
716-
int overloaded;
716+
bool overloaded;
717717
struct plist_head pushable_tasks;
718718

719719
#endif /* CONFIG_SMP */
@@ -757,7 +757,7 @@ struct dl_rq {
757757
u64 next;
758758
} earliest_dl;
759759

760-
int overloaded;
760+
bool overloaded;
761761

762762
/*
763763
* Tasks on this rq that can be pushed away. They are kept in
@@ -850,10 +850,6 @@ struct perf_domain {
850850
struct rcu_head rcu;
851851
};
852852

853-
/* Scheduling group status flags */
854-
#define SG_OVERLOADED 0x1 /* More than one runnable task on a CPU. */
855-
#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */
856-
857853
/*
858854
* We add the notion of a root-domain which will be used to define per-domain
859855
* variables. Each exclusive cpuset essentially defines an island domain by
@@ -874,10 +870,10 @@ struct root_domain {
874870
* - More than one runnable task
875871
* - Running task is misfit
876872
*/
877-
int overloaded;
873+
bool overloaded;
878874

879875
/* Indicate one or more cpus over-utilized (tipping point) */
880-
int overutilized;
876+
bool overutilized;
881877

882878
/*
883879
* The bit corresponding to a CPU gets set here if such CPU has more
@@ -2540,9 +2536,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
25402536
}
25412537

25422538
#ifdef CONFIG_SMP
2543-
if (prev_nr < 2 && rq->nr_running >= 2) {
2544-
set_rd_overloaded(rq->rd, SG_OVERLOADED);
2545-
}
2539+
if (prev_nr < 2 && rq->nr_running >= 2)
2540+
set_rd_overloaded(rq->rd, 1);
25462541
#endif
25472542

25482543
sched_update_tick_dependency(rq);

0 commit comments

Comments
 (0)