Commit 1a49104

vingu-linaro authored and Peter Zijlstra committed
sched/fair: Use the new cfs_rq.h_nr_runnable
Use the new h_nr_runnable that tracks only queued and runnable tasks in the statistics that are used to balance the system:

- PELT runnable_avg
- deciding if a group is overloaded or has spare capacity
- numa stats
- reduced capacity management
- load balance
- nohz kick

Note that rq->nr_running still counts delayed-dequeued tasks, since delayed dequeue is a fair-class feature that is meaningless at the core scheduler level.

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
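For context on the counter itself (grounded in the pelt.c and sched.h hunks below, where cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed is replaced by cfs_rq->h_nr_runnable): a delayed-dequeue task stays in h_nr_queued and rq->nr_running but drops out of h_nr_runnable. The following is a minimal standalone C sketch of that bookkeeping; the struct and function names (toy_cfs_rq, toy_enqueue, toy_delay_dequeue) are invented for illustration and are not kernel API.

/* Toy model of the h_nr_queued vs. h_nr_runnable accounting described in the
 * commit message. All names and fields here are invented for illustration. */
#include <assert.h>
#include <stdio.h>

struct toy_cfs_rq {
	unsigned int h_nr_queued;    /* queued tasks, including delayed-dequeue ones */
	unsigned int h_nr_delayed;   /* tasks parked in the delayed-dequeue state */
	unsigned int h_nr_runnable;  /* queued tasks that are actually runnable */
};

static void toy_enqueue(struct toy_cfs_rq *q)
{
	q->h_nr_queued++;
	q->h_nr_runnable++;
}

static void toy_delay_dequeue(struct toy_cfs_rq *q)
{
	/* The task stays queued (and in rq->nr_running), but balancing and
	 * PELT runnable statistics should no longer count it. */
	q->h_nr_delayed++;
	q->h_nr_runnable--;
}

int main(void)
{
	struct toy_cfs_rq q = { 0 };

	toy_enqueue(&q);
	toy_enqueue(&q);
	toy_delay_dequeue(&q);

	/* The invariant the patch relies on: runnable = queued - delayed. */
	assert(q.h_nr_runnable == q.h_nr_queued - q.h_nr_delayed);
	printf("queued=%u delayed=%u runnable=%u\n",
	       q.h_nr_queued, q.h_nr_delayed, q.h_nr_runnable);
	return 0;
}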
1 parent c2a295b commit 1a49104

File tree: 3 files changed, +13 additions, -16 deletions

kernel/sched/fair.c

Lines changed: 9 additions & 9 deletions
@@ -2128,7 +2128,7 @@ static void update_numa_stats(struct task_numa_env *env,
 		ns->load += cpu_load(rq);
 		ns->runnable += cpu_runnable(rq);
 		ns->util += cpu_util_cfs(cpu);
-		ns->nr_running += rq->cfs.h_nr_queued;
+		ns->nr_running += rq->cfs.h_nr_runnable;
 		ns->compute_capacity += capacity_of(cpu);
 
 		if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
@@ -5394,7 +5394,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 * When enqueuing a sched_entity, we must:
	 *   - Update loads to have both entity and cfs_rq synced with now.
	 *   - For group_entity, update its runnable_weight to reflect the new
-	 *     h_nr_queued of its group cfs_rq.
+	 *     h_nr_runnable of its group cfs_rq.
	 *   - For group_entity, update its weight to reflect the new share of
	 *     its group cfs_rq
	 *   - Add its new weight to cfs_rq->load.weight
@@ -5533,7 +5533,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 * When dequeuing a sched_entity, we must:
	 *   - Update loads to have both entity and cfs_rq synced with now.
	 *   - For group_entity, update its runnable_weight to reflect the new
-	 *     h_nr_queued of its group cfs_rq.
+	 *     h_nr_runnable of its group cfs_rq.
	 *   - Subtract its previous weight from cfs_rq->load.weight.
	 *   - For group entity, update its weight to reflect the new share
	 *     of its group cfs_rq.
@@ -10332,7 +10332,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
	 * When there is more than 1 task, the group_overloaded case already
	 * takes care of cpu with reduced capacity
	 */
-	if (rq->cfs.h_nr_queued != 1)
+	if (rq->cfs.h_nr_runnable != 1)
 		return false;
 
 	return check_cpu_capacity(rq, sd);
@@ -10367,7 +10367,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->group_load += load;
 		sgs->group_util += cpu_util_cfs(i);
 		sgs->group_runnable += cpu_runnable(rq);
-		sgs->sum_h_nr_running += rq->cfs.h_nr_queued;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_runnable;
 
 		nr_running = rq->nr_running;
 		sgs->sum_nr_running += nr_running;
@@ -10682,7 +10682,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 		sgs->group_util += cpu_util_without(i, p);
 		sgs->group_runnable += cpu_runnable_without(rq, p);
 		local = task_running_on_cpu(i, p);
-		sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_runnable - local;
 
 		nr_running = rq->nr_running - local;
 		sgs->sum_nr_running += nr_running;
@@ -11464,7 +11464,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
 		if (rt > env->fbq_type)
 			continue;
 
-		nr_running = rq->cfs.h_nr_queued;
+		nr_running = rq->cfs.h_nr_runnable;
 		if (!nr_running)
 			continue;
 
@@ -11623,7 +11623,7 @@ static int need_active_balance(struct lb_env *env)
	 * available on dst_cpu.
	 */
 	if (env->idle &&
-	    (env->src_rq->cfs.h_nr_queued == 1)) {
+	    (env->src_rq->cfs.h_nr_runnable == 1)) {
 		if ((check_cpu_capacity(env->src_rq, sd)) &&
 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
 			return 1;
@@ -12364,7 +12364,7 @@ static void nohz_balancer_kick(struct rq *rq)
		 * If there's a runnable CFS task and the current CPU has reduced
		 * capacity, kick the ILB to see if there's a better CPU to run on:
		 */
-		if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
+		if (rq->cfs.h_nr_runnable >= 1 && check_cpu_capacity(rq, sd)) {
			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
			goto unlock;
		}
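
A rough illustration of why the balance-related checks above switch counters: a CPU whose only CFS task is in the delayed-dequeue state still has rq->nr_running == 1, but it no longer counts as having runnable CFS work, so for example the capacity-based nohz kick should not fire. The standalone sketch below uses invented names (toy_rq, toy_should_kick_ilb, toy_capacity_reduced) and a stubbed capacity check; it only mimics the shape of the new condition and is not kernel code.

/* Why the nohz-kick check uses h_nr_runnable: a CPU whose only CFS task is
 * delayed-dequeued should not trigger a capacity-based kick. Invented names. */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	unsigned int nr_running;     /* still counts delayed-dequeue tasks */
	unsigned int h_nr_runnable;  /* does not */
};

/* stand-in for check_cpu_capacity(): pretend capacity is always reduced */
static bool toy_capacity_reduced(void) { return true; }

static bool toy_should_kick_ilb(const struct toy_rq *rq)
{
	/* mirrors the new condition: a runnable CFS task AND reduced capacity */
	return rq->h_nr_runnable >= 1 && toy_capacity_reduced();
}

int main(void)
{
	/* one task queued, but it is delayed-dequeued, hence not runnable */
	struct toy_rq rq = { .nr_running = 1, .h_nr_runnable = 0 };

	printf("kick ILB? %s\n", toy_should_kick_ilb(&rq) ? "yes" : "no"); /* no */
	return 0;
}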

kernel/sched/pelt.c

Lines changed: 2 additions & 2 deletions
@@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
  *
  *   group: [ see update_cfs_group() ]
  *   se_weight()   = tg->weight * grq->load_avg / tg->load_avg
- *   se_runnable() = grq->h_nr_queued
+ *   se_runnable() = grq->h_nr_runnable
  *
  *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
  *   runnable_avg = runnable_sum
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
 	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
-				cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed,
+				cfs_rq->h_nr_runnable,
				cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1);
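
On the PELT side, the runnable input passed to ___update_load_sum() for a cfs_rq is now h_nr_runnable directly instead of h_nr_queued - h_nr_delayed, which are equal by construction. As a loose, illustration-only sketch of feeding such a per-period runnable count into a geometrically decaying sum: the decay factor, scaling, and names below (toy_update_runnable_sum, DECAY_NUM, DECAY_DEN) are made up and are not the kernel's PELT constants.

/* Illustrative-only: a "number of runnable tasks" signal feeding a decaying
 * sum, loosely in the spirit of PELT's runnable_avg. Constants are invented. */
#include <stdio.h>

#define DECAY_NUM 975   /* toy decay factor ~0.975 per period */
#define DECAY_DEN 1000

static unsigned long toy_update_runnable_sum(unsigned long sum,
					     unsigned int h_nr_runnable)
{
	/* decay the old contribution, then add this period's runnable count */
	sum = sum * DECAY_NUM / DECAY_DEN;
	sum += h_nr_runnable * 1024;   /* toy per-period contribution */
	return sum;
}

int main(void)
{
	unsigned long sum = 0;

	/* two runnable tasks for 10 periods, then one delayed-dequeues:
	 * only the still-runnable task keeps contributing afterwards. */
	for (int i = 0; i < 10; i++)
		sum = toy_update_runnable_sum(sum, 2);
	for (int i = 0; i < 10; i++)
		sum = toy_update_runnable_sum(sum, 1);

	printf("toy runnable_sum = %lu\n", sum);
	return 0;
}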

kernel/sched/sched.h

Lines changed: 2 additions & 5 deletions
@@ -900,11 +900,8 @@ struct dl_rq {
 
 static inline void se_update_runnable(struct sched_entity *se)
 {
-	if (!entity_is_task(se)) {
-		struct cfs_rq *cfs_rq = se->my_q;
-
-		se->runnable_weight = cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed;
-	}
+	if (!entity_is_task(se))
+		se->runnable_weight = se->my_q->h_nr_runnable;
 }
 
 static inline long se_runnable(struct sched_entity *se)
