Commit 7b8a702

Authored by vingu-linaro (Vincent Guittot) and committed by Peter Zijlstra

sched/fair: Rename h_nr_running into h_nr_queued

With the delayed dequeue feature, a sleeping sched_entity remains queued in the rq until its lag has elapsed, but it can't run. Rename h_nr_running into h_nr_queued to reflect this new behavior.

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 40c3b94 commit 7b8a702
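
To make the renamed counter concrete: after this change, h_nr_queued counts every entity queued in the hierarchy, including delayed-dequeue entities still waiting out their lag, while the number that can actually run is h_nr_queued - h_nr_delayed (the quantity the pelt.c hunk below feeds into the PELT update). Below is a minimal user-space sketch of that relationship; toy_cfs_rq and toy_nr_runnable are illustrative names, not kernel code.

#include <stdio.h>

/* Toy stand-in for the two hierarchical counters touched by this commit. */
struct toy_cfs_rq {
	unsigned int h_nr_queued;	/* queued in the hierarchy, incl. delayed-dequeue entities */
	unsigned int h_nr_delayed;	/* queued but sleeping until their lag elapses */
};

/* Entities that are actually runnable right now. */
static unsigned int toy_nr_runnable(const struct toy_cfs_rq *cfs_rq)
{
	return cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed;
}

int main(void)
{
	struct toy_cfs_rq rq = { .h_nr_queued = 3, .h_nr_delayed = 1 };

	/* Prints: queued=3 runnable=2 */
	printf("queued=%u runnable=%u\n", rq.h_nr_queued, toy_nr_runnable(&rq));
	return 0;
}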

File tree

5 files changed, 53 insertions(+), 53 deletions(-)

kernel/sched/core.c

Lines changed: 2 additions & 2 deletions
@@ -1343,7 +1343,7 @@ bool sched_can_stop_tick(struct rq *rq)
 	if (scx_enabled() && !scx_can_stop_tick(rq))
 		return false;
 
-	if (rq->cfs.h_nr_running > 1)
+	if (rq->cfs.h_nr_queued > 1)
 		return false;
 
 	/*
@@ -6020,7 +6020,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * opportunity to pull in more work from other CPUs.
 	 */
 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
-		   rq->nr_running == rq->cfs.h_nr_running)) {
+		   rq->nr_running == rq->cfs.h_nr_queued)) {
 
 		p = pick_next_task_fair(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))

kernel/sched/debug.c

Lines changed: 3 additions & 3 deletions
@@ -379,7 +379,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
 			return -EINVAL;
 		}
 
-		if (rq->cfs.h_nr_running) {
+		if (rq->cfs.h_nr_queued) {
 			update_rq_clock(rq);
 			dl_server_stop(&rq->fair_server);
 		}
@@ -392,7 +392,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
 			printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
 					cpu_of(rq));
 
-		if (rq->cfs.h_nr_running)
+		if (rq->cfs.h_nr_queued)
 			dl_server_start(&rq->fair_server);
 	}
 
@@ -844,7 +844,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	spread = right_vruntime - left_vruntime;
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
-	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
 			cfs_rq->idle_nr_running);

kernel/sched/fair.c

Lines changed: 44 additions & 44 deletions
@@ -2128,7 +2128,7 @@ static void update_numa_stats(struct task_numa_env *env,
 		ns->load += cpu_load(rq);
 		ns->runnable += cpu_runnable(rq);
 		ns->util += cpu_util_cfs(cpu);
-		ns->nr_running += rq->cfs.h_nr_running;
+		ns->nr_running += rq->cfs.h_nr_queued;
 		ns->compute_capacity += capacity_of(cpu);
 
 		if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
@@ -5394,7 +5394,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * When enqueuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
 	 *   - For group_entity, update its runnable_weight to reflect the new
-	 *     h_nr_running of its group cfs_rq.
+	 *     h_nr_queued of its group cfs_rq.
 	 *   - For group_entity, update its weight to reflect the new share of
 	 *     its group cfs_rq
 	 *   - Add its new weight to cfs_rq->load.weight
@@ -5531,7 +5531,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * When dequeuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
 	 *   - For group_entity, update its runnable_weight to reflect the new
-	 *     h_nr_running of its group cfs_rq.
+	 *     h_nr_queued of its group cfs_rq.
 	 *   - Subtract its previous weight from cfs_rq->load.weight.
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
@@ -5930,8 +5930,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
-	long rq_h_nr_running = rq->cfs.h_nr_running;
+	long queued_delta, idle_task_delta, delayed_delta, dequeue = 1;
+	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	raw_spin_lock(&cfs_b->lock);
 	/* This will start the period timer if necessary */
@@ -5961,7 +5961,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
 	rcu_read_unlock();
 
-	task_delta = cfs_rq->h_nr_running;
+	queued_delta = cfs_rq->h_nr_queued;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
@@ -5983,9 +5983,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		dequeue_entity(qcfs_rq, se, flags);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_running;
+			idle_task_delta = cfs_rq->h_nr_queued;
 
-		qcfs_rq->h_nr_running -= task_delta;
+		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 		qcfs_rq->h_nr_delayed -= delayed_delta;
 
@@ -6006,18 +6006,18 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		se_update_runnable(se);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_running;
+			idle_task_delta = cfs_rq->h_nr_queued;
 
-		qcfs_rq->h_nr_running -= task_delta;
+		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
-	sub_nr_running(rq, task_delta);
+	sub_nr_running(rq, queued_delta);
 
 	/* Stop the fair server if throttling resulted in no runnable tasks */
-	if (rq_h_nr_running && !rq->cfs.h_nr_running)
+	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
 		dl_server_stop(&rq->fair_server);
 done:
 	/*
@@ -6036,8 +6036,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, delayed_delta;
-	long rq_h_nr_running = rq->cfs.h_nr_running;
+	long queued_delta, idle_task_delta, delayed_delta;
+	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -6070,7 +6070,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		goto unthrottle_throttle;
 	}
 
-	task_delta = cfs_rq->h_nr_running;
+	queued_delta = cfs_rq->h_nr_queued;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
@@ -6086,9 +6086,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_running;
+			idle_task_delta = cfs_rq->h_nr_queued;
 
-		qcfs_rq->h_nr_running += task_delta;
+		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
 		qcfs_rq->h_nr_delayed += delayed_delta;
 
@@ -6104,9 +6104,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		se_update_runnable(se);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_running;
+			idle_task_delta = cfs_rq->h_nr_queued;
 
-		qcfs_rq->h_nr_running += task_delta;
+		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
 		qcfs_rq->h_nr_delayed += delayed_delta;
 
@@ -6116,11 +6116,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	}
 
 	/* Start the fair server if un-throttling resulted in new runnable tasks */
-	if (!rq_h_nr_running && rq->cfs.h_nr_running)
+	if (!rq_h_nr_queued && rq->cfs.h_nr_queued)
 		dl_server_start(&rq->fair_server);
 
 	/* At this point se is NULL and we are at root level*/
-	add_nr_running(rq, task_delta);
+	add_nr_running(rq, queued_delta);
 
 unthrottle_throttle:
 	assert_list_leaf_cfs_rq(rq);
@@ -6830,7 +6830,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
 	SCHED_WARN_ON(task_rq(p) != rq);
 
-	if (rq->cfs.h_nr_running > 1) {
+	if (rq->cfs.h_nr_queued > 1) {
 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 		u64 slice = se->slice;
 		s64 delta = slice - ran;
@@ -6973,7 +6973,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
-	int rq_h_nr_running = rq->cfs.h_nr_running;
+	int rq_h_nr_queued = rq->cfs.h_nr_queued;
 	u64 slice = 0;
 
 	/*
@@ -7021,7 +7021,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_entity(cfs_rq, se, flags);
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		cfs_rq->h_nr_running++;
+		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 		cfs_rq->h_nr_delayed += h_nr_delayed;
 
@@ -7045,7 +7045,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		cfs_rq->h_nr_running++;
+		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 		cfs_rq->h_nr_delayed += h_nr_delayed;
 
@@ -7057,7 +7057,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			goto enqueue_throttle;
 	}
 
-	if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+	if (!rq_h_nr_queued && rq->cfs.h_nr_queued) {
 		/* Account for idle runtime */
 		if (!rq->nr_running)
 			dl_server_update_idle_time(rq, rq->curr);
@@ -7104,19 +7104,19 @@ static void set_next_buddy(struct sched_entity *se);
 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 {
 	bool was_sched_idle = sched_idle_rq(rq);
-	int rq_h_nr_running = rq->cfs.h_nr_running;
+	int rq_h_nr_queued = rq->cfs.h_nr_queued;
 	bool task_sleep = flags & DEQUEUE_SLEEP;
 	bool task_delayed = flags & DEQUEUE_DELAYED;
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
-	int h_nr_running = 0;
+	int h_nr_queued = 0;
 	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
-		h_nr_running = 1;
+		h_nr_queued = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
 		if (!task_sleep && !task_delayed)
 			h_nr_delayed = !!se->sched_delayed;
@@ -7135,12 +7135,12 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 			break;
 		}
 
-		cfs_rq->h_nr_running -= h_nr_running;
+		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = h_nr_running;
+			idle_h_nr_running = h_nr_queued;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7174,21 +7174,21 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		cfs_rq->h_nr_running -= h_nr_running;
+		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = h_nr_running;
+			idle_h_nr_running = h_nr_queued;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			return 0;
 	}
 
-	sub_nr_running(rq, h_nr_running);
+	sub_nr_running(rq, h_nr_queued);
 
-	if (rq_h_nr_running && !rq->cfs.h_nr_running)
+	if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
 		dl_server_stop(&rq->fair_server);
 
 	/* balance early to pull high priority tasks */
@@ -10316,7 +10316,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
 	 * When there is more than 1 task, the group_overloaded case already
 	 * takes care of cpu with reduced capacity
 	 */
-	if (rq->cfs.h_nr_running != 1)
+	if (rq->cfs.h_nr_queued != 1)
 		return false;
 
 	return check_cpu_capacity(rq, sd);
@@ -10351,7 +10351,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->group_load += load;
 		sgs->group_util += cpu_util_cfs(i);
 		sgs->group_runnable += cpu_runnable(rq);
-		sgs->sum_h_nr_running += rq->cfs.h_nr_running;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_queued;
 
 		nr_running = rq->nr_running;
 		sgs->sum_nr_running += nr_running;
@@ -10666,7 +10666,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 		sgs->group_util += cpu_util_without(i, p);
 		sgs->group_runnable += cpu_runnable_without(rq, p);
 		local = task_running_on_cpu(i, p);
-		sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;
 
 		nr_running = rq->nr_running - local;
 		sgs->sum_nr_running += nr_running;
@@ -11448,7 +11448,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
 		if (rt > env->fbq_type)
 			continue;
 
-		nr_running = rq->cfs.h_nr_running;
+		nr_running = rq->cfs.h_nr_queued;
 		if (!nr_running)
 			continue;
 
@@ -11607,7 +11607,7 @@ static int need_active_balance(struct lb_env *env)
 	 * available on dst_cpu.
 	 */
 	if (env->idle &&
-	    (env->src_rq->cfs.h_nr_running == 1)) {
+	    (env->src_rq->cfs.h_nr_queued == 1)) {
 		if ((check_cpu_capacity(env->src_rq, sd)) &&
 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
 			return 1;
@@ -12348,7 +12348,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * If there's a runnable CFS task and the current CPU has reduced
 		 * capacity, kick the ILB to see if there's a better CPU to run on:
 		 */
-		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
+		if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 			goto unlock;
 		}
@@ -12835,11 +12835,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	 * have been enqueued in the meantime. Since we're not going idle,
 	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task)
+	if (this_rq->cfs.h_nr_queued && !pulled_task)
 		pulled_task = 1;
 
 	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+	if (this_rq->nr_running != this_rq->cfs.h_nr_queued)
 		pulled_task = -1;
 
 out:
@@ -13526,7 +13526,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 			parent_cfs_rq->idle_nr_running--;
 		}
 
-		idle_task_delta = grp_cfs_rq->h_nr_running -
+		idle_task_delta = grp_cfs_rq->h_nr_queued -
 				  grp_cfs_rq->idle_h_nr_running;
 		if (!cfs_rq_is_idle(grp_cfs_rq))
 			idle_task_delta *= -1;
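
Several of the fair.c hunks above follow the same pattern: a single delta (queued_delta / h_nr_queued) is applied to every cfs_rq on the path from the affected entity up to the root. A hedged sketch of that propagation pattern, using toy types rather than the kernel's structures (toy_cfs_rq and toy_propagate_queued are illustrative names):

#include <stdio.h>

/* Each toy_cfs_rq points at its parent group's cfs_rq; NULL at the root. */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent;
	long h_nr_queued;
};

/*
 * Apply the same delta at every level up to the root, mirroring the
 * for_each_sched_entity() walks in the enqueue/dequeue/throttle hunks.
 */
static void toy_propagate_queued(struct toy_cfs_rq *cfs_rq, long delta)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent)
		cfs_rq->h_nr_queued += delta;
}

int main(void)
{
	struct toy_cfs_rq root = { .parent = NULL, .h_nr_queued = 4 };
	struct toy_cfs_rq group = { .parent = &root, .h_nr_queued = 2 };

	toy_propagate_queued(&group, 1);	/* one more task queued in the group */

	/* Prints: group=3 root=5 */
	printf("group=%ld root=%ld\n", group.h_nr_queued, root.h_nr_queued);
	return 0;
}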

kernel/sched/pelt.c

Lines changed: 2 additions & 2 deletions
@@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
  *
  * group: [ see update_cfs_group() ]
  *   se_weight()   = tg->weight * grq->load_avg / tg->load_avg
- *   se_runnable() = grq->h_nr_running
+ *   se_runnable() = grq->h_nr_queued
  *
  *   runnable_sum = se_runnable() * runnable = grq->runnable_sum
  *   runnable_avg = runnable_sum
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
 	if (___update_load_sum(now, &cfs_rq->avg,
 			scale_load_down(cfs_rq->load.weight),
-			cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
+			cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed,
 			cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1);
