
Commit 9216582

vingu-linaro authored and Peter Zijlstra committed
sched/fair: Removed unused cfs_rq.h_nr_delayed
h_nr_delayed is not used anymore. We now have:

- h_nr_runnable, which tracks tasks ready to run
- h_nr_queued, which tracks enqueued tasks, either ready to run or delayed dequeue

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 1a49104 commit 9216582
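
To make the redundancy concrete, here is a minimal standalone sketch of the accounting the commit message describes. The toy_cfs_rq type and toy_* helpers are illustrative stand-ins, not kernel code: a task entering delayed dequeue stays in h_nr_queued but leaves h_nr_runnable, so the delayed population is always the difference between the two counters and needs no counter of its own.

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for struct cfs_rq; field names mirror the commit message. */
struct toy_cfs_rq {
	unsigned int h_nr_queued;	/* enqueued: ready to run or delayed dequeue */
	unsigned int h_nr_runnable;	/* ready to run */
};

/* Enqueue a runnable task: both counters grow. */
static void toy_enqueue(struct toy_cfs_rq *rq)
{
	rq->h_nr_queued++;
	rq->h_nr_runnable++;
}

/* Delayed dequeue: still queued, no longer runnable, mirroring what
 * set_delayed() is left doing after this patch. */
static void toy_set_delayed(struct toy_cfs_rq *rq)
{
	rq->h_nr_runnable--;
}

int main(void)
{
	struct toy_cfs_rq rq = { 0, 0 };

	toy_enqueue(&rq);
	toy_enqueue(&rq);
	toy_set_delayed(&rq);	/* one of the two tasks becomes delayed */

	/* The delayed count is implied; no h_nr_delayed field is needed. */
	assert(rq.h_nr_queued - rq.h_nr_runnable == 1);
	printf("queued=%u runnable=%u delayed=%u\n",
	       rq.h_nr_queued, rq.h_nr_runnable,
	       rq.h_nr_queued - rq.h_nr_runnable);
	return 0;
}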

File tree

3 files changed: 12 additions, 30 deletions

kernel/sched/debug.c

Lines changed: 0 additions & 1 deletion

@@ -846,7 +846,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
-	SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
 			cfs_rq->idle_nr_running);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
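
The only visible effect of this hunk is that the sched debug output loses its h_nr_delayed line. The figure is still recoverable from the two stats that are kept, since h_nr_queued covers both runnable and delayed-dequeue tasks. A hypothetical helper, not part of the patch, just to show the derivation:

/* Hypothetical, not in the patch: the dropped stat derived from the
 * two counters print_cfs_rq() still emits. */
static inline unsigned int cfs_rq_nr_delayed(struct cfs_rq *cfs_rq)
{
	return cfs_rq->h_nr_queued - cfs_rq->h_nr_runnable;
}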

kernel/sched/fair.c

Lines changed: 12 additions & 28 deletions

@@ -5470,7 +5470,6 @@ static void set_delayed(struct sched_entity *se)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		cfs_rq->h_nr_runnable--;
-		cfs_rq->h_nr_delayed++;
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
@@ -5483,7 +5482,6 @@ static void clear_delayed(struct sched_entity *se)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		cfs_rq->h_nr_runnable++;
-		cfs_rq->h_nr_delayed--;
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
@@ -5932,7 +5930,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
+	long queued_delta, runnable_delta, idle_task_delta, dequeue = 1;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5966,7 +5964,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
-	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5991,7 +5988,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
-		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -6015,7 +6011,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
-		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6041,7 +6036,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
+	long queued_delta, runnable_delta, idle_task_delta;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6078,7 +6073,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
-	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6097,7 +6091,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
-		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6116,7 +6109,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
-		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6979,7 +6971,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
-	int h_nr_delayed = 0;
+	int h_nr_runnable = 1;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_queued = rq->cfs.h_nr_queued;
 	u64 slice = 0;
@@ -7006,8 +6998,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
-	if (task_new)
-		h_nr_delayed = !!se->sched_delayed;
+	if (task_new && se->sched_delayed)
+		h_nr_runnable = 0;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
@@ -7029,11 +7021,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_entity(cfs_rq, se, flags);
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable++;
+		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
-		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7055,11 +7045,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable++;
+		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
-		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7122,16 +7110,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_queued = 0;
-	int h_nr_delayed = 0;
+	int h_nr_runnable = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_queued = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
-		if (!task_sleep && !task_delayed)
-			h_nr_delayed = !!se->sched_delayed;
+		if (task_sleep || task_delayed || !se->sched_delayed)
+			h_nr_runnable = 1;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7147,11 +7135,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 			break;
 		}
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable -= h_nr_queued;
+		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
-		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_queued;
@@ -7188,11 +7174,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable -= h_nr_queued;
+		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
-		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_queued;
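
All of the enqueue/dequeue hunks above apply the same conversion: rather than branching on a separate delayed count at every level of the cgroup hierarchy, the task's contribution to h_nr_runnable is decided once up front from its sched_delayed state (on the dequeue side, a task that is sleeping, being delayed-dequeued, or not delayed at all counts as runnable) and then added or subtracted unconditionally per level. A condensed out-of-kernel sketch of the enqueue side, with simplified types (toy_level and sketch_enqueue are illustrative names) and none of the throttling or idle-policy bookkeeping:

#include <assert.h>

/* Toy stand-in for one cfs_rq level in the hierarchy. */
struct toy_level {
	unsigned int h_nr_runnable;
	unsigned int h_nr_queued;
};

/* Mirrors the new enqueue_task_fair() flow: a newly enqueued task that
 * is still in delayed dequeue contributes 0 to h_nr_runnable, any other
 * task contributes 1, at every level it passes through. */
static void sketch_enqueue(struct toy_level *levels, int depth,
			   int task_new, int sched_delayed)
{
	int h_nr_runnable = (task_new && sched_delayed) ? 0 : 1;

	for (int i = 0; i < depth; i++) {
		levels[i].h_nr_runnable += h_nr_runnable;
		levels[i].h_nr_queued++;
	}
}

int main(void)
{
	struct toy_level levels[2] = { { 0, 0 }, { 0, 0 } };

	/* A new task enqueued while still delayed: queued at both levels,
	 * runnable at neither. */
	sketch_enqueue(levels, 2, /*task_new=*/1, /*sched_delayed=*/1);
	assert(levels[0].h_nr_queued == 1 && levels[0].h_nr_runnable == 0);
	assert(levels[1].h_nr_queued == 1 && levels[1].h_nr_runnable == 0);
	return 0;
}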

kernel/sched/sched.h

Lines changed: 0 additions & 1 deletion

@@ -650,7 +650,6 @@ struct cfs_rq {
 	unsigned int h_nr_runnable;	/* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int idle_nr_running;	/* SCHED_IDLE */
 	unsigned int idle_h_nr_running;	/* SCHED_IDLE */
-	unsigned int h_nr_delayed;
 
 	s64 avg_vruntime;
 	u64 avg_load;
