Skip to content

Commit c2a295b

Browse files
vingu-linaro authored and Peter Zijlstra committed
sched/fair: Add new cfs_rq.h_nr_runnable
With the delayed dequeue feature, a sleeping sched_entity remains queued in the rq until its lag has elapsed. As a result, it also stays visible in the statistics that are used to balance the system, and in particular in the field cfs.h_nr_queued when the sched_entity is associated with a task. Create a new h_nr_runnable that tracks only queued and runnable tasks. Signed-off-by: Vincent Guittot <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Dietmar Eggemann <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 7b8a702 commit c2a295b

File tree

3 files changed

+20
-2
lines changed

3 files changed

+20
-2
lines changed

kernel/sched/debug.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -844,6 +844,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
844844
spread = right_vruntime - left_vruntime;
845845
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
846846
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
847+
SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
847848
SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
848849
SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
849850
SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",

kernel/sched/fair.c

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5469,6 +5469,7 @@ static void set_delayed(struct sched_entity *se)
54695469
for_each_sched_entity(se) {
54705470
struct cfs_rq *cfs_rq = cfs_rq_of(se);
54715471

5472+
cfs_rq->h_nr_runnable--;
54725473
cfs_rq->h_nr_delayed++;
54735474
if (cfs_rq_throttled(cfs_rq))
54745475
break;
@@ -5481,6 +5482,7 @@ static void clear_delayed(struct sched_entity *se)
54815482
for_each_sched_entity(se) {
54825483
struct cfs_rq *cfs_rq = cfs_rq_of(se);
54835484

5485+
cfs_rq->h_nr_runnable++;
54845486
cfs_rq->h_nr_delayed--;
54855487
if (cfs_rq_throttled(cfs_rq))
54865488
break;
@@ -5930,7 +5932,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
59305932
struct rq *rq = rq_of(cfs_rq);
59315933
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
59325934
struct sched_entity *se;
5933-
long queued_delta, idle_task_delta, delayed_delta, dequeue = 1;
5935+
long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
59345936
long rq_h_nr_queued = rq->cfs.h_nr_queued;
59355937

59365938
raw_spin_lock(&cfs_b->lock);
@@ -5962,6 +5964,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
59625964
rcu_read_unlock();
59635965

59645966
queued_delta = cfs_rq->h_nr_queued;
5967+
runnable_delta = cfs_rq->h_nr_runnable;
59655968
idle_task_delta = cfs_rq->idle_h_nr_running;
59665969
delayed_delta = cfs_rq->h_nr_delayed;
59675970
for_each_sched_entity(se) {
@@ -5986,6 +5989,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
59865989
idle_task_delta = cfs_rq->h_nr_queued;
59875990

59885991
qcfs_rq->h_nr_queued -= queued_delta;
5992+
qcfs_rq->h_nr_runnable -= runnable_delta;
59895993
qcfs_rq->idle_h_nr_running -= idle_task_delta;
59905994
qcfs_rq->h_nr_delayed -= delayed_delta;
59915995

@@ -6009,6 +6013,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
60096013
idle_task_delta = cfs_rq->h_nr_queued;
60106014

60116015
qcfs_rq->h_nr_queued -= queued_delta;
6016+
qcfs_rq->h_nr_runnable -= runnable_delta;
60126017
qcfs_rq->idle_h_nr_running -= idle_task_delta;
60136018
qcfs_rq->h_nr_delayed -= delayed_delta;
60146019
}
@@ -6036,7 +6041,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
60366041
struct rq *rq = rq_of(cfs_rq);
60376042
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
60386043
struct sched_entity *se;
6039-
long queued_delta, idle_task_delta, delayed_delta;
6044+
long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
60406045
long rq_h_nr_queued = rq->cfs.h_nr_queued;
60416046

60426047
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6071,6 +6076,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
60716076
}
60726077

60736078
queued_delta = cfs_rq->h_nr_queued;
6079+
runnable_delta = cfs_rq->h_nr_runnable;
60746080
idle_task_delta = cfs_rq->idle_h_nr_running;
60756081
delayed_delta = cfs_rq->h_nr_delayed;
60766082
for_each_sched_entity(se) {
@@ -6089,6 +6095,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
60896095
idle_task_delta = cfs_rq->h_nr_queued;
60906096

60916097
qcfs_rq->h_nr_queued += queued_delta;
6098+
qcfs_rq->h_nr_runnable += runnable_delta;
60926099
qcfs_rq->idle_h_nr_running += idle_task_delta;
60936100
qcfs_rq->h_nr_delayed += delayed_delta;
60946101

@@ -6107,6 +6114,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
61076114
idle_task_delta = cfs_rq->h_nr_queued;
61086115

61096116
qcfs_rq->h_nr_queued += queued_delta;
6117+
qcfs_rq->h_nr_runnable += runnable_delta;
61106118
qcfs_rq->idle_h_nr_running += idle_task_delta;
61116119
qcfs_rq->h_nr_delayed += delayed_delta;
61126120

@@ -7021,6 +7029,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
70217029
enqueue_entity(cfs_rq, se, flags);
70227030
slice = cfs_rq_min_slice(cfs_rq);
70237031

7032+
if (!h_nr_delayed)
7033+
cfs_rq->h_nr_runnable++;
70247034
cfs_rq->h_nr_queued++;
70257035
cfs_rq->idle_h_nr_running += idle_h_nr_running;
70267036
cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7045,6 +7055,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
70457055
se->slice = slice;
70467056
slice = cfs_rq_min_slice(cfs_rq);
70477057

7058+
if (!h_nr_delayed)
7059+
cfs_rq->h_nr_runnable++;
70487060
cfs_rq->h_nr_queued++;
70497061
cfs_rq->idle_h_nr_running += idle_h_nr_running;
70507062
cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7135,6 +7147,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
71357147
break;
71367148
}
71377149

7150+
if (!h_nr_delayed)
7151+
cfs_rq->h_nr_runnable -= h_nr_queued;
71387152
cfs_rq->h_nr_queued -= h_nr_queued;
71397153
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
71407154
cfs_rq->h_nr_delayed -= h_nr_delayed;
@@ -7174,6 +7188,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
71747188
se->slice = slice;
71757189
slice = cfs_rq_min_slice(cfs_rq);
71767190

7191+
if (!h_nr_delayed)
7192+
cfs_rq->h_nr_runnable -= h_nr_queued;
71777193
cfs_rq->h_nr_queued -= h_nr_queued;
71787194
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
71797195
cfs_rq->h_nr_delayed -= h_nr_delayed;

kernel/sched/sched.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -647,6 +647,7 @@ struct cfs_rq {
647647
struct load_weight load;
648648
unsigned int nr_running;
649649
unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */
650+
unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */
650651
unsigned int idle_nr_running; /* SCHED_IDLE */
651652
unsigned int idle_h_nr_running; /* SCHED_IDLE */
652653
unsigned int h_nr_delayed;

0 commit comments

Comments
 (0)