
Commit 736c55a

vingu-linaro authored and Peter Zijlstra committed
sched/fair: Rename cfs_rq.nr_running into nr_queued
Rename cfs_rq.nr_running to cfs_rq.nr_queued, which better reflects reality: the value includes both the ready-to-run tasks and the delayed-dequeue tasks.

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 43eef7c
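
To make the renamed counter's semantics concrete, here is a minimal userspace sketch. It is not kernel code: the structures below are trimmed, hypothetical stand-ins, and account_enqueue()/account_dequeue() only mirror the accounting that account_entity_enqueue()/account_entity_dequeue() do in the diff. The point is that the counter moves on every enqueue and dequeue, including for delayed-dequeue entities, so it counts queued entities rather than running ones.

#include <stdbool.h>
#include <stdio.h>

/* Trimmed, hypothetical stand-ins for the kernel structures; these are
 * not the real layouts, just enough to model the counter. */
struct cfs_rq {
        unsigned int nr_queued;         /* was nr_running before this commit */
};

struct sched_entity {
        bool on_rq;
        bool sched_delayed;             /* delayed dequeue: done running, still queued */
};

/* Mirrors the accounting in account_entity_enqueue()/_dequeue(): the
 * counter moves on every enqueue and dequeue, whether or not the entity
 * is delayed -- hence "queued" rather than "running". */
static void account_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->on_rq = true;
        cfs_rq->nr_queued++;
}

static void account_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->on_rq = false;
        cfs_rq->nr_queued--;
}

int main(void)
{
        struct cfs_rq rq = { .nr_queued = 0 };
        struct sched_entity ready   = { .sched_delayed = false };
        struct sched_entity delayed = { .sched_delayed = true  };

        account_enqueue(&rq, &ready);
        account_enqueue(&rq, &delayed);           /* delayed entity counts too */
        printf("nr_queued = %u\n", rq.nr_queued); /* 2, but only 1 is ready to run */

        account_dequeue(&rq, &delayed);
        printf("nr_queued = %u\n", rq.nr_queued); /* 1 */
        return 0;
}

The sketch prints 2 and then 1: both entities are accounted while queued, even though only one of them is ready to run, which is exactly why "nr_running" was a misnomer.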

File tree

3 files changed (+22, -22 lines)

kernel/sched/debug.c

Lines changed: 1 addition & 1 deletion

@@ -843,7 +843,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                         SPLIT_NS(right_vruntime));
         spread = right_vruntime - left_vruntime;
         SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
-        SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+        SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
         SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
         SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
         SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);

kernel/sched/fair.c

Lines changed: 19 additions & 19 deletions

@@ -915,7 +915,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
          * We can safely skip eligibility check if there is only one entity
          * in this cfs_rq, saving some cycles.
          */
-        if (cfs_rq->nr_running == 1)
+        if (cfs_rq->nr_queued == 1)
                 return curr && curr->on_rq ? curr : se;

         if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))

@@ -1247,7 +1247,7 @@ static void update_curr(struct cfs_rq *cfs_rq)

         account_cfs_rq_runtime(cfs_rq, delta_exec);

-        if (cfs_rq->nr_running == 1)
+        if (cfs_rq->nr_queued == 1)
                 return;

         if (resched || did_preempt_short(cfs_rq, curr)) {

@@ -3673,7 +3673,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 list_add(&se->group_node, &rq->cfs_tasks);
         }
 #endif
-        cfs_rq->nr_running++;
+        cfs_rq->nr_queued++;
 }

 static void

@@ -3686,7 +3686,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 list_del_init(&se->group_node);
         }
 #endif
-        cfs_rq->nr_running--;
+        cfs_rq->nr_queued--;
 }

 /*

@@ -5220,7 +5220,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)

 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
-        return !cfs_rq->nr_running;
+        return !cfs_rq->nr_queued;
 }

 #define UPDATE_TG 0x0

@@ -5276,7 +5276,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          *
          * EEVDF: placement strategy #1 / #2
          */
-        if (sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) {
+        if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) {
                 struct sched_entity *curr = cfs_rq->curr;
                 unsigned long load;

@@ -5423,7 +5423,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                 __enqueue_entity(cfs_rq, se);
         se->on_rq = 1;

-        if (cfs_rq->nr_running == 1) {
+        if (cfs_rq->nr_queued == 1) {
                 check_enqueue_throttle(cfs_rq);
                 if (!throttled_hierarchy(cfs_rq)) {
                         list_add_leaf_cfs_rq(cfs_rq);

@@ -5565,7 +5565,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         if (flags & DEQUEUE_DELAYED)
                 finish_delayed_dequeue_entity(se);

-        if (cfs_rq->nr_running == 0)
+        if (cfs_rq->nr_queued == 0)
                 update_idle_cfs_rq_clock_pelt(cfs_rq);

         return true;

@@ -5913,7 +5913,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
                 list_del_leaf_cfs_rq(cfs_rq);

                 SCHED_WARN_ON(cfs_rq->throttled_clock_self);
-                if (cfs_rq->nr_running)
+                if (cfs_rq->nr_queued)
                         cfs_rq->throttled_clock_self = rq_clock(rq);
         }
         cfs_rq->throttle_count++;

@@ -6022,7 +6022,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
          */
         cfs_rq->throttled = 1;
         SCHED_WARN_ON(cfs_rq->throttled_clock);
-        if (cfs_rq->nr_running)
+        if (cfs_rq->nr_queued)
                 cfs_rq->throttled_clock = rq_clock(rq);
         return true;
 }

@@ -6122,7 +6122,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
         assert_list_leaf_cfs_rq(rq);

         /* Determine whether we need to wake up potentially idle CPU: */
-        if (rq->curr == rq->idle && rq->cfs.nr_running)
+        if (rq->curr == rq->idle && rq->cfs.nr_queued)
                 resched_curr(rq);
 }

@@ -6423,7 +6423,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         if (!cfs_bandwidth_used())
                 return;

-        if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
+        if (!cfs_rq->runtime_enabled || cfs_rq->nr_queued)
                 return;

         __return_cfs_rq_runtime(cfs_rq);

@@ -6941,14 +6941,14 @@ requeue_delayed_entity(struct sched_entity *se)
         if (sched_feat(DELAY_ZERO)) {
                 update_entity_lag(cfs_rq, se);
                 if (se->vlag > 0) {
-                        cfs_rq->nr_running--;
+                        cfs_rq->nr_queued--;
                         if (se != cfs_rq->curr)
                                 __dequeue_entity(cfs_rq, se);
                         se->vlag = 0;
                         place_entity(cfs_rq, se, 0);
                         if (se != cfs_rq->curr)
                                 __enqueue_entity(cfs_rq, se);
-                        cfs_rq->nr_running++;
+                        cfs_rq->nr_queued++;
                 }
         }

@@ -8873,7 +8873,7 @@ static struct task_struct *pick_task_fair(struct rq *rq)

 again:
         cfs_rq = &rq->cfs;
-        if (!cfs_rq->nr_running)
+        if (!cfs_rq->nr_queued)
                 return NULL;

         do {

@@ -8990,7 +8990,7 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_stru

 static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
 {
-        return !!dl_se->rq->cfs.nr_running;
+        return !!dl_se->rq->cfs.nr_queued;
 }

 static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)

@@ -9780,7 +9780,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
                 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
                         update_tg_load_avg(cfs_rq);

-                        if (cfs_rq->nr_running == 0)
+                        if (cfs_rq->nr_queued == 0)
                                 update_idle_cfs_rq_clock_pelt(cfs_rq);

                         if (cfs_rq == &rq->cfs)

@@ -12949,7 +12949,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
          * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
          * if we need to give up the CPU.
          */
-        if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
+        if (rq->core->core_forceidle_count && rq->cfs.nr_queued == 1 &&
             __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
                 resched_curr(rq);
 }

@@ -13093,7 +13093,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
         if (!task_on_rq_queued(p))
                 return;

-        if (rq->cfs.nr_running == 1)
+        if (rq->cfs.nr_queued == 1)
                 return;

         /*

kernel/sched/sched.h

Lines changed: 2 additions & 2 deletions

@@ -645,7 +645,7 @@ struct balance_callback {
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
         struct load_weight      load;
-        unsigned int            nr_running;
+        unsigned int            nr_queued;
         unsigned int            h_nr_queued;    /* SCHED_{NORMAL,BATCH,IDLE} */
         unsigned int            h_nr_runnable;  /* SCHED_{NORMAL,BATCH,IDLE} */
         unsigned int            h_nr_idle;      /* SCHED_IDLE */

@@ -2565,7 +2565,7 @@ static inline bool sched_rt_runnable(struct rq *rq)

 static inline bool sched_fair_runnable(struct rq *rq)
 {
-        return rq->cfs.nr_running > 0;
+        return rq->cfs.nr_queued > 0;
 }

 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
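
For orientation on how nr_queued sits next to the hierarchical counters declared above: as I read the surrounding series (my interpretation, not something stated in this commit), h_nr_queued counts all tasks queued in the hierarchy including delayed-dequeue ones, while h_nr_runnable excludes them, so their difference gives the number of delayed tasks. A small self-contained sketch with made-up numbers:

#include <assert.h>
#include <stdio.h>

/* Hypothetical, trimmed mirror of the struct cfs_rq counters above. */
struct cfs_rq_counters {
        unsigned int nr_queued;       /* entities on this cfs_rq, incl. delayed */
        unsigned int h_nr_queued;     /* tasks in the hierarchy, incl. delayed */
        unsigned int h_nr_runnable;   /* tasks in the hierarchy, excl. delayed */
};

int main(void)
{
        /* Made-up example: three tasks directly on this cfs_rq, one of
         * them sitting in delayed-dequeue state. */
        struct cfs_rq_counters c = {
                .nr_queued = 3, .h_nr_queued = 3, .h_nr_runnable = 2,
        };

        /* Assumed invariant: runnable is queued minus delayed. */
        assert(c.h_nr_runnable <= c.h_nr_queued);
        printf("delayed tasks: %u\n", c.h_nr_queued - c.h_nr_runnable); /* 1 */
        return 0;
}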
