@@ -915,7 +915,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	 * We can safely skip eligibility check if there is only one entity
 	 * in this cfs_rq, saving some cycles.
 	 */
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_queued == 1)
 		return curr && curr->on_rq ? curr : se;
 
 	if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
@@ -1247,7 +1247,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_queued == 1)
 		return;
 
 	if (resched || did_preempt_short(cfs_rq, curr)) {
@@ -3673,7 +3673,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		list_add(&se->group_node, &rq->cfs_tasks);
 	}
 #endif
-	cfs_rq->nr_running++;
+	cfs_rq->nr_queued++;
 }
 
 static void
@@ -3686,7 +3686,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		list_del_init(&se->group_node);
 	}
 #endif
-	cfs_rq->nr_running--;
+	cfs_rq->nr_queued--;
 }
 
 /*
@@ -5220,7 +5220,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
-	return !cfs_rq->nr_running;
+	return !cfs_rq->nr_queued;
 }
 
 #define UPDATE_TG	0x0
@@ -5276,7 +5276,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *
 	 * EEVDF: placement strategy #1 / #2
 	 */
-	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) {
+	if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) {
 		struct sched_entity *curr = cfs_rq->curr;
 		unsigned long load;
 
@@ -5423,7 +5423,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1) {
+	if (cfs_rq->nr_queued == 1) {
 		check_enqueue_throttle(cfs_rq);
 		if (!throttled_hierarchy(cfs_rq)) {
 			list_add_leaf_cfs_rq(cfs_rq);
@@ -5565,7 +5565,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (flags & DEQUEUE_DELAYED)
 		finish_delayed_dequeue_entity(se);
 
-	if (cfs_rq->nr_running == 0)
+	if (cfs_rq->nr_queued == 0)
 		update_idle_cfs_rq_clock_pelt(cfs_rq);
 
 	return true;
@@ -5913,7 +5913,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 		list_del_leaf_cfs_rq(cfs_rq);
 
 		SCHED_WARN_ON(cfs_rq->throttled_clock_self);
-		if (cfs_rq->nr_running)
+		if (cfs_rq->nr_queued)
 			cfs_rq->throttled_clock_self = rq_clock(rq);
 	}
 	cfs_rq->throttle_count++;
@@ -6022,7 +6022,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	 */
 	cfs_rq->throttled = 1;
 	SCHED_WARN_ON(cfs_rq->throttled_clock);
-	if (cfs_rq->nr_running)
+	if (cfs_rq->nr_queued)
 		cfs_rq->throttled_clock = rq_clock(rq);
 	return true;
 }
@@ -6122,7 +6122,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	assert_list_leaf_cfs_rq(rq);
 
 	/* Determine whether we need to wake up potentially idle CPU: */
-	if (rq->curr == rq->idle && rq->cfs.nr_running)
+	if (rq->curr == rq->idle && rq->cfs.nr_queued)
 		resched_curr(rq);
 }
 
@@ -6423,7 +6423,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
+	if (!cfs_rq->runtime_enabled || cfs_rq->nr_queued)
 		return;
 
 	__return_cfs_rq_runtime(cfs_rq);
@@ -6941,14 +6941,14 @@ requeue_delayed_entity(struct sched_entity *se)
 	if (sched_feat(DELAY_ZERO)) {
 		update_entity_lag(cfs_rq, se);
 		if (se->vlag > 0) {
-			cfs_rq->nr_running--;
+			cfs_rq->nr_queued--;
 			if (se != cfs_rq->curr)
 				__dequeue_entity(cfs_rq, se);
 			se->vlag = 0;
 			place_entity(cfs_rq, se, 0);
 			if (se != cfs_rq->curr)
 				__enqueue_entity(cfs_rq, se);
-			cfs_rq->nr_running++;
+			cfs_rq->nr_queued++;
 		}
 	}
 
@@ -8873,7 +8873,7 @@ static struct task_struct *pick_task_fair(struct rq *rq)
 
 again:
 	cfs_rq = &rq->cfs;
-	if (!cfs_rq->nr_running)
+	if (!cfs_rq->nr_queued)
 		return NULL;
 
 	do {
@@ -8990,7 +8990,7 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_stru
 
 static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
 {
-	return !!dl_se->rq->cfs.nr_running;
+	return !!dl_se->rq->cfs.nr_queued;
 }
 
 static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
@@ -9780,7 +9780,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
 			update_tg_load_avg(cfs_rq);
 
-			if (cfs_rq->nr_running == 0)
+			if (cfs_rq->nr_queued == 0)
 				update_idle_cfs_rq_clock_pelt(cfs_rq);
 
 			if (cfs_rq == &rq->cfs)
@@ -12949,7 +12949,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
 	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
 	 * if we need to give up the CPU.
 	 */
-	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
+	if (rq->core->core_forceidle_count && rq->cfs.nr_queued == 1 &&
 	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
 		resched_curr(rq);
 }
@@ -13093,7 +13093,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 	if (!task_on_rq_queued(p))
 		return;
 
-	if (rq->cfs.nr_running == 1)
+	if (rq->cfs.nr_queued == 1)
 		return;
 
 	/*