@@ -2128,7 +2128,7 @@ static void update_numa_stats(struct task_numa_env *env,
 		ns->load += cpu_load(rq);
 		ns->runnable += cpu_runnable(rq);
 		ns->util += cpu_util_cfs(cpu);
-		ns->nr_running += rq->cfs.h_nr_queued;
+		ns->nr_running += rq->cfs.h_nr_runnable;
 		ns->compute_capacity += capacity_of(cpu);
 
 		if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
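
For context on the substitution above and in the hunks below: with the delayed-dequeue feature, a sleeping sched_entity can remain queued until its lag has elapsed, so h_nr_queued also counts those delayed tasks, whereas h_nr_runnable counts only tasks that can actually run. A minimal sketch of that relationship; the struct and helper are illustrative stand-ins, not the real struct cfs_rq:

    /* Illustrative model only; the real counters live in struct cfs_rq. */
    struct cfs_rq_model {
            unsigned int h_nr_queued;       /* all queued tasks, incl. delayed-dequeue */
            unsigned int h_nr_runnable;     /* tasks that can actually be picked to run */
    };

    /* A delayed dequeue leaves the task queued but no longer runnable: */
    static void model_delay_dequeue(struct cfs_rq_model *cfs_rq)
    {
            cfs_rq->h_nr_runnable--;
            /* cfs_rq->h_nr_queued is untouched until the lag elapses. */
    }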
@@ -5394,7 +5394,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * When enqueuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
 	 *   - For group_entity, update its runnable_weight to reflect the new
-	 *     h_nr_queued of its group cfs_rq.
+	 *     h_nr_runnable of its group cfs_rq.
 	 *   - For group_entity, update its weight to reflect the new share of
 	 *     its group cfs_rq
 	 *   - Add its new weight to cfs_rq->load.weight
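
The runnable_weight this comment refers to is the group entity's bookkeeping of how many runnable tasks its child cfs_rq holds. A self-contained paraphrase of how fair.c keeps it in sync; the model_* types are assumptions standing in for the kernel's structs:

    /* Stand-in types; the real ones are in kernel/sched/sched.h. */
    struct model_cfs_rq { unsigned int h_nr_runnable; };

    struct model_se {
            struct model_cfs_rq *my_q;      /* NULL for a plain task */
            unsigned long runnable_weight;
    };

    /* Paraphrase of se_update_runnable(): a group entity mirrors its
     * child cfs_rq's runnable task count. */
    static void model_se_update_runnable(struct model_se *se)
    {
            if (se->my_q)
                    se->runnable_weight = se->my_q->h_nr_runnable;
    }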
@@ -5533,7 +5533,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * When dequeuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
 	 *   - For group_entity, update its runnable_weight to reflect the new
-	 *     h_nr_queued of its group cfs_rq.
+	 *     h_nr_runnable of its group cfs_rq.
 	 *   - Subtract its previous weight from cfs_rq->load.weight.
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
@@ -10332,7 +10332,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
 	 * When there is more than 1 task, the group_overloaded case already
 	 * takes care of cpu with reduced capacity
 	 */
-	if (rq->cfs.h_nr_queued != 1)
+	if (rq->cfs.h_nr_runnable != 1)
 		return false;
 
 	return check_cpu_capacity(rq, sd);
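
The check delegated to above compares the capacity still available for CFS, after RT/IRQ pressure, against the CPU's original capacity scaled by the domain's imbalance_pct. A standalone sketch of that comparison; the flat signature and sample numbers are assumptions, the real check_cpu_capacity() takes struct rq and struct sched_domain:

    #include <stdbool.h>

    /* True when pressure has eaten enough of the CPU's capacity that
     * even a single CFS task may be better placed on another CPU. */
    static bool cpu_capacity_reduced(unsigned long cpu_capacity,  /* left for CFS */
                                     unsigned long orig_capacity, /* e.g. 1024 */
                                     unsigned int imbalance_pct)  /* e.g. 117 */
    {
            return cpu_capacity * imbalance_pct < orig_capacity * 100;
    }

With the assumed values, a CPU of original capacity 1024 with only 800 left for CFS gives 800 * 117 = 93600 < 1024 * 100 = 102400, so the check fires.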
@@ -10367,7 +10367,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		sgs->group_load += load;
 		sgs->group_util += cpu_util_cfs(i);
 		sgs->group_runnable += cpu_runnable(rq);
-		sgs->sum_h_nr_running += rq->cfs.h_nr_queued;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_runnable;
 
 		nr_running = rq->nr_running;
 		sgs->sum_nr_running += nr_running;
@@ -10682,7 +10682,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 		sgs->group_util += cpu_util_without(i, p);
 		sgs->group_runnable += cpu_runnable_without(rq, p);
 		local = task_running_on_cpu(i, p);
-		sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;
+		sgs->sum_h_nr_running += rq->cfs.h_nr_runnable - local;
 
 		nr_running = rq->nr_running - local;
 		sgs->sum_nr_running += nr_running;
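
In this wakeup path, local is 1 only on the CPU where the waking task p is currently running, so p is subtracted from that CPU's counts: the group p would leave must not look busier because of p itself. With assumed numbers, a CPU running p with h_nr_runnable == 3 contributes 3 - 1 = 2, which is exactly what it will hold once p has been placed.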
@@ -11464,7 +11464,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
 		if (rt > env->fbq_type)
 			continue;
 
-		nr_running = rq->cfs.h_nr_queued;
+		nr_running = rq->cfs.h_nr_runnable;
 		if (!nr_running)
 			continue;
 
@@ -11623,7 +11623,7 @@ static int need_active_balance(struct lb_env *env)
 	 * available on dst_cpu.
 	 */
 	if (env->idle &&
-	    (env->src_rq->cfs.h_nr_queued == 1)) {
+	    (env->src_rq->cfs.h_nr_runnable == 1)) {
 		if ((check_cpu_capacity(env->src_rq, sd)) &&
 		    (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
 			return 1;
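
Worked through with assumed big.LITTLE figures: imbalance_pct = 117, capacity_of(src_cpu) = 446 and capacity_of(dst_cpu) = 1024 give 446 * 117 = 52182 < 1024 * 100 = 102400, so even a lone runnable task on the pressured little CPU is actively migrated to the big one.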
@@ -12364,7 +12364,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * If there's a runnable CFS task and the current CPU has reduced
 		 * capacity, kick the ILB to see if there's a better CPU to run on:
 		 */
-		if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
+		if (rq->cfs.h_nr_runnable >= 1 && check_cpu_capacity(rq, sd)) {
 			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 			goto unlock;
 		}