@@ -2128,7 +2128,7 @@ static void update_numa_stats(struct task_numa_env *env,
                 ns->load += cpu_load(rq);
                 ns->runnable += cpu_runnable(rq);
                 ns->util += cpu_util_cfs(cpu);
-                ns->nr_running += rq->cfs.h_nr_running;
+                ns->nr_running += rq->cfs.h_nr_queued;
                 ns->compute_capacity += capacity_of(cpu);

                 if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
@@ -5394,7 +5394,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * When enqueuing a sched_entity, we must:
          *   - Update loads to have both entity and cfs_rq synced with now.
          *   - For group_entity, update its runnable_weight to reflect the new
-         *     h_nr_running of its group cfs_rq.
+         *     h_nr_queued of its group cfs_rq.
          *   - For group_entity, update its weight to reflect the new share of
          *     its group cfs_rq
          *   - Add its new weight to cfs_rq->load.weight
@@ -5531,7 +5531,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * When dequeuing a sched_entity, we must:
          *   - Update loads to have both entity and cfs_rq synced with now.
          *   - For group_entity, update its runnable_weight to reflect the new
-         *     h_nr_running of its group cfs_rq.
+         *     h_nr_queued of its group cfs_rq.
          *   - Subtract its previous weight from cfs_rq->load.weight.
          *   - For group entity, update its weight to reflect the new share
          *     of its group cfs_rq.
@@ -5930,8 +5930,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
         struct rq *rq = rq_of(cfs_rq);
         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
         struct sched_entity *se;
-        long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
-        long rq_h_nr_running = rq->cfs.h_nr_running;
+        long queued_delta, idle_task_delta, delayed_delta, dequeue = 1;
+        long rq_h_nr_queued = rq->cfs.h_nr_queued;

         raw_spin_lock(&cfs_b->lock);
         /* This will start the period timer if necessary */
@@ -5961,7 +5961,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
         walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
         rcu_read_unlock();

-        task_delta = cfs_rq->h_nr_running;
+        queued_delta = cfs_rq->h_nr_queued;
         idle_task_delta = cfs_rq->idle_h_nr_running;
         delayed_delta = cfs_rq->h_nr_delayed;
         for_each_sched_entity(se) {
@@ -5983,9 +5983,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
                 dequeue_entity(qcfs_rq, se, flags);

                 if (cfs_rq_is_idle(group_cfs_rq(se)))
-                        idle_task_delta = cfs_rq->h_nr_running;
+                        idle_task_delta = cfs_rq->h_nr_queued;

-                qcfs_rq->h_nr_running -= task_delta;
+                qcfs_rq->h_nr_queued -= queued_delta;
                 qcfs_rq->idle_h_nr_running -= idle_task_delta;
                 qcfs_rq->h_nr_delayed -= delayed_delta;

@@ -6006,18 +6006,18 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
                 se_update_runnable(se);

                 if (cfs_rq_is_idle(group_cfs_rq(se)))
-                        idle_task_delta = cfs_rq->h_nr_running;
+                        idle_task_delta = cfs_rq->h_nr_queued;

-                qcfs_rq->h_nr_running -= task_delta;
+                qcfs_rq->h_nr_queued -= queued_delta;
                 qcfs_rq->idle_h_nr_running -= idle_task_delta;
                 qcfs_rq->h_nr_delayed -= delayed_delta;
         }

         /* At this point se is NULL and we are at root level*/
-        sub_nr_running(rq, task_delta);
+        sub_nr_running(rq, queued_delta);

         /* Stop the fair server if throttling resulted in no runnable tasks */
-        if (rq_h_nr_running && !rq->cfs.h_nr_running)
+        if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
                 dl_server_stop(&rq->fair_server);
 done:
         /*
@@ -6036,8 +6036,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
         struct rq *rq = rq_of(cfs_rq);
         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
         struct sched_entity *se;
-        long task_delta, idle_task_delta, delayed_delta;
-        long rq_h_nr_running = rq->cfs.h_nr_running;
+        long queued_delta, idle_task_delta, delayed_delta;
+        long rq_h_nr_queued = rq->cfs.h_nr_queued;

         se = cfs_rq->tg->se[cpu_of(rq)];

@@ -6070,7 +6070,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                 goto unthrottle_throttle;
         }

-        task_delta = cfs_rq->h_nr_running;
+        queued_delta = cfs_rq->h_nr_queued;
         idle_task_delta = cfs_rq->idle_h_nr_running;
         delayed_delta = cfs_rq->h_nr_delayed;
         for_each_sched_entity(se) {
@@ -6086,9 +6086,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);

                 if (cfs_rq_is_idle(group_cfs_rq(se)))
-                        idle_task_delta = cfs_rq->h_nr_running;
+                        idle_task_delta = cfs_rq->h_nr_queued;

-                qcfs_rq->h_nr_running += task_delta;
+                qcfs_rq->h_nr_queued += queued_delta;
                 qcfs_rq->idle_h_nr_running += idle_task_delta;
                 qcfs_rq->h_nr_delayed += delayed_delta;

@@ -6104,9 +6104,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                 se_update_runnable(se);

                 if (cfs_rq_is_idle(group_cfs_rq(se)))
-                        idle_task_delta = cfs_rq->h_nr_running;
+                        idle_task_delta = cfs_rq->h_nr_queued;

-                qcfs_rq->h_nr_running += task_delta;
+                qcfs_rq->h_nr_queued += queued_delta;
                 qcfs_rq->idle_h_nr_running += idle_task_delta;
                 qcfs_rq->h_nr_delayed += delayed_delta;

@@ -6116,11 +6116,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
         }

         /* Start the fair server if un-throttling resulted in new runnable tasks */
-        if (!rq_h_nr_running && rq->cfs.h_nr_running)
+        if (!rq_h_nr_queued && rq->cfs.h_nr_queued)
                 dl_server_start(&rq->fair_server);

         /* At this point se is NULL and we are at root level*/
-        add_nr_running(rq, task_delta);
+        add_nr_running(rq, queued_delta);

 unthrottle_throttle:
         assert_list_leaf_cfs_rq(rq);
@@ -6830,7 +6830,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)

         SCHED_WARN_ON(task_rq(p) != rq);

-        if (rq->cfs.h_nr_running > 1) {
+        if (rq->cfs.h_nr_queued > 1) {
                 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
                 u64 slice = se->slice;
                 s64 delta = slice - ran;
@@ -6973,7 +6973,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         int idle_h_nr_running = task_has_idle_policy(p);
         int h_nr_delayed = 0;
         int task_new = !(flags & ENQUEUE_WAKEUP);
-        int rq_h_nr_running = rq->cfs.h_nr_running;
+        int rq_h_nr_queued = rq->cfs.h_nr_queued;
         u64 slice = 0;

         /*
@@ -7021,7 +7021,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 enqueue_entity(cfs_rq, se, flags);
                 slice = cfs_rq_min_slice(cfs_rq);

-                cfs_rq->h_nr_running++;
+                cfs_rq->h_nr_queued++;
                 cfs_rq->idle_h_nr_running += idle_h_nr_running;
                 cfs_rq->h_nr_delayed += h_nr_delayed;

@@ -7045,7 +7045,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 se->slice = slice;
                 slice = cfs_rq_min_slice(cfs_rq);

-                cfs_rq->h_nr_running++;
+                cfs_rq->h_nr_queued++;
                 cfs_rq->idle_h_nr_running += idle_h_nr_running;
                 cfs_rq->h_nr_delayed += h_nr_delayed;

@@ -7057,7 +7057,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                         goto enqueue_throttle;
         }

-        if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+        if (!rq_h_nr_queued && rq->cfs.h_nr_queued) {
                 /* Account for idle runtime */
                 if (!rq->nr_running)
                         dl_server_update_idle_time(rq, rq->curr);
@@ -7104,19 +7104,19 @@ static void set_next_buddy(struct sched_entity *se);
 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 {
         bool was_sched_idle = sched_idle_rq(rq);
-        int rq_h_nr_running = rq->cfs.h_nr_running;
+        int rq_h_nr_queued = rq->cfs.h_nr_queued;
         bool task_sleep = flags & DEQUEUE_SLEEP;
         bool task_delayed = flags & DEQUEUE_DELAYED;
         struct task_struct *p = NULL;
         int idle_h_nr_running = 0;
-        int h_nr_running = 0;
+        int h_nr_queued = 0;
         int h_nr_delayed = 0;
         struct cfs_rq *cfs_rq;
         u64 slice = 0;

         if (entity_is_task(se)) {
                 p = task_of(se);
-                h_nr_running = 1;
+                h_nr_queued = 1;
                 idle_h_nr_running = task_has_idle_policy(p);
                 if (!task_sleep && !task_delayed)
                         h_nr_delayed = !!se->sched_delayed;
@@ -7135,12 +7135,12 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
                         break;
                 }

-                cfs_rq->h_nr_running -= h_nr_running;
+                cfs_rq->h_nr_queued -= h_nr_queued;
                 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
                 cfs_rq->h_nr_delayed -= h_nr_delayed;

                 if (cfs_rq_is_idle(cfs_rq))
-                        idle_h_nr_running = h_nr_running;
+                        idle_h_nr_running = h_nr_queued;

                 /* end evaluation on encountering a throttled cfs_rq */
                 if (cfs_rq_throttled(cfs_rq))
@@ -7174,21 +7174,21 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
                 se->slice = slice;
                 slice = cfs_rq_min_slice(cfs_rq);

-                cfs_rq->h_nr_running -= h_nr_running;
+                cfs_rq->h_nr_queued -= h_nr_queued;
                 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
                 cfs_rq->h_nr_delayed -= h_nr_delayed;

                 if (cfs_rq_is_idle(cfs_rq))
-                        idle_h_nr_running = h_nr_running;
+                        idle_h_nr_running = h_nr_queued;

                 /* end evaluation on encountering a throttled cfs_rq */
                 if (cfs_rq_throttled(cfs_rq))
                         return 0;
         }

-        sub_nr_running(rq, h_nr_running);
+        sub_nr_running(rq, h_nr_queued);

-        if (rq_h_nr_running && !rq->cfs.h_nr_running)
+        if (rq_h_nr_queued && !rq->cfs.h_nr_queued)
                 dl_server_stop(&rq->fair_server);

         /* balance early to pull high priority tasks */
@@ -10316,7 +10316,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
          * When there is more than 1 task, the group_overloaded case already
          * takes care of cpu with reduced capacity
          */
-        if (rq->cfs.h_nr_running != 1)
+        if (rq->cfs.h_nr_queued != 1)
                 return false;

         return check_cpu_capacity(rq, sd);
@@ -10351,7 +10351,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                 sgs->group_load += load;
                 sgs->group_util += cpu_util_cfs(i);
                 sgs->group_runnable += cpu_runnable(rq);
-                sgs->sum_h_nr_running += rq->cfs.h_nr_running;
+                sgs->sum_h_nr_running += rq->cfs.h_nr_queued;

                 nr_running = rq->nr_running;
                 sgs->sum_nr_running += nr_running;
@@ -10666,7 +10666,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
                 sgs->group_util += cpu_util_without(i, p);
                 sgs->group_runnable += cpu_runnable_without(rq, p);
                 local = task_running_on_cpu(i, p);
-                sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
+                sgs->sum_h_nr_running += rq->cfs.h_nr_queued - local;

                 nr_running = rq->nr_running - local;
                 sgs->sum_nr_running += nr_running;
@@ -11448,7 +11448,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
                 if (rt > env->fbq_type)
                         continue;

-                nr_running = rq->cfs.h_nr_running;
+                nr_running = rq->cfs.h_nr_queued;
                 if (!nr_running)
                         continue;

@@ -11607,7 +11607,7 @@ static int need_active_balance(struct lb_env *env)
          * available on dst_cpu.
          */
         if (env->idle &&
-            (env->src_rq->cfs.h_nr_running == 1)) {
+            (env->src_rq->cfs.h_nr_queued == 1)) {
                 if ((check_cpu_capacity(env->src_rq, sd)) &&
                     (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
                         return 1;
@@ -12348,7 +12348,7 @@ static void nohz_balancer_kick(struct rq *rq)
          * If there's a runnable CFS task and the current CPU has reduced
          * capacity, kick the ILB to see if there's a better CPU to run on:
          */
-        if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
+        if (rq->cfs.h_nr_queued >= 1 && check_cpu_capacity(rq, sd)) {
                 flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
                 goto unlock;
         }
@@ -12835,11 +12835,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
          * have been enqueued in the meantime. Since we're not going idle,
          * pretend we pulled a task.
          */
-        if (this_rq->cfs.h_nr_running && !pulled_task)
+        if (this_rq->cfs.h_nr_queued && !pulled_task)
                 pulled_task = 1;

         /* Is there a task of a high priority class? */
-        if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+        if (this_rq->nr_running != this_rq->cfs.h_nr_queued)
                 pulled_task = -1;

 out:
@@ -13526,7 +13526,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
                                 parent_cfs_rq->idle_nr_running--;
                 }

-                idle_task_delta = grp_cfs_rq->h_nr_running -
+                idle_task_delta = grp_cfs_rq->h_nr_queued -
                         grp_cfs_rq->idle_h_nr_running;
                 if (!cfs_rq_is_idle(grp_cfs_rq))
                         idle_task_delta *= -1;