@@ -5930,7 +5930,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta, dequeue = 1;
+	long queued_delta, runnable_delta, idle_delta, dequeue = 1;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5963,7 +5963,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
-	idle_task_delta = cfs_rq->idle_h_nr_running;
+	idle_delta = cfs_rq->h_nr_idle;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5983,11 +5983,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 			dequeue_entity(qcfs_rq, se, flags);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
-		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_idle -= idle_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -6006,11 +6006,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		se_update_runnable(se);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
-		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_idle -= idle_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6036,7 +6036,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta;
+	long queued_delta, runnable_delta, idle_delta;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6072,7 +6072,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
-	idle_task_delta = cfs_rq->idle_h_nr_running;
+	idle_delta = cfs_rq->h_nr_idle;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6086,11 +6086,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
-		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_idle += idle_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6104,11 +6104,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		se_update_runnable(se);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
-		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_idle += idle_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6918,7 +6918,7 @@ static inline void check_update_overutilized_status(struct rq *rq) { }
 /* Runqueue only has SCHED_IDLE tasks enqueued */
 static int sched_idle_rq(struct rq *rq)
 {
-	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
+	return unlikely(rq->nr_running == rq->cfs.h_nr_idle &&
 			rq->nr_running);
 }
 
@@ -6970,7 +6970,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_idle = task_has_idle_policy(p);
 	int h_nr_runnable = 1;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_queued = rq->cfs.h_nr_queued;
@@ -7023,10 +7023,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
-		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_idle += h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = 1;
+			h_nr_idle = 1;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7047,10 +7047,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
-		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_idle += h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = 1;
+			h_nr_idle = 1;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7108,7 +7108,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	bool task_sleep = flags & DEQUEUE_SLEEP;
 	bool task_delayed = flags & DEQUEUE_DELAYED;
 	struct task_struct *p = NULL;
-	int idle_h_nr_running = 0;
+	int h_nr_idle = 0;
 	int h_nr_queued = 0;
 	int h_nr_runnable = 0;
 	struct cfs_rq *cfs_rq;
@@ -7117,7 +7117,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_queued = 1;
-		idle_h_nr_running = task_has_idle_policy(p);
+		h_nr_idle = task_has_idle_policy(p);
 		if (task_sleep || task_delayed || !se->sched_delayed)
 			h_nr_runnable = 1;
 	} else {
@@ -7137,10 +7137,10 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
-		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_idle -= h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = h_nr_queued;
+			h_nr_idle = h_nr_queued;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7176,10 +7176,10 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
-		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_idle -= h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = h_nr_queued;
+			h_nr_idle = h_nr_queued;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -13527,7 +13527,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 		}
 
 		idle_task_delta = grp_cfs_rq->h_nr_queued -
-				  grp_cfs_rq->idle_h_nr_running;
+				  grp_cfs_rq->h_nr_idle;
 		if (!cfs_rq_is_idle(grp_cfs_rq))
 			idle_task_delta *= -1;
 
@@ -13537,7 +13537,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 			if (!se->on_rq)
 				break;
 
-			cfs_rq->idle_h_nr_running += idle_task_delta;
+			cfs_rq->h_nr_idle += idle_task_delta;
 
 			/* Already accounted at parent level and above. */
 			if (cfs_rq_is_idle(cfs_rq))
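
The rename in these hunks is mechanical: the hierarchical count of SCHED_IDLE entities moves from cfs_rq->idle_h_nr_running to cfs_rq->h_nr_idle, matching the h_nr_queued and h_nr_runnable naming, and the idle_task_delta / idle_h_nr_running temporaries in the throttle, unthrottle, enqueue and dequeue paths follow suit. As a rough picture of what the throttle path does with that counter, here is a minimal user-space sketch; the toy_cfs_rq and toy_throttle names are hypothetical stand-ins, not the kernel's types:

/*
 * Illustrative sketch only, not kernel code: a toy model of how the counter
 * (under its new h_nr_idle name) is pulled out of every ancestor level when
 * a group is throttled, mirroring the for_each_sched_entity() loops above.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfs_rq {
	struct toy_cfs_rq *parent;	/* NULL at the root */
	long h_nr_queued;		/* tasks queued in this hierarchy */
	long h_nr_idle;			/* SCHED_IDLE tasks in this hierarchy */
	bool is_idle;			/* group itself runs at idle priority */
};

/* Remove a throttled group's contribution from every ancestor's counters. */
static void toy_throttle(struct toy_cfs_rq *grp)
{
	long queued_delta = grp->h_nr_queued;
	long idle_delta = grp->h_nr_idle;
	struct toy_cfs_rq *child = grp;

	for (struct toy_cfs_rq *q = grp->parent; q; child = q, q = q->parent) {
		/*
		 * Once an idle group is crossed on the way up, everything
		 * below it counts as idle from the ancestors' point of view,
		 * so the idle delta widens to the full queued delta.
		 */
		if (child->is_idle)
			idle_delta = grp->h_nr_queued;

		q->h_nr_queued -= queued_delta;
		q->h_nr_idle -= idle_delta;
	}
}

int main(void)
{
	struct toy_cfs_rq root = { .h_nr_queued = 5, .h_nr_idle = 2 };
	struct toy_cfs_rq grp = { .parent = &root, .h_nr_queued = 3,
				  .h_nr_idle = 1, .is_idle = false };

	toy_throttle(&grp);
	printf("root: h_nr_queued=%ld h_nr_idle=%ld\n",
	       root.h_nr_queued, root.h_nr_idle);	/* prints 2 and 1 */
	return 0;
}

The unthrottle path is the mirror image with += instead of -=, which is exactly the symmetry visible in the paired hunks above.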