@@ -5470,7 +5470,6 @@ static void set_delayed(struct sched_entity *se)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		cfs_rq->h_nr_runnable--;
-		cfs_rq->h_nr_delayed++;
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
@@ -5483,7 +5482,6 @@ static void clear_delayed(struct sched_entity *se)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		cfs_rq->h_nr_runnable++;
-		cfs_rq->h_nr_delayed--;
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
@@ -5932,7 +5930,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
+	long queued_delta, runnable_delta, idle_task_delta, dequeue = 1;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5966,7 +5964,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
-	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5991,7 +5988,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
-		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -6015,7 +6011,6 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
-		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6041,7 +6036,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
+	long queued_delta, runnable_delta, idle_task_delta;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6078,7 +6073,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
-	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6097,7 +6091,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
-		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6116,7 +6109,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
-		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6979,7 +6971,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
-	int h_nr_delayed = 0;
+	int h_nr_runnable = 1;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_queued = rq->cfs.h_nr_queued;
 	u64 slice = 0;
@@ -7006,8 +6998,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
-	if (task_new)
-		h_nr_delayed = !!se->sched_delayed;
+	if (task_new && se->sched_delayed)
+		h_nr_runnable = 0;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
@@ -7029,11 +7021,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_entity(cfs_rq, se, flags);
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable++;
+		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
-		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7055,11 +7045,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable++;
+		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
-		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7122,16 +7110,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_queued = 0;
-	int h_nr_delayed = 0;
+	int h_nr_runnable = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_queued = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
-		if (!task_sleep && !task_delayed)
-			h_nr_delayed = !!se->sched_delayed;
+		if (task_sleep || task_delayed || !se->sched_delayed)
+			h_nr_runnable = 1;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7147,11 +7135,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 			break;
 		}
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable -= h_nr_queued;
+		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
-		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_queued;
@@ -7188,11 +7174,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
-		if (!h_nr_delayed)
-			cfs_rq->h_nr_runnable -= h_nr_queued;
+		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
-		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_queued;
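Note: every hunk above applies the same accounting change. Instead of maintaining a separate cfs_rq->h_nr_delayed counter and touching h_nr_runnable only when a task is not delayed, each enqueue/dequeue path now computes a per-task h_nr_runnable delta (0 for a delayed-dequeue entity, 1 otherwise) and adds or subtracts it directly, and the throttle/unthrottle paths stop propagating the now-unused delayed_delta. The sketch below is a minimal standalone illustration of that scheme, not kernel code: the toy_cfs_rq struct and toy_enqueue()/toy_dequeue() helpers are hypothetical, and the task_new/task_sleep/task_delayed qualifiers used by the real code are omitted.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two counters touched by the diff (hypothetical, not kernel code). */
struct toy_cfs_rq {
	long h_nr_queued;	/* all queued entities, delayed or not */
	long h_nr_runnable;	/* only entities that can actually run */
};

static void toy_enqueue(struct toy_cfs_rq *cfs_rq, bool sched_delayed)
{
	/* Same pattern as the patched enqueue path: add the delta directly. */
	int h_nr_runnable = sched_delayed ? 0 : 1;

	cfs_rq->h_nr_runnable += h_nr_runnable;
	cfs_rq->h_nr_queued++;
}

static void toy_dequeue(struct toy_cfs_rq *cfs_rq, bool sched_delayed)
{
	/* Same pattern as the patched dequeue path: subtract the delta directly. */
	int h_nr_runnable = sched_delayed ? 0 : 1;

	cfs_rq->h_nr_runnable -= h_nr_runnable;
	cfs_rq->h_nr_queued--;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { 0, 0 };

	toy_enqueue(&cfs_rq, false);	/* ordinary wakeup */
	toy_enqueue(&cfs_rq, true);	/* delayed-dequeue entity re-queued */
	assert(cfs_rq.h_nr_queued == 2 && cfs_rq.h_nr_runnable == 1);

	toy_dequeue(&cfs_rq, true);	/* delayed entity leaves: runnable count untouched */
	toy_dequeue(&cfs_rq, false);
	assert(cfs_rq.h_nr_queued == 0 && cfs_rq.h_nr_runnable == 0);

	printf("h_nr_runnable accounting stays consistent without h_nr_delayed\n");
	return 0;
}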