@@ -5469,6 +5469,7 @@ static void set_delayed(struct sched_entity *se)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
+		cfs_rq->h_nr_runnable--;
 		cfs_rq->h_nr_delayed++;
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5481,6 +5482,7 @@ static void clear_delayed(struct sched_entity *se)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
+		cfs_rq->h_nr_runnable++;
 		cfs_rq->h_nr_delayed--;
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5930,7 +5932,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, idle_task_delta, delayed_delta, dequeue = 1;
+	long queued_delta, runnable_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5962,6 +5964,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	rcu_read_unlock();
 
 	queued_delta = cfs_rq->h_nr_queued;
+	runnable_delta = cfs_rq->h_nr_runnable;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
@@ -5986,6 +5989,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 			idle_task_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued -= queued_delta;
+		qcfs_rq->h_nr_runnable -= runnable_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 		qcfs_rq->h_nr_delayed -= delayed_delta;
 
@@ -6009,6 +6013,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 			idle_task_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued -= queued_delta;
+		qcfs_rq->h_nr_runnable -= runnable_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
 		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
@@ -6036,7 +6041,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, idle_task_delta, delayed_delta;
+	long queued_delta, runnable_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6071,6 +6076,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	}
 
 	queued_delta = cfs_rq->h_nr_queued;
+	runnable_delta = cfs_rq->h_nr_runnable;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
@@ -6089,6 +6095,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			idle_task_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued += queued_delta;
+		qcfs_rq->h_nr_runnable += runnable_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
 		qcfs_rq->h_nr_delayed += delayed_delta;
 
@@ -6107,6 +6114,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			idle_task_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued += queued_delta;
+		qcfs_rq->h_nr_runnable += runnable_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
 		qcfs_rq->h_nr_delayed += delayed_delta;
 
@@ -7021,6 +7029,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		enqueue_entity(cfs_rq, se, flags);
 		slice = cfs_rq_min_slice(cfs_rq);
 
+		if (!h_nr_delayed)
+			cfs_rq->h_nr_runnable++;
 		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 		cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7045,6 +7055,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
+		if (!h_nr_delayed)
+			cfs_rq->h_nr_runnable++;
 		cfs_rq->h_nr_queued++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
 		cfs_rq->h_nr_delayed += h_nr_delayed;
@@ -7135,6 +7147,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 			break;
 		}
 
+		if (!h_nr_delayed)
+			cfs_rq->h_nr_runnable -= h_nr_queued;
 		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 		cfs_rq->h_nr_delayed -= h_nr_delayed;
@@ -7174,6 +7188,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		se->slice = slice;
 		slice = cfs_rq_min_slice(cfs_rq);
 
+		if (!h_nr_delayed)
+			cfs_rq->h_nr_runnable -= h_nr_queued;
 		cfs_rq->h_nr_queued -= h_nr_queued;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 		cfs_rq->h_nr_delayed -= h_nr_delayed;
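The hunks above all serve one piece of bookkeeping: `cfs_rq->h_nr_runnable` counts entities that are hierarchically queued and not in delayed-dequeue state, so at every level `h_nr_runnable == h_nr_queued - h_nr_delayed`. Enqueue/dequeue only touch the new counter when the entity is not delayed, `set_delayed()`/`clear_delayed()` move a single entity between the runnable and delayed buckets, and throttle/unthrottle propagate the same delta up the hierarchy. The standalone sketch below is not part of the patch; the field names come from the diff, while the struct, helpers, and single-task deltas are simplifications for illustration.

```c
#include <assert.h>
#include <stdio.h>

/* Simplified mirror of the counters the patch maintains per cfs_rq. */
struct counts {
	long h_nr_queued;    /* hierarchically queued entities          */
	long h_nr_delayed;   /* queued, but in delayed-dequeue state    */
	long h_nr_runnable;  /* queued and runnable (new in this patch) */
};

/* enqueue path: only a non-delayed entity counts as runnable. */
static void enqueue(struct counts *c, int delayed)
{
	if (!delayed)
		c->h_nr_runnable++;
	c->h_nr_queued++;
	c->h_nr_delayed += delayed;
}

/* dequeue path, mirrored. */
static void dequeue(struct counts *c, int delayed)
{
	if (!delayed)
		c->h_nr_runnable--;
	c->h_nr_queued--;
	c->h_nr_delayed -= delayed;
}

/* set_delayed()/clear_delayed(): move one entity between the buckets. */
static void mark_delayed(struct counts *c)  { c->h_nr_runnable--; c->h_nr_delayed++; }
static void clear_delayed(struct counts *c) { c->h_nr_runnable++; c->h_nr_delayed--; }

/* Invariant every hunk above preserves. */
static void check(const struct counts *c)
{
	assert(c->h_nr_runnable == c->h_nr_queued - c->h_nr_delayed);
}

int main(void)
{
	struct counts c = { 0, 0, 0 };

	enqueue(&c, 0);   /* two ordinary wakeups                        */
	enqueue(&c, 0);
	check(&c);        /* 2 queued, 0 delayed, 2 runnable             */

	mark_delayed(&c); /* one task enters delayed dequeue             */
	check(&c);        /* 2 queued, 1 delayed, 1 runnable             */

	dequeue(&c, 1);   /* the delayed task is finally removed         */
	check(&c);        /* 1 queued, 0 delayed, 1 runnable             */

	(void)clear_delayed; /* unused in this particular sequence       */
	printf("h_nr_runnable == h_nr_queued - h_nr_delayed holds\n");
	return 0;
}
```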