@@ -5463,9 +5463,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
@@ -5476,6 +5500,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	bool sleep = flags & DEQUEUE_SLEEP;
 
 	update_curr(cfs_rq);
+	clear_buddies(cfs_rq, se);
 
 	if (flags & DEQUEUE_DELAYED) {
 		SCHED_WARN_ON(!se->sched_delayed);
@@ -5492,10 +5517,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
-			if (cfs_rq->next == se)
-				cfs_rq->next = NULL;
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}
@@ -5518,8 +5541,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
 
-	clear_buddies(cfs_rq, se);
-
 	update_entity_lag(cfs_rq, se);
 	if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
 		se->deadline -= se->vruntime;
@@ -5909,7 +5930,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5942,6 +5963,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5965,6 +5987,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5987,6 +6010,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6012,7 +6036,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6048,6 +6072,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6065,6 +6090,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6082,6 +6108,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6930,7 +6957,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6944,6 +6971,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6970,6 +6998,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -6992,6 +7023,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7015,6 +7047,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7077,13 +7110,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7101,6 +7137,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7139,6 +7176,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -8767,7 +8805,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
 		return;
 
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
+	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK) && !pse->sched_delayed) {
 		set_next_buddy(pse);
 	}
 
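
For readers following the bookkeeping above, here is a minimal, self-contained userspace sketch (not kernel code; toy_rq, toy_se and toy_adjust_delayed are simplified stand-ins for cfs_rq, sched_entity and the set_delayed()/clear_delayed() helpers in the patch) of the propagation pattern: adjust a per-level delayed counter while walking up the hierarchy, and stop at the first throttled level, since a throttled group's hierarchical counts are already excluded from its ancestors.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for struct cfs_rq: one counter per hierarchy level. */
struct toy_rq {
	struct toy_rq *parent;
	bool throttled;
	long h_nr_delayed;
};

/* Toy stand-in for struct sched_entity: queued on one toy_rq. */
struct toy_se {
	struct toy_rq *rq;
	bool sched_delayed;
};

/* Mirrors the shape of set_delayed()/clear_delayed(): update the counter
 * at each level, then stop once a throttled level has been updated. */
static void toy_adjust_delayed(struct toy_se *se, long delta)
{
	se->sched_delayed = (delta > 0);
	for (struct toy_rq *rq = se->rq; rq; rq = rq->parent) {
		rq->h_nr_delayed += delta;
		if (rq->throttled)
			break;
	}
}

int main(void)
{
	struct toy_rq root  = { .parent = NULL };
	struct toy_rq group = { .parent = &root, .throttled = true };
	struct toy_rq leaf  = { .parent = &group };
	struct toy_se se    = { .rq = &leaf };

	toy_adjust_delayed(&se, +1);	/* set_delayed() analogue */
	/* leaf and the throttled group are updated; root is untouched. */
	printf("leaf=%ld group=%ld root=%ld\n",
	       leaf.h_nr_delayed, group.h_nr_delayed, root.h_nr_delayed);

	toy_adjust_delayed(&se, -1);	/* clear_delayed() analogue */
	printf("leaf=%ld group=%ld root=%ld\n",
	       leaf.h_nr_delayed, group.h_nr_delayed, root.h_nr_delayed);
	return 0;
}

This is why throttle_cfs_rq()/unthrottle_cfs_rq() in the patch also carry a delayed_delta alongside task_delta: once a group is throttled, its h_nr_delayed must be subtracted from (and later re-added to) every ancestor, just like h_nr_running.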