
Commit 40c3b94
Author: Peter Zijlstra
Parents: c907cd4 + 76f2f78

Merge branch 'sched/urgent'

Sync with urgent bits as a base for further work.

Signed-off-by: Peter Zijlstra <[email protected]>

File tree: 5 files changed (+57, -14 lines)

kernel/sched/core.c
Lines changed: 1 addition & 1 deletion

@@ -1343,7 +1343,7 @@ bool sched_can_stop_tick(struct rq *rq)
 	if (scx_enabled() && !scx_can_stop_tick(rq))
 		return false;
 
-	if (rq->cfs.nr_running > 1)
+	if (rq->cfs.h_nr_running > 1)
 		return false;
 
 	/*
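Note (editor's gloss, not part of the commit): rq->cfs.nr_running counts only the entities queued directly on the root cfs_rq, where an entire cgroup shows up as a single group entity, while h_nr_running counts every runnable task anywhere in the hierarchy. The tick must keep running whenever more than one task could use the CPU, which is what the hierarchical count measures. A minimal userspace model of the distinction, with toy types standing in for the kernel's:

#include <assert.h>
#include <stdio.h>

/* Toy model: direct-child count vs. hierarchical task count of a cfs_rq. */
struct toy_cfs_rq {
	unsigned int nr_running;   /* entities queued directly on this rq */
	unsigned int h_nr_running; /* tasks anywhere below this rq        */
};

int main(void)
{
	/*
	 * One group entity sits on the root; the group's own cfs_rq holds
	 * two tasks. The root sees one entity, but two tasks are runnable.
	 */
	struct toy_cfs_rq root = { .nr_running = 1, .h_nr_running = 2 };

	assert(!(root.nr_running > 1)); /* old test would let the tick stop */
	assert(root.h_nr_running > 1);  /* new test keeps the tick running  */

	printf("nr_running=%u h_nr_running=%u\n",
	       root.nr_running, root.h_nr_running);
	return 0;
}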

kernel/sched/debug.c
Lines changed: 1 addition & 0 deletions

@@ -845,6 +845,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
 			cfs_rq->idle_nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",

kernel/sched/fair.c
Lines changed: 48 additions & 10 deletions

@@ -5463,9 +5463,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }

@@ -5476,6 +5500,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	bool sleep = flags & DEQUEUE_SLEEP;
 
 	update_curr(cfs_rq);
+	clear_buddies(cfs_rq, se);
 
 	if (flags & DEQUEUE_DELAYED) {
 		SCHED_WARN_ON(!se->sched_delayed);

@@ -5492,10 +5517,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
-			if (cfs_rq->next == se)
-				cfs_rq->next = NULL;
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}

@@ -5518,8 +5541,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	update_stats_dequeue_fair(cfs_rq, se, flags);
 
-	clear_buddies(cfs_rq, se);
-
 	update_entity_lag(cfs_rq, se);
 	if (sched_feat(PLACE_REL_DEADLINE) && !sleep) {
 		se->deadline -= se->vruntime;

@@ -5909,7 +5930,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);

@@ -5942,6 +5963,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;

@@ -5965,6 +5987,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */

@@ -5987,6 +6010,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/

@@ -6012,7 +6036,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];

@@ -6048,6 +6072,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 

@@ -6065,6 +6090,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))

@@ -6082,6 +6108,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))

@@ -6930,7 +6957,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*

@@ -6944,6 +6971,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;

@@ -6970,6 +6998,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)

@@ -6992,6 +7023,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;

@@ -7015,6 +7047,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;

@@ -7077,13 +7110,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);

@@ -7101,6 +7137,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;

@@ -7139,6 +7176,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;

@@ -8767,7 +8805,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
 		return;
 
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
+	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK) && !pse->sched_delayed) {
 		set_next_buddy(pse);
 	}
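Editor's sketch of what the fair.c hunks maintain: h_nr_delayed is a per-cfs_rq count of tasks that are still queued only because their dequeue was delayed. set_delayed()/clear_delayed() walk the ancestor cfs_rqs adjusting that count, and stop at the first throttled level because throttle_cfs_rq()/unthrottle_cfs_rq() subtract and re-add whole-subtree counts (the new delayed_delta) from that level upward. A hedged userspace model of the walk, with a plain parent pointer standing in for for_each_sched_entity():

#include <assert.h>
#include <stddef.h>

/* Toy model of the hierarchical h_nr_delayed accounting. */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent; /* NULL at the root            */
	int throttled;             /* cfs_rq_throttled() analogue */
	unsigned int h_nr_delayed;
};

/* Mirrors set_delayed(): count up to and including the first throttled level. */
static void toy_set_delayed(struct toy_cfs_rq *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_delayed++;
		if (cfs_rq->throttled)
			break; /* throttle already removed this subtree above here */
	}
}

/* Mirrors clear_delayed(): the exact inverse walk. */
static void toy_clear_delayed(struct toy_cfs_rq *cfs_rq)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_delayed--;
		if (cfs_rq->throttled)
			break;
	}
}

int main(void)
{
	struct toy_cfs_rq root  = { .parent = NULL };
	struct toy_cfs_rq group = { .parent = &root, .throttled = 1 };
	struct toy_cfs_rq leaf  = { .parent = &group };

	toy_set_delayed(&leaf);
	/* The walk stops at the throttled group; the root stays untouched. */
	assert(leaf.h_nr_delayed == 1 && group.h_nr_delayed == 1);
	assert(root.h_nr_delayed == 0);

	toy_clear_delayed(&leaf);
	assert(leaf.h_nr_delayed == 0 && group.h_nr_delayed == 0);
	return 0;
}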

kernel/sched/pelt.c
Lines changed: 1 addition & 1 deletion

@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
 	if (___update_load_sum(now, &cfs_rq->avg,
 				scale_load_down(cfs_rq->load.weight),
-				cfs_rq->h_nr_running,
+				cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
 				cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1);
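Reading of this hunk (editor's note, not commit text): a task parked by delayed dequeue still occupies the runqueue but is no longer runnable in the PELT sense, so counting it in the runnable sum would inflate runnable_avg. Passing h_nr_running - h_nr_delayed to ___update_load_sum() keeps the runnable signal tied to tasks that can actually run.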

kernel/sched/sched.h
Lines changed: 6 additions & 2 deletions

@@ -649,6 +649,7 @@ struct cfs_rq {
 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
+	unsigned int		h_nr_delayed;
 
 	s64			avg_vruntime;
 	u64			avg_load;

@@ -898,8 +899,11 @@ struct dl_rq {
 
 static inline void se_update_runnable(struct sched_entity *se)
 {
-	if (!entity_is_task(se))
-		se->runnable_weight = se->my_q->h_nr_running;
+	if (!entity_is_task(se)) {
+		struct cfs_rq *cfs_rq = se->my_q;
+
+		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+	}
 }
 
 static inline long se_runnable(struct sched_entity *se)
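Both this se_update_runnable() hunk and the pelt.c change rely on the same invariant: a group's effective runnable count is h_nr_running minus h_nr_delayed, and the difference cannot go negative because every delayed task is still queued. A toy check of that invariant, with a hypothetical helper name:

#include <assert.h>

/* Hypothetical helper: the effective runnable count both hunks compute. */
static unsigned int toy_effective_runnable(unsigned int h_nr_running,
					   unsigned int h_nr_delayed)
{
	assert(h_nr_delayed <= h_nr_running); /* delayed tasks remain queued */
	return h_nr_running - h_nr_delayed;
}

int main(void)
{
	/* 3 tasks queued in the subtree, 1 parked by DELAY_DEQUEUE. */
	assert(toy_effective_runnable(3, 1) == 2);
	return 0;
}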
