Skip to content

Commit d7b01ae

Browse files
committed
Merge branch 'tip/sched/core' into for-6.12
- Resolve trivial context conflicts from dl_server clearing being moved around. - Add @next to put_prev_task_scx() and @prev to pick_next_task_scx() to match sched/core. - Merge sched_class->switch_class() addition from sched_ext with tip/sched/core changes in __pick_next_task(). - Make pick_next_task_scx() call put_prev_task_scx() to emulate the previous behavior where sched_class->put_prev_task() was called before sched_class->pick_next_task(). While this makes sched_ext build and function, the behavior is not in line with other sched classes. The follow-up patches will address the discrepancies and remove sched_class->switch_class(). Signed-off-by: Tejun Heo <[email protected]>
2 parents 62607d0 + b2d7022 commit d7b01ae

File tree

9 files changed

+180
-218
lines changed

9 files changed

+180
-218
lines changed

include/linux/sched.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -694,7 +694,6 @@ struct sched_dl_entity {
694694
*/
695695
struct rq *rq;
696696
dl_server_has_tasks_f server_has_tasks;
697-
dl_server_pick_f server_pick_next;
698697
dl_server_pick_f server_pick_task;
699698

700699
#ifdef CONFIG_RT_MUTEXES

kernel/sched/core.c

Lines changed: 42 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -3690,8 +3690,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
36903690
rq->idle_stamp = 0;
36913691
}
36923692
#endif
3693-
3694-
p->dl_server = NULL;
36953693
}
36963694

36973695
/*
@@ -5895,8 +5893,8 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
58955893
schedstat_inc(this_rq()->sched_count);
58965894
}
58975895

5898-
static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5899-
struct rq_flags *rf)
5896+
static void prev_balance(struct rq *rq, struct task_struct *prev,
5897+
struct rq_flags *rf)
59005898
{
59015899
const struct sched_class *start_class = prev->sched_class;
59025900
const struct sched_class *class;
@@ -5923,16 +5921,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
59235921
if (class->balance && class->balance(rq, prev, rf))
59245922
break;
59255923
}
5926-
5927-
put_prev_task(rq, prev);
5928-
5929-
/*
5930-
* We've updated @prev and no longer need the server link, clear it.
5931-
* Must be done before ->pick_next_task() because that can (re)set
5932-
* ->dl_server.
5933-
*/
5934-
if (prev->dl_server)
5935-
prev->dl_server = NULL;
59365924
}
59375925

59385926
/*
@@ -5944,6 +5932,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
59445932
const struct sched_class *class;
59455933
struct task_struct *p;
59465934

5935+
rq->dl_server = NULL;
5936+
59475937
if (scx_enabled())
59485938
goto restart;
59495939

@@ -5962,38 +5952,37 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
59625952

59635953
/* Assume the next prioritized class is idle_sched_class */
59645954
if (!p) {
5965-
put_prev_task(rq, prev);
5966-
p = pick_next_task_idle(rq);
5955+
p = pick_task_idle(rq);
5956+
put_prev_set_next_task(rq, prev, p);
59675957
}
59685958

5969-
/*
5970-
* This is a normal CFS pick, but the previous could be a DL pick.
5971-
* Clear it as previous is no longer picked.
5972-
*/
5973-
if (prev->dl_server)
5974-
prev->dl_server = NULL;
5975-
5976-
/*
5977-
* This is the fast path; it cannot be a DL server pick;
5978-
* therefore even if @p == @prev, ->dl_server must be NULL.
5979-
*/
5980-
if (p->dl_server)
5981-
p->dl_server = NULL;
5982-
59835959
return p;
59845960
}
59855961

59865962
restart:
5987-
put_prev_task_balance(rq, prev, rf);
5963+
prev_balance(rq, prev, rf);
59885964

59895965
for_each_active_class(class) {
5990-
p = class->pick_next_task(rq);
5991-
if (p) {
5992-
const struct sched_class *prev_class = prev->sched_class;
5966+
if (class->pick_next_task) {
5967+
p = class->pick_next_task(rq, prev);
5968+
if (p) {
5969+
const struct sched_class *prev_class = prev->sched_class;
5970+
5971+
if (class != prev_class && prev_class->switch_class)
5972+
prev_class->switch_class(rq, p);
5973+
return p;
5974+
}
5975+
} else {
5976+
p = class->pick_task(rq);
5977+
if (p) {
5978+
const struct sched_class *prev_class = prev->sched_class;
59935979

5994-
if (class != prev_class && prev_class->switch_class)
5995-
prev_class->switch_class(rq, p);
5996-
return p;
5980+
put_prev_set_next_task(rq, prev, p);
5981+
5982+
if (class != prev_class && prev_class->switch_class)
5983+
prev_class->switch_class(rq, p);
5984+
return p;
5985+
}
59975986
}
59985987
}
59995988

@@ -6024,6 +6013,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
60246013
const struct sched_class *class;
60256014
struct task_struct *p;
60266015

6016+
rq->dl_server = NULL;
6017+
60276018
for_each_active_class(class) {
60286019
p = class->pick_task(rq);
60296020
if (p)
@@ -6062,6 +6053,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
60626053
* another cpu during offline.
60636054
*/
60646055
rq->core_pick = NULL;
6056+
rq->core_dl_server = NULL;
60656057
return __pick_next_task(rq, prev, rf);
60666058
}
60676059

@@ -6080,16 +6072,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
60806072
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
60816073

60826074
next = rq->core_pick;
6083-
if (next != prev) {
6084-
put_prev_task(rq, prev);
6085-
set_next_task(rq, next);
6086-
}
6087-
6075+
rq->dl_server = rq->core_dl_server;
60886076
rq->core_pick = NULL;
6089-
goto out;
6077+
rq->core_dl_server = NULL;
6078+
goto out_set_next;
60906079
}
60916080

6092-
put_prev_task_balance(rq, prev, rf);
6081+
prev_balance(rq, prev, rf);
60936082

60946083
smt_mask = cpu_smt_mask(cpu);
60956084
need_sync = !!rq->core->core_cookie;
@@ -6130,6 +6119,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
61306119
next = pick_task(rq);
61316120
if (!next->core_cookie) {
61326121
rq->core_pick = NULL;
6122+
rq->core_dl_server = NULL;
61336123
/*
61346124
* For robustness, update the min_vruntime_fi for
61356125
* unconstrained picks as well.
@@ -6157,7 +6147,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
61576147
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
61586148
update_rq_clock(rq_i);
61596149

6160-
p = rq_i->core_pick = pick_task(rq_i);
6150+
rq_i->core_pick = p = pick_task(rq_i);
6151+
rq_i->core_dl_server = rq_i->dl_server;
6152+
61616153
if (!max || prio_less(max, p, fi_before))
61626154
max = p;
61636155
}
@@ -6181,6 +6173,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
61816173
}
61826174

61836175
rq_i->core_pick = p;
6176+
rq_i->core_dl_server = NULL;
61846177

61856178
if (p == rq_i->idle) {
61866179
if (rq_i->nr_running) {
@@ -6241,6 +6234,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
62416234

62426235
if (i == cpu) {
62436236
rq_i->core_pick = NULL;
6237+
rq_i->core_dl_server = NULL;
62446238
continue;
62456239
}
62466240

@@ -6249,15 +6243,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
62496243

62506244
if (rq_i->curr == rq_i->core_pick) {
62516245
rq_i->core_pick = NULL;
6246+
rq_i->core_dl_server = NULL;
62526247
continue;
62536248
}
62546249

62556250
resched_curr(rq_i);
62566251
}
62576252

62586253
out_set_next:
6259-
set_next_task(rq, next);
6260-
out:
6254+
put_prev_set_next_task(rq, prev, next);
62616255
if (rq->core->core_forceidle_count && next == rq->idle)
62626256
queue_core_balance(rq);
62636257

@@ -8487,6 +8481,7 @@ void __init sched_init(void)
84878481
#ifdef CONFIG_SCHED_CORE
84888482
rq->core = rq;
84898483
rq->core_pick = NULL;
8484+
rq->core_dl_server = NULL;
84908485
rq->core_enabled = 0;
84918486
rq->core_tree = RB_ROOT;
84928487
rq->core_forceidle_count = 0;

kernel/sched/deadline.c

Lines changed: 25 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -1665,12 +1665,10 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
16651665

16661666
void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
16671667
dl_server_has_tasks_f has_tasks,
1668-
dl_server_pick_f pick_next,
16691668
dl_server_pick_f pick_task)
16701669
{
16711670
dl_se->rq = rq;
16721671
dl_se->server_has_tasks = has_tasks;
1673-
dl_se->server_pick_next = pick_next;
16741672
dl_se->server_pick_task = pick_task;
16751673
}
16761674

@@ -1896,46 +1894,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
18961894
return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
18971895
}
18981896

1899-
static inline struct sched_statistics *
1897+
static __always_inline struct sched_statistics *
19001898
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
19011899
{
1900+
if (!schedstat_enabled())
1901+
return NULL;
1902+
1903+
if (dl_server(dl_se))
1904+
return NULL;
1905+
19021906
return &dl_task_of(dl_se)->stats;
19031907
}
19041908

19051909
static inline void
19061910
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
19071911
{
1908-
struct sched_statistics *stats;
1909-
1910-
if (!schedstat_enabled())
1911-
return;
1912-
1913-
stats = __schedstats_from_dl_se(dl_se);
1914-
__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1912+
struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
1913+
if (stats)
1914+
__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
19151915
}
19161916

19171917
static inline void
19181918
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
19191919
{
1920-
struct sched_statistics *stats;
1921-
1922-
if (!schedstat_enabled())
1923-
return;
1924-
1925-
stats = __schedstats_from_dl_se(dl_se);
1926-
__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1920+
struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
1921+
if (stats)
1922+
__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
19271923
}
19281924

19291925
static inline void
19301926
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
19311927
{
1932-
struct sched_statistics *stats;
1933-
1934-
if (!schedstat_enabled())
1935-
return;
1936-
1937-
stats = __schedstats_from_dl_se(dl_se);
1938-
__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1928+
struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
1929+
if (stats)
1930+
__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
19391931
}
19401932

19411933
static inline void
@@ -2392,6 +2384,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
23922384
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
23932385

23942386
deadline_queue_push_tasks(rq);
2387+
2388+
if (hrtick_enabled(rq))
2389+
start_hrtick_dl(rq, &p->dl);
23952390
}
23962391

23972392
static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
@@ -2407,9 +2402,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
24072402
/*
24082403
* __pick_next_task_dl - Helper to pick the next -deadline task to run.
24092404
* @rq: The runqueue to pick the next task from.
2410-
* @peek: If true, just peek at the next task. Only relevant for dlserver.
24112405
*/
2412-
static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
2406+
static struct task_struct *__pick_task_dl(struct rq *rq)
24132407
{
24142408
struct sched_dl_entity *dl_se;
24152409
struct dl_rq *dl_rq = &rq->dl;
@@ -2423,48 +2417,26 @@ static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
24232417
WARN_ON_ONCE(!dl_se);
24242418

24252419
if (dl_server(dl_se)) {
2426-
if (IS_ENABLED(CONFIG_SMP) && peek)
2427-
p = dl_se->server_pick_task(dl_se);
2428-
else
2429-
p = dl_se->server_pick_next(dl_se);
2420+
p = dl_se->server_pick_task(dl_se);
24302421
if (!p) {
24312422
dl_se->dl_yielded = 1;
24322423
update_curr_dl_se(rq, dl_se, 0);
24332424
goto again;
24342425
}
2435-
p->dl_server = dl_se;
2426+
rq->dl_server = dl_se;
24362427
} else {
24372428
p = dl_task_of(dl_se);
24382429
}
24392430

24402431
return p;
24412432
}
24422433

2443-
#ifdef CONFIG_SMP
24442434
static struct task_struct *pick_task_dl(struct rq *rq)
24452435
{
2446-
return __pick_next_task_dl(rq, true);
2447-
}
2448-
#endif
2449-
2450-
static struct task_struct *pick_next_task_dl(struct rq *rq)
2451-
{
2452-
struct task_struct *p;
2453-
2454-
p = __pick_next_task_dl(rq, false);
2455-
if (!p)
2456-
return p;
2457-
2458-
if (!p->dl_server)
2459-
set_next_task_dl(rq, p, true);
2460-
2461-
if (hrtick_enabled(rq))
2462-
start_hrtick_dl(rq, &p->dl);
2463-
2464-
return p;
2436+
return __pick_task_dl(rq);
24652437
}
24662438

2467-
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2439+
static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
24682440
{
24692441
struct sched_dl_entity *dl_se = &p->dl;
24702442
struct dl_rq *dl_rq = &rq->dl;
@@ -3156,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = {
31563128

31573129
.wakeup_preempt = wakeup_preempt_dl,
31583130

3159-
.pick_next_task = pick_next_task_dl,
3131+
.pick_task = pick_task_dl,
31603132
.put_prev_task = put_prev_task_dl,
31613133
.set_next_task = set_next_task_dl,
31623134

31633135
#ifdef CONFIG_SMP
31643136
.balance = balance_dl,
3165-
.pick_task = pick_task_dl,
31663137
.select_task_rq = select_task_rq_dl,
31673138
.migrate_task_rq = migrate_task_rq_dl,
31683139
.set_cpus_allowed = set_cpus_allowed_dl,

0 commit comments

Comments
 (0)