
Commit 165af41

EricccTaiwan authored and htejun committed
sched_ext: Always use SMP versions in kernel/sched/ext.c
Simplify the scheduler by making formerly SMP-only primitives and data
structures unconditional.

tj: Updated subject for clarity.

Signed-off-by: Cheng-Yang Chou <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
1 parent 9ec5e0b commit 165af41
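The pattern throughout the diff below is mechanical: code that used to be compiled only under CONFIG_SMP becomes unconditional, and the UP-only fallback stubs are dropped. A schematic before/after of that pattern (illustrative only; example_workfn is a made-up name, not a function from this commit):

    /* Before: compiled only on SMP kernels, with a UP stub. */
    #ifdef CONFIG_SMP
    static void example_workfn(struct rq *rq)
    {
            run_deferred(rq);
    }
    #else
    static inline void example_workfn(struct rq *rq) { WARN_ON_ONCE(1); }
    #endif

    /* After: a single unconditional definition, since sched_ext now
     * assumes the SMP variants of these primitives are always available. */
    static void example_workfn(struct rq *rq)
    {
            run_deferred(rq);
    }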

File tree

1 file changed: +1, -25 lines changed

kernel/sched/ext.c

Lines changed: 1 addition & 25 deletions
@@ -1778,12 +1778,10 @@ static void run_deferred(struct rq *rq)
 	process_ddsp_deferred_locals(rq);
 }
 
-#ifdef CONFIG_SMP
 static void deferred_bal_cb_workfn(struct rq *rq)
 {
 	run_deferred(rq);
 }
-#endif
 
 static void deferred_irq_workfn(struct irq_work *irq_work)
 {
@@ -1806,7 +1804,6 @@ static void schedule_deferred(struct rq *rq)
 {
 	lockdep_assert_rq_held(rq);
 
-#ifdef CONFIG_SMP
 	/*
 	 * If in the middle of waking up a task, task_woken_scx() will be called
 	 * afterwards which will then run the deferred actions, no need to
@@ -1824,7 +1821,7 @@ static void schedule_deferred(struct rq *rq)
 			       deferred_bal_cb_workfn);
 		return;
 	}
-#endif
+
 	/*
 	 * No scheduler hooks available. Queue an irq work. They are executed on
 	 * IRQ re-enable which may take a bit longer than the scheduler hooks.
@@ -2528,7 +2525,6 @@ static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
 	p->scx.dsq = dst_dsq;
 }
 
-#ifdef CONFIG_SMP
 /**
  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
  * @p: task to move
@@ -2695,11 +2691,6 @@ static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
 		return false;
 	}
 }
-#else	/* CONFIG_SMP */
-static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
-static inline bool task_can_run_on_remote_rq(struct scx_sched *sch, struct task_struct *p, struct rq *rq, bool enforce) { return false; }
-static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
-#endif	/* CONFIG_SMP */
 
 /**
  * move_task_between_dsqs() - Move a task from one DSQ to another
@@ -2872,9 +2863,7 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 {
 	struct rq *src_rq = task_rq(p);
 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
-#ifdef CONFIG_SMP
 	struct rq *locked_rq = rq;
-#endif
 
 	/*
 	 * We're synchronized against dequeue through DISPATCHING. As @p can't
@@ -2888,7 +2877,6 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 		return;
 	}
 
-#ifdef CONFIG_SMP
 	if (src_rq != dst_rq &&
 	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
 		dispatch_enqueue(sch, find_global_dsq(p), p,
@@ -2948,9 +2936,6 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 		raw_spin_rq_unlock(locked_rq);
 		raw_spin_rq_lock(rq);
 	}
-#else	/* CONFIG_SMP */
-	BUG();	/* control can not reach here on UP */
-#endif	/* CONFIG_SMP */
 }
 
 /**
@@ -3274,10 +3259,8 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
 static enum scx_cpu_preempt_reason
 preempt_reason_from_class(const struct sched_class *class)
 {
-#ifdef CONFIG_SMP
 	if (class == &stop_sched_class)
 		return SCX_CPU_PREEMPT_STOP;
-#endif
 	if (class == &dl_sched_class)
 		return SCX_CPU_PREEMPT_DL;
 	if (class == &rt_sched_class)
@@ -3290,14 +3273,12 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	struct scx_sched *sch = scx_root;
 	const struct sched_class *next_class = next->sched_class;
 
-#ifdef CONFIG_SMP
 	/*
 	 * Pairs with the smp_load_acquire() issued by a CPU in
 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
 	 * resched.
 	 */
 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
-#endif
 	if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
 		return;
 
@@ -3494,8 +3475,6 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
 }
 #endif	/* CONFIG_SCHED_CORE */
 
-#ifdef CONFIG_SMP
-
 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
 {
 	struct scx_sched *sch = scx_root;
@@ -3625,7 +3604,6 @@ static void rq_offline_scx(struct rq *rq)
 	rq->scx.flags &= ~SCX_RQ_ONLINE;
 }
 
-#endif	/* CONFIG_SMP */
 
 static bool check_rq_for_timeouts(struct rq *rq)
 {
@@ -4285,14 +4263,12 @@ DEFINE_SCHED_CLASS(ext) = {
 	.put_prev_task		= put_prev_task_scx,
 	.set_next_task		= set_next_task_scx,
 
-#ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_scx,
 	.task_woken		= task_woken_scx,
 	.set_cpus_allowed	= set_cpus_allowed_scx,
 
 	.rq_online		= rq_online_scx,
 	.rq_offline		= rq_offline_scx,
-#endif
 
 	.task_tick		= task_tick_scx,
 