@@ -1778,12 +1778,10 @@ static void run_deferred(struct rq *rq)
 	process_ddsp_deferred_locals(rq);
 }
 
-#ifdef CONFIG_SMP
 static void deferred_bal_cb_workfn(struct rq *rq)
 {
 	run_deferred(rq);
 }
-#endif
 
 static void deferred_irq_workfn(struct irq_work *irq_work)
 {
@@ -1806,7 +1804,6 @@ static void schedule_deferred(struct rq *rq)
 {
 	lockdep_assert_rq_held(rq);
 
-#ifdef CONFIG_SMP
 	/*
 	 * If in the middle of waking up a task, task_woken_scx() will be called
	 * afterwards which will then run the deferred actions, no need to
@@ -1824,7 +1821,7 @@ static void schedule_deferred(struct rq *rq)
				       deferred_bal_cb_workfn);
 		return;
 	}
-#endif
+
 	/*
	 * No scheduler hooks available. Queue an irq work. They are executed on
	 * IRQ re-enable which may take a bit longer than the scheduler hooks.
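With the ifdef gone, schedule_deferred() unconditionally tries the balance-callback path above and falls back to an irq_work whose callback runs once IRQs are re-enabled. A minimal sketch of that fallback pattern using the generic irq_work API, with hypothetical names (my_deferred_fn, my_deferred_work, kick_deferred) rather than the actual ext.c wiring:

	#include <linux/irq_work.h>

	static void my_deferred_fn(struct irq_work *work)
	{
		/* runs in hard-IRQ context on the CPU that queued the work */
	}

	static struct irq_work my_deferred_work = IRQ_WORK_INIT(my_deferred_fn);

	static void kick_deferred(void)
	{
		/* raises a self-IPI; returns false if the work was already pending */
		irq_work_queue(&my_deferred_work);
	}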
@@ -2528,7 +2525,6 @@ static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
 	p->scx.dsq = dst_dsq;
 }
 
-#ifdef CONFIG_SMP
 /**
  * move_remote_task_to_local_dsq - Move a task from a foreign rq to a local DSQ
  * @p: task to move
@@ -2695,11 +2691,6 @@ static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
 		return false;
 	}
 }
-#else	/* CONFIG_SMP */
-static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
-static inline bool task_can_run_on_remote_rq(struct scx_sched *sch, struct task_struct *p, struct rq *rq, bool enforce) { return false; }
-static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
-#endif	/* CONFIG_SMP */
 
 /**
  * move_task_between_dsqs() - Move a task from one DSQ to another
@@ -2872,9 +2863,7 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 {
 	struct rq *src_rq = task_rq(p);
 	struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
-#ifdef CONFIG_SMP
 	struct rq *locked_rq = rq;
-#endif
 
 	/*
	 * We're synchronized against dequeue through DISPATCHING. As @p can't
@@ -2888,7 +2877,6 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 		return;
 	}
 
-#ifdef CONFIG_SMP
 	if (src_rq != dst_rq &&
	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
 		dispatch_enqueue(sch, find_global_dsq(p), p,
@@ -2948,9 +2936,6 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 		raw_spin_rq_unlock(locked_rq);
 		raw_spin_rq_lock(rq);
 	}
-#else	/* CONFIG_SMP */
-	BUG();	/* control can not reach here on UP */
-#endif	/* CONFIG_SMP */
 }
 
 /**
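The epilogue above maintains the rq-lock invariant for the caller: the move may leave dispatch_to_local_dsq() holding a different rq lock than the one it entered with, so it swaps back before returning. A condensed sketch of that hand-back, reconstructed from the context lines with error paths omitted:

	/* @locked_rq is whichever rq lock the move left us holding */
	if (locked_rq != rq) {
		raw_spin_rq_unlock(locked_rq);
		raw_spin_rq_lock(rq);
	}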
@@ -3274,10 +3259,8 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
 static enum scx_cpu_preempt_reason
 preempt_reason_from_class(const struct sched_class *class)
 {
-#ifdef CONFIG_SMP
 	if (class == &stop_sched_class)
 		return SCX_CPU_PREEMPT_STOP;
-#endif
 	if (class == &dl_sched_class)
 		return SCX_CPU_PREEMPT_DL;
 	if (class == &rt_sched_class)
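For reference, the post-patch helper in full. The first three branches come from the hunk above; the RT branch body and the final return are not shown in the hunk and are assumed from the scx_cpu_preempt_reason enum (SCX_CPU_PREEMPT_RT, SCX_CPU_PREEMPT_UNKNOWN):

	static enum scx_cpu_preempt_reason
	preempt_reason_from_class(const struct sched_class *class)
	{
		if (class == &stop_sched_class)
			return SCX_CPU_PREEMPT_STOP;
		if (class == &dl_sched_class)
			return SCX_CPU_PREEMPT_DL;
		if (class == &rt_sched_class)
			return SCX_CPU_PREEMPT_RT;
		return SCX_CPU_PREEMPT_UNKNOWN;	/* assumed fall-through */
	}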
@@ -3290,14 +3273,12 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	struct scx_sched *sch = scx_root;
 	const struct sched_class *next_class = next->sched_class;
 
-#ifdef CONFIG_SMP
 	/*
	 * Pairs with the smp_load_acquire() issued by a CPU in
	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
	 * resched.
	 */
 	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
-#endif
 	if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT))
 		return;
 
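The smp_store_release() survives un-gated because it is one half of a cross-CPU handshake: a kicking CPU snapshots pnt_seq, asks this CPU to resched, then spins until the counter moves past the snapshot, proving a pick_next_task cycle has completed. A simplified sketch of the waiter side, assuming a loop of roughly this shape in kick_cpus_irq_workfn():

	/* kicking CPU: snapshot taken before asking the target to resched */
	u64 pseq = READ_ONCE(rq->scx.pnt_seq);

	/* ... send the resched request ... */

	/* pairs with the smp_store_release() in switch_class() above */
	while (smp_load_acquire(&rq->scx.pnt_seq) == pseq)
		cpu_relax();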
@@ -3494,8 +3475,6 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
 }
 #endif	/* CONFIG_SCHED_CORE */
 
-#ifdef CONFIG_SMP
-
 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
 {
 	struct scx_sched *sch = scx_root;
@@ -3625,7 +3604,6 @@ static void rq_offline_scx(struct rq *rq)
 	rq->scx.flags &= ~SCX_RQ_ONLINE;
 }
 
-#endif	/* CONFIG_SMP */
 
 static bool check_rq_for_timeouts(struct rq *rq)
 {
@@ -4285,14 +4263,12 @@ DEFINE_SCHED_CLASS(ext) = {
 	.put_prev_task		= put_prev_task_scx,
 	.set_next_task		= set_next_task_scx,
 
-#ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_scx,
 	.task_woken		= task_woken_scx,
 	.set_cpus_allowed	= set_cpus_allowed_scx,
 
 	.rq_online		= rq_online_scx,
 	.rq_offline		= rq_offline_scx,
-#endif
 
 	.task_tick		= task_tick_scx,
 
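The un-gated entries correspond to sched_class hooks that, with SMP as the only remaining build mode, are declared unconditionally in kernel/sched/sched.h. For reference, the matching member declarations as they appear in recent kernels (signatures have varied across versions, so treat these as illustrative):

	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
	void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);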