Commit 65aaf90

sched_ext: Relocate functions in kernel/sched/ext.c
Relocate functions to ease the removal of switch_class_scx(). No functional changes.

Signed-off-by: Tejun Heo <[email protected]>
1 parent 753e283 commit 65aaf90
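
Because the patch is purely a relocation, its net effect is easiest to see as the resulting ordering of the affected functions in kernel/sched/ext.c. The outline below is a sketch reconstructed from the hunks further down; unrelated functions and the elided parts of the parameter lists are omitted:

/* Sketch: order of the affected functions in kernel/sched/ext.c after this commit. */
static int balance_scx(struct rq *rq, struct task_struct *prev, ...);
static void process_ddsp_deferred_locals(struct rq *rq);       /* moved up from just below set_next_task_scx() */
static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first);
static enum scx_cpu_preempt_reason
preempt_reason_from_class(const struct sched_class *class);    /* moved up from below scx_prio_less() */
static void switch_class_scx(struct rq *rq, struct task_struct *next); /* moved up from below scx_prio_less() */
/* ... */
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b, ...);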

File tree

1 file changed (+78 -78 lines changed)
kernel/sched/ext.c

Lines changed: 78 additions & 78 deletions
@@ -2642,6 +2642,31 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,
         return ret;
 }
 
+static void process_ddsp_deferred_locals(struct rq *rq)
+{
+        struct task_struct *p;
+
+        lockdep_assert_rq_held(rq);
+
+        /*
+         * Now that @rq can be unlocked, execute the deferred enqueueing of
+         * tasks directly dispatched to the local DSQs of other CPUs. See
+         * direct_dispatch(). Keep popping from the head instead of using
+         * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
+         * temporarily.
+         */
+        while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
+                        struct task_struct, scx.dsq_list.node))) {
+                s32 ret;
+
+                list_del_init(&p->scx.dsq_list.node);
+
+                ret = dispatch_to_local_dsq(rq, p->scx.ddsp_dsq_id, p,
+                                            p->scx.ddsp_enq_flags);
+                WARN_ON_ONCE(ret == DTL_NOT_LOCAL);
+        }
+}
+
 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
 {
         if (p->scx.flags & SCX_TASK_QUEUED) {
@@ -2684,28 +2709,66 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
         }
 }
 
-static void process_ddsp_deferred_locals(struct rq *rq)
+static enum scx_cpu_preempt_reason
+preempt_reason_from_class(const struct sched_class *class)
 {
-        struct task_struct *p;
+#ifdef CONFIG_SMP
+        if (class == &stop_sched_class)
+                return SCX_CPU_PREEMPT_STOP;
+#endif
+        if (class == &dl_sched_class)
+                return SCX_CPU_PREEMPT_DL;
+        if (class == &rt_sched_class)
+                return SCX_CPU_PREEMPT_RT;
+        return SCX_CPU_PREEMPT_UNKNOWN;
+}
 
-        lockdep_assert_rq_held(rq);
+static void switch_class_scx(struct rq *rq, struct task_struct *next)
+{
+        const struct sched_class *next_class = next->sched_class;
 
+        if (!scx_enabled())
+                return;
+#ifdef CONFIG_SMP
         /*
-         * Now that @rq can be unlocked, execute the deferred enqueueing of
-         * tasks directly dispatched to the local DSQs of other CPUs. See
-         * direct_dispatch(). Keep popping from the head instead of using
-         * list_for_each_entry_safe() as dispatch_local_dsq() may unlock @rq
-         * temporarily.
+         * Pairs with the smp_load_acquire() issued by a CPU in
+         * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
+         * resched.
          */
-        while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
-                        struct task_struct, scx.dsq_list.node))) {
-                s32 ret;
+        smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
+#endif
+        if (!static_branch_unlikely(&scx_ops_cpu_preempt))
+                return;
 
-                list_del_init(&p->scx.dsq_list.node);
+        /*
+         * The callback is conceptually meant to convey that the CPU is no
+         * longer under the control of SCX. Therefore, don't invoke the callback
+         * if the next class is below SCX (in which case the BPF scheduler has
+         * actively decided not to schedule any tasks on the CPU).
+         */
+        if (sched_class_above(&ext_sched_class, next_class))
+                return;
 
-                ret = dispatch_to_local_dsq(rq, p->scx.ddsp_dsq_id, p,
-                                            p->scx.ddsp_enq_flags);
-                WARN_ON_ONCE(ret == DTL_NOT_LOCAL);
+        /*
+         * At this point we know that SCX was preempted by a higher priority
+         * sched_class, so invoke the ->cpu_release() callback if we have not
+         * done so already. We only send the callback once between SCX being
+         * preempted, and it regaining control of the CPU.
+         *
+         * ->cpu_release() complements ->cpu_acquire(), which is emitted the
+         * next time that balance_scx() is invoked.
+         */
+        if (!rq->scx.cpu_released) {
+                if (SCX_HAS_OP(cpu_release)) {
+                        struct scx_cpu_release_args args = {
+                                .reason = preempt_reason_from_class(next_class),
+                                .task = next,
+                        };
+
+                        SCX_CALL_OP(SCX_KF_CPU_RELEASE,
+                                    cpu_release, cpu_of(rq), &args);
+                }
+                rq->scx.cpu_released = true;
         }
 }
 
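As context for the switch_class_scx() body in the hunk above: ops.cpu_release() is the hook a BPF scheduler can supply to react when a higher-priority sched class takes a CPU away from SCX, and args->reason carries the SCX_CPU_PREEMPT_* value computed by preempt_reason_from_class(). A minimal, hypothetical BPF-side callback in the style of the in-tree example schedulers might look like the following; the name example_cpu_release and the choice to call the scx_bpf_reenqueue_local() kfunc are illustrative assumptions, not part of this commit:

/* Hypothetical BPF-side counterpart to the kernel-side SCX_CALL_OP() above. */
void BPF_STRUCT_OPS(example_cpu_release, s32 cpu,
                    struct scx_cpu_release_args *args)
{
        /*
         * A higher-priority class (identified by args->reason) now owns this
         * CPU. Re-enqueue the tasks sitting on its local DSQ so they can be
         * dispatched elsewhere instead of waiting behind the preempting class.
         */
        scx_bpf_reenqueue_local();
}
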
@@ -2821,69 +2884,6 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
 }
 #endif  /* CONFIG_SCHED_CORE */
 
-static enum scx_cpu_preempt_reason
-preempt_reason_from_class(const struct sched_class *class)
-{
-#ifdef CONFIG_SMP
-        if (class == &stop_sched_class)
-                return SCX_CPU_PREEMPT_STOP;
-#endif
-        if (class == &dl_sched_class)
-                return SCX_CPU_PREEMPT_DL;
-        if (class == &rt_sched_class)
-                return SCX_CPU_PREEMPT_RT;
-        return SCX_CPU_PREEMPT_UNKNOWN;
-}
-
-static void switch_class_scx(struct rq *rq, struct task_struct *next)
-{
-        const struct sched_class *next_class = next->sched_class;
-
-        if (!scx_enabled())
-                return;
-#ifdef CONFIG_SMP
-        /*
-         * Pairs with the smp_load_acquire() issued by a CPU in
-         * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
-         * resched.
-         */
-        smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
-#endif
-        if (!static_branch_unlikely(&scx_ops_cpu_preempt))
-                return;
-
-        /*
-         * The callback is conceptually meant to convey that the CPU is no
-         * longer under the control of SCX. Therefore, don't invoke the callback
-         * if the next class is below SCX (in which case the BPF scheduler has
-         * actively decided not to schedule any tasks on the CPU).
-         */
-        if (sched_class_above(&ext_sched_class, next_class))
-                return;
-
-        /*
-         * At this point we know that SCX was preempted by a higher priority
-         * sched_class, so invoke the ->cpu_release() callback if we have not
-         * done so already. We only send the callback once between SCX being
-         * preempted, and it regaining control of the CPU.
-         *
-         * ->cpu_release() complements ->cpu_acquire(), which is emitted the
-         * next time that balance_scx() is invoked.
-         */
-        if (!rq->scx.cpu_released) {
-                if (SCX_HAS_OP(cpu_release)) {
-                        struct scx_cpu_release_args args = {
-                                .reason = preempt_reason_from_class(next_class),
-                                .task = next,
-                        };
-
-                        SCX_CALL_OP(SCX_KF_CPU_RELEASE,
-                                    cpu_release, cpu_of(rq), &args);
-                }
-                rq->scx.cpu_released = true;
-        }
-}
-
 #ifdef CONFIG_SMP
 
 static bool test_and_clear_cpu_idle(int cpu)
