Skip to content

Commit 5cbb302

Browse files
committed
sched_ext: Rename scx_bpf_dispatch[_vtime]_from_dsq*() -> scx_bpf_dsq_move[_vtime]*()
In sched_ext API, a repeatedly reported pain point is the overuse of the verb "dispatch" and confusion around "consume":

- ops.dispatch()
- scx_bpf_dispatch[_vtime]()
- scx_bpf_consume()
- scx_bpf_dispatch[_vtime]_from_dsq*()

This overloading of the term is historical. Originally, there were only built-in DSQs and moving a task into a DSQ always dispatched it for execution. Using the verb "dispatch" for the kfuncs to move tasks into these DSQs made sense.

Later, user DSQs were added and scx_bpf_dispatch[_vtime]() updated to be able to insert tasks into any DSQ. The only allowed DSQ to DSQ transfer was from a non-local DSQ to a local DSQ and this operation was named "consume". This was already confusing as a task could be dispatched to a user DSQ from ops.enqueue() and then the DSQ would have to be consumed in ops.dispatch(). Later addition of scx_bpf_dispatch_from_dsq*() made the confusion even worse as "dispatch" in this context meant moving a task to an arbitrary DSQ from a user DSQ.

Clean up the API with the following renames:

1. scx_bpf_dispatch[_vtime]() -> scx_bpf_dsq_insert[_vtime]()
2. scx_bpf_consume() -> scx_bpf_dsq_move_to_local()
3. scx_bpf_dispatch[_vtime]_from_dsq*() -> scx_bpf_dsq_move[_vtime]*()

This patch performs the third set of renames. Compatibility is maintained by:

- The previous kfunc names are still provided by the kernel so that old binaries can run. Kernel generates a warning when the old names are used.
- compat.bpf.h provides wrappers for the new names which automatically fall back to the old names when running on older kernels. They also trigger build error if old names are used for new builds.
- scx_bpf_dispatch[_vtime]_from_dsq*() were already wrapped in __COMPAT macros as they were introduced during v6.12 cycle. Wrap new API in __COMPAT macros too and trigger build errors on both __COMPAT prefixed and naked usages of the old names.

The compat features will be dropped after v6.15.
Signed-off-by: Tejun Heo <[email protected]>
Acked-by: Andrea Righi <[email protected]>
Acked-by: Changwoo Min <[email protected]>
Acked-by: Johannes Bechberger <[email protected]>
Acked-by: Giovanni Gherdovich <[email protected]>
Cc: Dan Schatzberg <[email protected]>
Cc: Ming Yang <[email protected]>
1 parent 5209c03 commit 5cbb302

File tree

4 files changed

+152
-59
lines changed

4 files changed

+152
-59
lines changed

kernel/sched/ext.c

Lines changed: 68 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -6422,9 +6422,8 @@ static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
64226422
.set = &scx_kfunc_ids_enqueue_dispatch,
64236423
};
64246424

6425-
static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
6426-
struct task_struct *p, u64 dsq_id,
6427-
u64 enq_flags)
6425+
static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit,
6426+
struct task_struct *p, u64 dsq_id, u64 enq_flags)
64286427
{
64296428
struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
64306429
struct rq *this_rq, *src_rq, *locked_rq;
@@ -6594,44 +6593,60 @@ __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
65946593
}
65956594

65966595
/**
6597-
* scx_bpf_dispatch_from_dsq_set_slice - Override slice when dispatching from DSQ
6596+
* scx_bpf_dsq_move_set_slice - Override slice when moving between DSQs
65986597
* @it__iter: DSQ iterator in progress
6599-
* @slice: duration the dispatched task can run for in nsecs
6598+
* @slice: duration the moved task can run for in nsecs
66006599
*
6601-
* Override the slice of the next task that will be dispatched from @it__iter
6602-
* using scx_bpf_dispatch_from_dsq[_vtime](). If this function is not called,
6603-
* the previous slice duration is kept.
6600+
* Override the slice of the next task that will be moved from @it__iter using
6601+
* scx_bpf_dsq_move[_vtime](). If this function is not called, the previous
6602+
* slice duration is kept.
66046603
*/
6605-
__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6606-
struct bpf_iter_scx_dsq *it__iter, u64 slice)
6604+
__bpf_kfunc void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter,
6605+
u64 slice)
66076606
{
66086607
struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
66096608

66106609
kit->slice = slice;
66116610
kit->cursor.flags |= __SCX_DSQ_ITER_HAS_SLICE;
66126611
}
66136612

6613+
/* for backward compatibility, will be removed in v6.15 */
6614+
__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_slice(
6615+
struct bpf_iter_scx_dsq *it__iter, u64 slice)
6616+
{
6617+
printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()");
6618+
scx_bpf_dsq_move_set_slice(it__iter, slice);
6619+
}
6620+
66146621
/**
6615-
* scx_bpf_dispatch_from_dsq_set_vtime - Override vtime when dispatching from DSQ
6622+
* scx_bpf_dsq_move_set_vtime - Override vtime when moving between DSQs
66166623
* @it__iter: DSQ iterator in progress
66176624
* @vtime: task's ordering inside the vtime-sorted queue of the target DSQ
66186625
*
6619-
* Override the vtime of the next task that will be dispatched from @it__iter
6620-
* using scx_bpf_dispatch_from_dsq_vtime(). If this function is not called, the
6621-
* previous slice vtime is kept. If scx_bpf_dispatch_from_dsq() is used to
6622-
* dispatch the next task, the override is ignored and cleared.
6626+
* Override the vtime of the next task that will be moved from @it__iter using
6627+
* scx_bpf_dsq_move_vtime(). If this function is not called, the previous slice
6628+
* vtime is kept. If scx_bpf_dsq_move() is used to dispatch the next task, the
6629+
* override is ignored and cleared.
66236630
*/
6624-
__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6625-
struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6631+
__bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter,
6632+
u64 vtime)
66266633
{
66276634
struct bpf_iter_scx_dsq_kern *kit = (void *)it__iter;
66286635

66296636
kit->vtime = vtime;
66306637
kit->cursor.flags |= __SCX_DSQ_ITER_HAS_VTIME;
66316638
}
66326639

6640+
/* for backward compatibility, will be removed in v6.15 */
6641+
__bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
6642+
struct bpf_iter_scx_dsq *it__iter, u64 vtime)
6643+
{
6644+
printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()");
6645+
scx_bpf_dsq_move_set_vtime(it__iter, vtime);
6646+
}
6647+
66336648
/**
6634-
* scx_bpf_dispatch_from_dsq - Move a task from DSQ iteration to a DSQ
6649+
* scx_bpf_dsq_move - Move a task from DSQ iteration to a DSQ
66356650
* @it__iter: DSQ iterator in progress
66366651
* @p: task to transfer
66376652
* @dsq_id: DSQ to move @p to
@@ -6646,25 +6661,33 @@ __bpf_kfunc void scx_bpf_dispatch_from_dsq_set_vtime(
66466661
* @p was obtained from the DSQ iteration. @p just has to be on the DSQ and have
66476662
* been queued before the iteration started.
66486663
*
6649-
* @p's slice is kept by default. Use scx_bpf_dispatch_from_dsq_set_slice() to
6650-
* update.
6664+
* @p's slice is kept by default. Use scx_bpf_dsq_move_set_slice() to update.
66516665
*
66526666
* Can be called from ops.dispatch() or any BPF context which doesn't hold a rq
66536667
* lock (e.g. BPF timers or SYSCALL programs).
66546668
*
66556669
* Returns %true if @p has been consumed, %false if @p had already been consumed
66566670
* or dequeued.
66576671
*/
6672+
__bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter,
6673+
struct task_struct *p, u64 dsq_id,
6674+
u64 enq_flags)
6675+
{
6676+
return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6677+
p, dsq_id, enq_flags);
6678+
}
6679+
6680+
/* for backward compatibility, will be removed in v6.15 */
66586681
__bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
66596682
struct task_struct *p, u64 dsq_id,
66606683
u64 enq_flags)
66616684
{
6662-
return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6663-
p, dsq_id, enq_flags);
6685+
printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()");
6686+
return scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags);
66646687
}
66656688

66666689
/**
6667-
* scx_bpf_dispatch_vtime_from_dsq - Move a task from DSQ iteration to a PRIQ DSQ
6690+
* scx_bpf_dsq_move_vtime - Move a task from DSQ iteration to a PRIQ DSQ
66686691
* @it__iter: DSQ iterator in progress
66696692
* @p: task to transfer
66706693
* @dsq_id: DSQ to move @p to
@@ -6674,19 +6697,27 @@ __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
66746697
* priority queue of the DSQ specified by @dsq_id. The destination must be a
66756698
* user DSQ as only user DSQs support priority queue.
66766699
*
6677-
* @p's slice and vtime are kept by default. Use
6678-
* scx_bpf_dispatch_from_dsq_set_slice() and
6679-
* scx_bpf_dispatch_from_dsq_set_vtime() to update.
6700+
* @p's slice and vtime are kept by default. Use scx_bpf_dsq_move_set_slice()
6701+
* and scx_bpf_dsq_move_set_vtime() to update.
66806702
*
6681-
* All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
6703+
* All other aspects are identical to scx_bpf_dsq_move(). See
66826704
* scx_bpf_dsq_insert_vtime() for more information on @vtime.
66836705
*/
6706+
__bpf_kfunc bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter,
6707+
struct task_struct *p, u64 dsq_id,
6708+
u64 enq_flags)
6709+
{
6710+
return scx_dsq_move((struct bpf_iter_scx_dsq_kern *)it__iter,
6711+
p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6712+
}
6713+
6714+
/* for backward compatibility, will be removed in v6.15 */
66846715
__bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
66856716
struct task_struct *p, u64 dsq_id,
66866717
u64 enq_flags)
66876718
{
6688-
return scx_dispatch_from_dsq((struct bpf_iter_scx_dsq_kern *)it__iter,
6689-
p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
6719+
printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()");
6720+
return scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags);
66906721
}
66916722

66926723
__bpf_kfunc_end_defs();
@@ -6696,6 +6727,10 @@ BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
66966727
BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
66976728
BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
66986729
BTF_ID_FLAGS(func, scx_bpf_consume)
6730+
BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6731+
BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6732+
BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6733+
BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
66996734
BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
67006735
BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
67016736
BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)
@@ -6796,6 +6831,10 @@ __bpf_kfunc_end_defs();
67966831

67976832
BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
67986833
BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
6834+
BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
6835+
BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
6836+
BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
6837+
BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
67996838
BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_slice)
68006839
BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq_set_vtime)
68016840
BTF_ID_FLAGS(func, scx_bpf_dispatch_from_dsq, KF_RCU)

tools/sched_ext/include/scx/common.bpf.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,10 @@ void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64
4141
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
4242
void scx_bpf_dispatch_cancel(void) __ksym;
4343
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym;
44-
void scx_bpf_dispatch_from_dsq_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
45-
void scx_bpf_dispatch_from_dsq_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
46-
bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
47-
bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
44+
void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym;
45+
void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym;
46+
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
47+
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
4848
u32 scx_bpf_reenqueue_local(void) __ksym;
4949
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
5050
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
@@ -74,8 +74,8 @@ struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
7474
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
7575

7676
/*
77-
* Use the following as @it__iter when calling
78-
* scx_bpf_dispatch[_vtime]_from_dsq() from within bpf_for_each() loops.
77+
* Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
78+
* within bpf_for_each() loops.
7979
*/
8080
#define BPF_FOR_EACH_ITER (&___it)
8181

tools/sched_ext/include/scx/compat.bpf.h

Lines changed: 68 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -20,30 +20,24 @@
2020
(bpf_ksym_exists(scx_bpf_task_cgroup) ? \
2121
scx_bpf_task_cgroup((p)) : NULL)
2222

23-
/* v6.12: 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()") */
24-
#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it, slice) \
25-
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice) ? \
26-
scx_bpf_dispatch_from_dsq_set_slice((it), (slice)) : (void)0)
27-
#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it, vtime) \
28-
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime) ? \
29-
scx_bpf_dispatch_from_dsq_set_vtime((it), (vtime)) : (void)0)
30-
#define __COMPAT_scx_bpf_dispatch_from_dsq(it, p, dsq_id, enq_flags) \
31-
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq) ? \
32-
scx_bpf_dispatch_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
33-
#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it, p, dsq_id, enq_flags) \
34-
(bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq) ? \
35-
scx_bpf_dispatch_vtime_from_dsq((it), (p), (dsq_id), (enq_flags)) : false)
36-
3723
/*
3824
* v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
3925
* renamed to unload the verb.
4026
*
4127
* Build error is triggered if old names are used. New binaries work with both
4228
* new and old names. The compat macros will be removed on v6.15 release.
29+
*
30+
* scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
31+
* 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
32+
* Preserve __COMPAT macros until v6.15.
4333
*/
4434
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
4535
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
4636
bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
37+
void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
38+
void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
39+
bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
40+
bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
4741

4842
#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \
4943
(bpf_ksym_exists(scx_bpf_dsq_insert) ? \
@@ -60,6 +54,34 @@ bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
6054
scx_bpf_dsq_move_to_local((dsq_id)) : \
6155
scx_bpf_consume___compat((dsq_id)))
6256

57+
#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \
58+
(bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \
59+
scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \
60+
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \
61+
scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \
62+
(void)0))
63+
64+
#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
65+
(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \
66+
scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \
67+
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \
68+
scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \
69+
(void) 0))
70+
71+
#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
72+
(bpf_ksym_exists(scx_bpf_dsq_move) ? \
73+
scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \
74+
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \
75+
scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
76+
false))
77+
78+
#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
79+
(bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? \
80+
scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \
81+
(bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \
82+
scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
83+
false))
84+
6385
#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \
6486
_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
6587

@@ -71,6 +93,38 @@ bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
7193
false; \
7294
})
7395

96+
#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
97+
_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")
98+
99+
#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
100+
_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")
101+
102+
#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
103+
_Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
104+
false; \
105+
})
106+
107+
#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
108+
_Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
109+
false; \
110+
})
111+
112+
#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
113+
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")
114+
115+
#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
116+
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")
117+
118+
#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
119+
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
120+
false; \
121+
})
122+
123+
#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
124+
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
125+
false; \
126+
})
127+
74128
/*
75129
* Define sched_ext_ops. This may be expanded to define multiple variants for
76130
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().

tools/sched_ext/scx_qmap.bpf.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -294,10 +294,10 @@ static void update_core_sched_head_seq(struct task_struct *p)
294294
}
295295

296296
/*
297-
* To demonstrate the use of scx_bpf_dispatch_from_dsq(), implement silly
298-
* selective priority boosting mechanism by scanning SHARED_DSQ looking for
299-
* highpri tasks, moving them to HIGHPRI_DSQ and then consuming them first. This
300-
* makes minor difference only when dsp_batch is larger than 1.
297+
* To demonstrate the use of scx_bpf_dsq_move(), implement silly selective
298+
* priority boosting mechanism by scanning SHARED_DSQ looking for highpri tasks,
299+
* moving them to HIGHPRI_DSQ and then consuming them first. This makes minor
300+
* difference only when dsp_batch is larger than 1.
301301
*
302302
* scx_bpf_dispatch[_vtime]_from_dsq() are allowed both from ops.dispatch() and
303303
* non-rq-lock holding BPF programs. As demonstration, this function is called
@@ -318,11 +318,11 @@ static bool dispatch_highpri(bool from_timer)
318318

319319
if (tctx->highpri) {
320320
/* exercise the set_*() and vtime interface too */
321-
__COMPAT_scx_bpf_dispatch_from_dsq_set_slice(
321+
__COMPAT_scx_bpf_dsq_move_set_slice(
322322
BPF_FOR_EACH_ITER, slice_ns * 2);
323-
__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(
323+
__COMPAT_scx_bpf_dsq_move_set_vtime(
324324
BPF_FOR_EACH_ITER, highpri_seq++);
325-
__COMPAT_scx_bpf_dispatch_vtime_from_dsq(
325+
__COMPAT_scx_bpf_dsq_move_vtime(
326326
BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
327327
}
328328
}
@@ -340,9 +340,9 @@ static bool dispatch_highpri(bool from_timer)
340340
else
341341
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
342342

343-
if (__COMPAT_scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
344-
SCX_DSQ_LOCAL_ON | cpu,
345-
SCX_ENQ_PREEMPT)) {
343+
if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p,
344+
SCX_DSQ_LOCAL_ON | cpu,
345+
SCX_ENQ_PREEMPT)) {
346346
if (cpu == this_cpu) {
347347
dispatched = true;
348348
__sync_fetch_and_add(&nr_expedited_local, 1);

0 commit comments

Comments
 (0)