@@ -220,10 +220,10 @@ struct sched_ext_ops {
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
-	 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
-	 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
-	 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
-	 * local DSQ of whatever CPU is returned by this callback.
+	 * @p may be inserted into a DSQ directly by calling
+	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
+	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
+	 * of the CPU returned by this operation.
	 *
	 * Note that select_cpu() is never called for tasks that can only run
	 * on a single CPU or tasks with migration disabled, as they don't have
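For context, the direct-dispatch pattern this comment describes looks roughly like the following on the BPF side. This is a minimal sketch modeled on the scx_simple example scheduler; it assumes the usual tools/sched_ext scaffolding (BPF_STRUCT_OPS, scx_bpf_select_cpu_dfl(), SCX_SLICE_DFL), and the callback name is illustrative:

```c
s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	/* Built-in idle-CPU selection provided by the sched_ext core. */
	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		/*
		 * Direct insertion into the chosen CPU's local DSQ;
		 * ops.enqueue() will be skipped for @p.
		 */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}
```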
@@ -237,12 +237,12 @@ struct sched_ext_ops {
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
-	 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
-	 * or enqueue on the BPF scheduler. If not directly dispatched, the bpf
-	 * scheduler owns @p and if it fails to dispatch @p, the task will
-	 * stall.
+	 * @p is ready to run. Insert directly into a DSQ by calling
+	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
+	 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
+	 * the task will stall.
	 *
-	 * If @p was dispatched from ops.select_cpu(), this callback is
+	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);
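A companion sketch of such an enqueue callback, which hands every task to one shared FIFO DSQ (SHARED_DSQ is a hypothetical DSQ id, assumed to have been created with scx_bpf_create_dsq() in ops.init()):

```c
#define SHARED_DSQ 0	/* hypothetical: created via scx_bpf_create_dsq() */

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	/*
	 * Insert @p into the shared DSQ. If the BPF scheduler keeps @p
	 * without ever inserting it anywhere, the task stalls.
	 */
	scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
}
```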
@@ -270,11 +270,11 @@ struct sched_ext_ops {
	 *
	 * Called when a CPU's local dsq is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
-	 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
-	 * scx_bpf_consume().
+	 * scx_bpf_dsq_insert() and/or consume user DSQs into the local DSQ
+	 * using scx_bpf_consume().
	 *
-	 * The maximum number of times scx_bpf_dispatch() can be called without
-	 * an intervening scx_bpf_consume() is specified by
+	 * The maximum number of times scx_bpf_dsq_insert() can be called
+	 * without an intervening scx_bpf_consume() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
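The consume half of that contract, sketched under the same assumptions as above (note that at this commit's vintage the kfunc is still named scx_bpf_consume()):

```c
void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
{
	/*
	 * The local DSQ ran dry: move the next task from the shared
	 * DSQ onto this CPU's local DSQ so it can run.
	 */
	scx_bpf_consume(SHARED_DSQ);
}
```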
@@ -714,7 +714,7 @@ enum scx_enq_flags {

	/*
	 * Set the following to trigger preemption when calling
-	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
+	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
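Illustrative use of the flag (preempt_cpu_with() is a hypothetical helper; SCX_DSQ_LOCAL_ON | cpu targets a specific CPU's local DSQ):

```c
static void preempt_cpu_with(struct task_struct *p, s32 cpu, u64 enq_flags)
{
	/*
	 * Insert @p into @cpu's local DSQ and preempt whatever is
	 * running there: its slice is zeroed and the CPU is kicked
	 * into the scheduling path.
	 */
	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL,
			   enq_flags | SCX_ENQ_PREEMPT);
}
```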
@@ -2322,7 +2322,7 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
	/*
	 * We don't require the BPF scheduler to avoid dispatching to offline
	 * CPUs mostly for convenience but also because CPUs can go offline
-	 * between scx_bpf_dispatch() calls and here. Trigger error iff the
+	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
	 * picked CPU is outside the allowed mask.
	 */
	if (!task_allowed_on_cpu(p, cpu)) {
@@ -2658,7 +2658,7 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 * Dispatching to local DSQs may need to wait for queueing to complete or
 * require rq lock dancing. As we don't wanna do either while inside
 * ops.dispatch() to avoid locking order inversion, we split dispatching into
- * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
+ * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
 * task and its qseq. Once ops.dispatch() returns, this function is called to
 * finish up.
 *
@@ -2690,7 +2690,7 @@ static void finish_dispatch(struct rq *rq, struct task_struct *p,
	/*
	 * If qseq doesn't match, @p has gone through at least one
	 * dispatch/dequeue and re-enqueue cycle between
-	 * scx_bpf_dispatch() and here and we have no claim on it.
+	 * scx_bpf_dsq_insert() and here and we have no claim on it.
	 */
	if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
		return;
@@ -6258,7 +6258,7 @@ static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
	.set = &scx_kfunc_ids_select_cpu,
};

-static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
+static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
{
	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
		return false;
@@ -6278,7 +6278,8 @@ static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
	return true;
}

-static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
+static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
+				  u64 enq_flags)
{
	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	struct task_struct *ddsp_task;
@@ -6305,14 +6306,14 @@ static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags
__bpf_kfunc_start_defs();

/**
- * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
- * @p: task_struct to dispatch
- * @dsq_id: DSQ to dispatch to
+ * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
+ * @p: task_struct to insert
+ * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @enq_flags: SCX_ENQ_*
 *
- * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
- * to call this function spuriously. Can be called from ops.enqueue(),
+ * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
+ * call this function spuriously. Can be called from ops.enqueue(),
 * ops.select_cpu(), and ops.dispatch().
 *
 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
@@ -6321,14 +6322,14 @@ __bpf_kfunc_start_defs();
 * ops.select_cpu() to be on the target CPU in the first place.
 *
 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
- * will be directly dispatched to the corresponding dispatch queue after
- * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
- * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
+ * will be directly inserted into the corresponding dispatch queue after
+ * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
+ * inserted into the local DSQ of the CPU returned by ops.select_cpu().
 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
- * task is dispatched.
+ * task is inserted.
 *
 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
- * and this function can be called upto ops.dispatch_max_batch times to dispatch
+ * and this function can be called up to ops.dispatch_max_batch times to insert
 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
 *
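A dispatch loop that respects this batch limit might look like the following sketch, loosely modeled on the scx_qmap example (the queued_pids map is hypothetical; tasks are assumed to have been queued there by pid from ops.enqueue()):

```c
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, pid_t);
} queued_pids SEC(".maps");	/* hypothetical user-managed queue */

void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
{
	bpf_repeat(4096) {
		struct task_struct *p;
		pid_t pid;

		/* Honor the ops.dispatch_max_batch insertion budget. */
		if (!scx_bpf_dispatch_nr_slots())
			break;

		if (bpf_map_pop_elem(&queued_pids, &pid))
			break;	/* queue empty */

		p = bpf_task_from_pid(pid);
		if (!p)
			continue;	/* task exited in the meantime */

		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
		bpf_task_release(p);
	}
}
```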
@@ -6340,41 +6341,49 @@ __bpf_kfunc_start_defs();
 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
 * scx_bpf_kick_cpu() to trigger scheduling.
 */
-__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
-				  u64 enq_flags)
+__bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
+				    u64 enq_flags)
{
-	if (!scx_dispatch_preamble(p, enq_flags))
+	if (!scx_dsq_insert_preamble(p, enq_flags))
		return;

	if (slice)
		p->scx.slice = slice;
	else
		p->scx.slice = p->scx.slice ?: 1;

-	scx_dispatch_commit(p, dsq_id, enq_flags);
+	scx_dsq_insert_commit(p, dsq_id, enq_flags);
+}
+
+/* for backward compatibility, will be removed in v6.15 */
+__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
+				  u64 enq_flags)
+{
+	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
+	scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
}

/**
- * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
- * @p: task_struct to dispatch
- * @dsq_id: DSQ to dispatch to
+ * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
+ * @p: task_struct to insert
+ * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @enq_flags: SCX_ENQ_*
 *
- * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
+ * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
 * Tasks queued into the priority queue are ordered by @vtime and always
 * consumed after the tasks in the FIFO queue. All other aspects are identical
- * to scx_bpf_dispatch().
+ * to scx_bpf_dsq_insert().
 *
 * @vtime ordering is according to time_before64() which considers wrapping. A
 * numerically larger vtime may indicate an earlier position in the ordering and
 * vice-versa.
 */
-__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
-					u64 slice, u64 vtime, u64 enq_flags)
+__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
+					  u64 slice, u64 vtime, u64 enq_flags)
{
-	if (!scx_dispatch_preamble(p, enq_flags))
+	if (!scx_dsq_insert_preamble(p, enq_flags))
		return;

	if (slice)
@@ -6384,12 +6393,22 @@ __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,

	p->scx.dsq_vtime = vtime;

-	scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+	scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+}
+
+/* for backward compatibility, will be removed in v6.15 */
+__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
+					u64 slice, u64 vtime, u64 enq_flags)
+{
+	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
+	scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
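Typical vtime usage, sketched after scx_simple's weighted-vtime mode (vtime_now is a hypothetical global advanced as tasks run in ops.running(); SHARED_DSQ as in the earlier sketches):

```c
static u64 vtime_now;	/* hypothetical: advanced in ops.running() */

static inline bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;	/* wrap-safe, like time_before64() */
}

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	u64 vtime = p->scx.dsq_vtime;

	/*
	 * Cap how far a long-idle task may lag behind so it can't
	 * monopolize the DSQ after waking up.
	 */
	if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
		vtime = vtime_now - SCX_SLICE_DFL;

	scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
				 enq_flags);
}
```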
@@ -6527,9 +6546,9 @@ __bpf_kfunc void scx_bpf_dispatch_cancel(void)
 * to the current CPU's local DSQ for execution. Can only be called from
 * ops.dispatch().
 *
- * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
- * trying to consume the specified DSQ. It may also grab rq locks and thus can't
- * be called under any BPF locks.
+ * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
+ * before trying to consume the specified DSQ. It may also grab rq locks and
+ * thus can't be called under any BPF locks.
 *
 * Returns %true if a task has been consumed, %false if there isn't any task to
 * consume.
@@ -6650,7 +6669,7 @@ __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
 * scx_bpf_dispatch_from_dsq_set_vtime() to update.
 *
 * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
- * scx_bpf_dispatch_vtime() for more information on @vtime.
+ * scx_bpf_dsq_insert_vtime() for more information on @vtime.
 */
__bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
						 struct task_struct *p, u64 dsq_id,
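Since the old names stay callable until v6.15, a BPF scheduler that must load on both older and newer kernels can probe for the new kfunc at load time. A sketch of such a shim (the macro name is illustrative; the scx tooling ships similar compatibility helpers, and both kfuncs must be declared __ksym __weak for bpf_ksym_exists() to work):

```c
/* Weak ksym declarations so the program loads even if one is absent. */
void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
			u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
		      u64 enq_flags) __ksym __weak;

/* Prefer the new kfunc, fall back to the deprecated one on old kernels. */
#define compat_dsq_insert(p, dsq_id, slice, enq_flags)			\
	(bpf_ksym_exists(scx_bpf_dsq_insert) ?				\
	 scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) :	\
	 scx_bpf_dispatch((p), (dsq_id), (slice), (enq_flags)))
```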