@@ -220,10 +220,10 @@ struct sched_ext_ops {
	 * dispatch. While an explicit custom mechanism can be added,
	 * select_cpu() serves as the default way to wake up idle CPUs.
	 *
-	 * @p may be dispatched directly by calling scx_bpf_dispatch(). If @p
-	 * is dispatched, the ops.enqueue() callback will be skipped. Finally,
-	 * if @p is dispatched to SCX_DSQ_LOCAL, it will be dispatched to the
-	 * local DSQ of whatever CPU is returned by this callback.
+	 * @p may be inserted into a DSQ directly by calling
+	 * scx_bpf_dsq_insert(). If so, the ops.enqueue() will be skipped.
+	 * Directly inserting into %SCX_DSQ_LOCAL will put @p in the local DSQ
+	 * of the CPU returned by this operation.
	 *
	 * Note that select_cpu() is never called for tasks that can only run
	 * on a single CPU or tasks with migration disabled, as they don't have
@@ -237,12 +237,12 @@ struct sched_ext_ops {
	 * @p: task being enqueued
	 * @enq_flags: %SCX_ENQ_*
	 *
-	 * @p is ready to run. Dispatch directly by calling scx_bpf_dispatch()
-	 * or enqueue on the BPF scheduler. If not directly dispatched, the bpf
-	 * scheduler owns @p and if it fails to dispatch @p, the task will
-	 * stall.
+	 * @p is ready to run. Insert directly into a DSQ by calling
+	 * scx_bpf_dsq_insert() or enqueue on the BPF scheduler. If not directly
+	 * inserted, the bpf scheduler owns @p and if it fails to dispatch @p,
+	 * the task will stall.
	 *
-	 * If @p was dispatched from ops.select_cpu(), this callback is
+	 * If @p was inserted into a DSQ from ops.select_cpu(), this callback is
	 * skipped.
	 */
	void (*enqueue)(struct task_struct *p, u64 enq_flags);
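
(For orientation, a minimal BPF-side sketch of the two callbacks documented above, using the renamed kfunc. This is illustrative only and not part of the patch; it loosely follows the in-tree scx_simple example and assumes a SHARED_DSQ created with scx_bpf_create_dsq() during ops.init().)

/*
 * Illustrative sketch, not part of this patch.  SHARED_DSQ is assumed to be
 * a custom DSQ the scheduler created at init time.
 */
s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);

	if (is_idle)
		/* direct insertion: ops.enqueue() will be skipped for @p */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* hand @p to the shared DSQ; it stalls if never dispatched from there */
	scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
}

Direct insertion from ops.select_cpu() skips ops.enqueue() entirely, which is the behavior the updated comment above describes.
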
@@ -270,11 +270,11 @@ struct sched_ext_ops {
	 *
	 * Called when a CPU's local dsq is empty. The operation should dispatch
	 * one or more tasks from the BPF scheduler into the DSQs using
-	 * scx_bpf_dispatch() and/or consume user DSQs into the local DSQ using
-	 * scx_bpf_consume().
+	 * scx_bpf_dsq_insert() and/or consume user DSQs into the local DSQ
+	 * using scx_bpf_consume().
	 *
-	 * The maximum number of times scx_bpf_dispatch() can be called without
-	 * an intervening scx_bpf_consume() is specified by
+	 * The maximum number of times scx_bpf_dsq_insert() can be called
+	 * without an intervening scx_bpf_consume() is specified by
	 * ops.dispatch_max_batch. See the comments on top of the two functions
	 * for more details.
	 *
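
(Continuing the sketch above, a minimal ops.dispatch() refills the local DSQ from the shared DSQ when it runs empty; illustrative only, same SHARED_DSQ assumption.)

/* illustrative sketch, not part of this patch */
void BPF_STRUCT_OPS(sketch_dispatch, s32 cpu, struct task_struct *prev)
{
	/* move a queued task, if any, from SHARED_DSQ to this CPU's local DSQ */
	scx_bpf_consume(SHARED_DSQ);
}
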
@@ -714,7 +714,7 @@ enum scx_enq_flags {

	/*
	 * Set the following to trigger preemption when calling
-	 * scx_bpf_dispatch() with a local dsq as the target. The slice of the
+	 * scx_bpf_dsq_insert() with a local dsq as the target. The slice of the
	 * current task is cleared to zero and the CPU is kicked into the
	 * scheduling path. Implies %SCX_ENQ_HEAD.
	 */
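
(Illustrative use of the flag with the renamed kfunc; scx_bpf_task_cpu() is used here only as a stand-in for whatever CPU the scheduler's own policy picks.)

/* illustrative sketch: force @p to run next on its CPU, preempting the current task */
void BPF_STRUCT_OPS(sketch_enqueue_preempt, struct task_struct *p, u64 enq_flags)
{
	s32 cpu = scx_bpf_task_cpu(p);	/* assumed target; choose per policy */

	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL,
			   enq_flags | SCX_ENQ_PREEMPT);
}
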
@@ -2322,7 +2322,7 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
	/*
	 * We don't require the BPF scheduler to avoid dispatching to offline
	 * CPUs mostly for convenience but also because CPUs can go offline
-	 * between scx_bpf_dispatch() calls and here. Trigger error iff the
+	 * between scx_bpf_dsq_insert() calls and here. Trigger error iff the
	 * picked CPU is outside the allowed mask.
	 */
	if (!task_allowed_on_cpu(p, cpu)) {
@@ -2658,7 +2658,7 @@ static void dispatch_to_local_dsq(struct rq *rq, struct scx_dispatch_q *dst_dsq,
 * Dispatching to local DSQs may need to wait for queueing to complete or
 * require rq lock dancing. As we don't wanna do either while inside
 * ops.dispatch() to avoid locking order inversion, we split dispatching into
- * two parts. scx_bpf_dispatch() which is called by ops.dispatch() records the
+ * two parts. scx_bpf_dsq_insert() which is called by ops.dispatch() records the
 * task and its qseq. Once ops.dispatch() returns, this function is called to
 * finish up.
 *
@@ -2690,7 +2690,7 @@ static void finish_dispatch(struct rq *rq, struct task_struct *p,
		/*
		 * If qseq doesn't match, @p has gone through at least one
		 * dispatch/dequeue and re-enqueue cycle between
-		 * scx_bpf_dispatch() and here and we have no claim on it.
+		 * scx_bpf_dsq_insert() and here and we have no claim on it.
		 */
		if ((opss & SCX_OPSS_QSEQ_MASK) != qseq_at_dispatch)
			return;
@@ -6258,7 +6258,7 @@ static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
	.set = &scx_kfunc_ids_select_cpu,
};

-static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
+static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
{
	if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
		return false;
@@ -6278,7 +6278,8 @@ static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
	return true;
}

-static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags)
+static void scx_dsq_insert_commit(struct task_struct *p, u64 dsq_id,
+				  u64 enq_flags)
{
	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	struct task_struct *ddsp_task;
@@ -6305,14 +6306,14 @@ static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags
__bpf_kfunc_start_defs();

/**
- * scx_bpf_dispatch - Dispatch a task into the FIFO queue of a DSQ
- * @p: task_struct to dispatch
- * @dsq_id: DSQ to dispatch to
+ * scx_bpf_dsq_insert - Insert a task into the FIFO queue of a DSQ
+ * @p: task_struct to insert
+ * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @enq_flags: SCX_ENQ_*
 *
- * Dispatch @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe
- * to call this function spuriously. Can be called from ops.enqueue(),
+ * Insert @p into the FIFO queue of the DSQ identified by @dsq_id. It is safe to
+ * call this function spuriously. Can be called from ops.enqueue(),
 * ops.select_cpu(), and ops.dispatch().
 *
 * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch
@@ -6321,14 +6322,14 @@ __bpf_kfunc_start_defs();
 * ops.select_cpu() to be on the target CPU in the first place.
 *
 * When called from ops.select_cpu(), @enq_flags and @dsq_id are stored, and @p
- * will be directly dispatched to the corresponding dispatch queue after
- * ops.select_cpu() returns. If @p is dispatched to SCX_DSQ_LOCAL, it will be
- * dispatched to the local DSQ of the CPU returned by ops.select_cpu().
+ * will be directly inserted into the corresponding dispatch queue after
+ * ops.select_cpu() returns. If @p is inserted into SCX_DSQ_LOCAL, it will be
+ * inserted into the local DSQ of the CPU returned by ops.select_cpu().
 * @enq_flags are OR'd with the enqueue flags on the enqueue path before the
- * task is dispatched.
+ * task is inserted.
 *
 * When called from ops.dispatch(), there are no restrictions on @p or @dsq_id
- * and this function can be called upto ops.dispatch_max_batch times to dispatch
+ * and this function can be called up to ops.dispatch_max_batch times to insert
 * multiple tasks. scx_bpf_dispatch_nr_slots() returns the number of the
 * remaining slots. scx_bpf_consume() flushes the batch and resets the counter.
 *
@@ -6340,41 +6341,49 @@ __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
 * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with
 * scx_bpf_kick_cpu() to trigger scheduling.
 */
-__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
-				  u64 enq_flags)
+__bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice,
+				    u64 enq_flags)
{
-	if (!scx_dispatch_preamble(p, enq_flags))
+	if (!scx_dsq_insert_preamble(p, enq_flags))
		return;

	if (slice)
		p->scx.slice = slice;
	else
		p->scx.slice = p->scx.slice ?: 1;

-	scx_dispatch_commit(p, dsq_id, enq_flags);
+	scx_dsq_insert_commit(p, dsq_id, enq_flags);
+}
+
+/* for backward compatibility, will be removed in v6.15 */
+__bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice,
+				  u64 enq_flags)
+{
+	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()");
+	scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags);
}

/**
- * scx_bpf_dispatch_vtime - Dispatch a task into the vtime priority queue of a DSQ
- * @p: task_struct to dispatch
- * @dsq_id: DSQ to dispatch to
+ * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
+ * @p: task_struct to insert
+ * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @enq_flags: SCX_ENQ_*
 *
- * Dispatch @p into the vtime priority queue of the DSQ identified by @dsq_id.
+ * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id.
 * Tasks queued into the priority queue are ordered by @vtime and always
 * consumed after the tasks in the FIFO queue. All other aspects are identical
- * to scx_bpf_dispatch().
+ * to scx_bpf_dsq_insert().
 *
 * @vtime ordering is according to time_before64() which considers wrapping. A
 * numerically larger vtime may indicate an earlier position in the ordering and
 * vice-versa.
 */
-__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
-					u64 slice, u64 vtime, u64 enq_flags)
+__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id,
+					  u64 slice, u64 vtime, u64 enq_flags)
{
-	if (!scx_dispatch_preamble(p, enq_flags))
+	if (!scx_dsq_insert_preamble(p, enq_flags))
		return;

	if (slice)
@@ -6384,12 +6393,22 @@ __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,

	p->scx.dsq_vtime = vtime;

-	scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+	scx_dsq_insert_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
+}
+
+/* for backward compatibility, will be removed in v6.15 */
+__bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
+					u64 slice, u64 vtime, u64 enq_flags)
+{
+	printk_deferred_once(KERN_WARNING "sched_ext: scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()");
+	scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
+BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch)
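
(A usage sketch of the vtime variant, in the spirit of scx_simple's weighted-vtime mode; illustrative only, with the same SHARED_DSQ assumption as the earlier sketches.)

/* illustrative sketch: enqueue @p ordered by its stored virtual time */
void BPF_STRUCT_OPS(sketch_enqueue_vtime, struct task_struct *p, u64 enq_flags)
{
	u64 vtime = p->scx.dsq_vtime;

	scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime, enq_flags);
}
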
@@ -6527,9 +6546,9 @@ __bpf_kfunc void scx_bpf_dispatch_cancel(void)
 * to the current CPU's local DSQ for execution. Can only be called from
 * ops.dispatch().
 *
- * This function flushes the in-flight dispatches from scx_bpf_dispatch() before
- * trying to consume the specified DSQ. It may also grab rq locks and thus can't
- * be called under any BPF locks.
+ * This function flushes the in-flight dispatches from scx_bpf_dsq_insert()
+ * before trying to consume the specified DSQ. It may also grab rq locks and
+ * thus can't be called under any BPF locks.
 *
 * Returns %true if a task has been consumed, %false if there isn't any task to
 * consume.
@@ -6650,7 +6669,7 @@ __bpf_kfunc bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter,
 * scx_bpf_dispatch_from_dsq_set_vtime() to update.
 *
 * All other aspects are identical to scx_bpf_dispatch_from_dsq(). See
- * scx_bpf_dispatch_vtime() for more information on @vtime.
+ * scx_bpf_dsq_insert_vtime() for more information on @vtime.
 */
__bpf_kfunc bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter,
						 struct task_struct *p, u64 dsq_id,