Skip to content

Commit 8a9b158

Browse files
committed
sched_ext: Merge branch 'for-6.14-fixes' into for-6.15
Pull for-6.14-fixes to receive: 9360dfe ("sched_ext: Validate prev_cpu in scx_bpf_select_cpu_dfl()") which conflicts with: 337d1b3 ("sched_ext: Move built-in idle CPU selection policy to a separate file") Signed-off-by: Tejun Heo <[email protected]>
2 parents 0f0714a + 9360dfe commit 8a9b158

File tree

5 files changed

+25
-11
lines changed

5 files changed

+25
-11
lines changed

kernel/sched/ext.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3250,7 +3250,6 @@ static struct task_struct *pick_task_scx(struct rq *rq)
32503250
{
32513251
struct task_struct *prev = rq->curr;
32523252
struct task_struct *p;
3253-
bool prev_on_scx = prev->sched_class == &ext_sched_class;
32543253
bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
32553254
bool kick_idle = false;
32563255

@@ -3270,14 +3269,18 @@ static struct task_struct *pick_task_scx(struct rq *rq)
32703269
* if pick_task_scx() is called without preceding balance_scx().
32713270
*/
32723271
if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) {
3273-
if (prev_on_scx) {
3272+
if (prev->scx.flags & SCX_TASK_QUEUED) {
32743273
keep_prev = true;
32753274
} else {
32763275
keep_prev = false;
32773276
kick_idle = true;
32783277
}
3279-
} else if (unlikely(keep_prev && !prev_on_scx)) {
3280-
/* only allowed during transitions */
3278+
} else if (unlikely(keep_prev &&
3279+
prev->sched_class != &ext_sched_class)) {
3280+
/*
3281+
* Can happen while enabling as SCX_RQ_BAL_PENDING assertion is
3282+
* conditional on scx_enabled() and may have been skipped.
3283+
*/
32813284
WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED);
32823285
keep_prev = false;
32833286
}
@@ -3544,7 +3547,7 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
35443547
curr->scx.slice = 0;
35453548
touch_core_sched(rq, curr);
35463549
} else if (SCX_HAS_OP(tick)) {
3547-
SCX_CALL_OP(SCX_KF_REST, tick, curr);
3550+
SCX_CALL_OP_TASK(SCX_KF_REST, tick, curr);
35483551
}
35493552

35503553
if (!curr->scx.slice)
@@ -3691,7 +3694,7 @@ static void scx_ops_disable_task(struct task_struct *p)
36913694
WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
36923695

36933696
if (SCX_HAS_OP(disable))
3694-
SCX_CALL_OP(SCX_KF_REST, disable, p);
3697+
SCX_CALL_OP_TASK(SCX_KF_REST, disable, p);
36953698
scx_set_task_state(p, SCX_TASK_READY);
36963699
}
36973700

@@ -3720,7 +3723,7 @@ static void scx_ops_exit_task(struct task_struct *p)
37203723
}
37213724

37223725
if (SCX_HAS_OP(exit_task))
3723-
SCX_CALL_OP(SCX_KF_REST, exit_task, p, &args);
3726+
SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args);
37243727
scx_set_task_state(p, SCX_TASK_NONE);
37253728
}
37263729

kernel/sched/ext_idle.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -800,6 +800,9 @@ __bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
800800
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
801801
u64 wake_flags, bool *is_idle)
802802
{
803+
if (!ops_cpu_valid(prev_cpu, NULL))
804+
goto prev_cpu;
805+
803806
if (!check_builtin_idle_enabled())
804807
goto prev_cpu;
805808

tools/sched_ext/include/scx/common.bpf.h

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -288,8 +288,16 @@ void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
288288
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
289289
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
290290

291-
void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
292-
void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
291+
int bpf_list_push_front_impl(struct bpf_list_head *head,
292+
struct bpf_list_node *node,
293+
void *meta, __u64 off) __ksym;
294+
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)
295+
296+
int bpf_list_push_back_impl(struct bpf_list_head *head,
297+
struct bpf_list_node *node,
298+
void *meta, __u64 off) __ksym;
299+
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)
300+
293301
struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
294302
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
295303
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,

tools/testing/selftests/sched_ext/init_enable_count.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ static enum scx_test_status run(void *ctx)
150150

151151
struct scx_test init_enable_count = {
152152
.name = "init_enable_count",
153-
.description = "Verify we do the correct amount of counting of init, "
153+
.description = "Verify we correctly count the occurrences of init, "
154154
"enable, etc callbacks.",
155155
.run = run,
156156
};

tools/testing/selftests/sched_ext/maybe_null.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ static enum scx_test_status run(void *ctx)
4343

4444
struct scx_test maybe_null = {
4545
.name = "maybe_null",
46-
.description = "Verify if PTR_MAYBE_NULL work for .dispatch",
46+
.description = "Verify if PTR_MAYBE_NULL works for .dispatch",
4747
.run = run,
4848
};
4949
REGISTER_SCX_TEST(&maybe_null)

0 commit comments

Comments (0)