Skip to content

Commit 418592a

Browse files
committed
Merge tag 'sched_ext-for-6.18-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext
Pull sched_ext fixes from Tejun Heo:

 "Five fixes addressing PREEMPT_RT compatibility and locking issues.

  Three commits fix potential deadlocks and sleeps in atomic contexts on
  RT kernels by converting locks to raw spinlocks and ensuring IRQ work
  runs in hard-irq context. The remaining two fix unsafe locking in the
  debug dump path and a variable dereference typo"

* tag 'sched_ext-for-6.18-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  sched_ext: Use IRQ_WORK_INIT_HARD() to initialize rq->scx.kick_cpus_irq_work
  sched_ext: Fix possible deadlock in the deferred_irq_workfn()
  sched/ext: convert scx_tasks_lock to raw spinlock
  sched_ext: Fix unsafe locking in the scx_dump_state()
  sched_ext: Fix use of uninitialized variable in scx_bpf_cpuperf_set()
2 parents e97c618 + 36c6f3c commit 418592a

File tree

1 file changed

+13
-13
lines changed

1 file changed

+13
-13
lines changed

kernel/sched/ext.c

Lines changed: 13 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -25,7 +25,7 @@ static struct scx_sched __rcu *scx_root;
2525
* guarantee system safety. Maintain a dedicated task list which contains every
2626
* task between its fork and eventual free.
2727
*/
28-
static DEFINE_SPINLOCK(scx_tasks_lock);
28+
static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
2929
static LIST_HEAD(scx_tasks);
3030

3131
/* ops enable/disable */
@@ -476,7 +476,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
476476
BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
477477
((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
478478

479-
spin_lock_irq(&scx_tasks_lock);
479+
raw_spin_lock_irq(&scx_tasks_lock);
480480

481481
iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
482482
list_add(&iter->cursor.tasks_node, &scx_tasks);
@@ -507,14 +507,14 @@ static void scx_task_iter_unlock(struct scx_task_iter *iter)
507507
__scx_task_iter_rq_unlock(iter);
508508
if (iter->list_locked) {
509509
iter->list_locked = false;
510-
spin_unlock_irq(&scx_tasks_lock);
510+
raw_spin_unlock_irq(&scx_tasks_lock);
511511
}
512512
}
513513

514514
static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
515515
{
516516
if (!iter->list_locked) {
517-
spin_lock_irq(&scx_tasks_lock);
517+
raw_spin_lock_irq(&scx_tasks_lock);
518518
iter->list_locked = true;
519519
}
520520
}
@@ -2940,9 +2940,9 @@ void scx_post_fork(struct task_struct *p)
29402940
}
29412941
}
29422942

2943-
spin_lock_irq(&scx_tasks_lock);
2943+
raw_spin_lock_irq(&scx_tasks_lock);
29442944
list_add_tail(&p->scx.tasks_node, &scx_tasks);
2945-
spin_unlock_irq(&scx_tasks_lock);
2945+
raw_spin_unlock_irq(&scx_tasks_lock);
29462946

29472947
percpu_up_read(&scx_fork_rwsem);
29482948
}
@@ -2966,9 +2966,9 @@ void sched_ext_free(struct task_struct *p)
29662966
{
29672967
unsigned long flags;
29682968

2969-
spin_lock_irqsave(&scx_tasks_lock, flags);
2969+
raw_spin_lock_irqsave(&scx_tasks_lock, flags);
29702970
list_del_init(&p->scx.tasks_node);
2971-
spin_unlock_irqrestore(&scx_tasks_lock, flags);
2971+
raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
29722972

29732973
/*
29742974
* @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
@@ -4276,7 +4276,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
42764276
size_t avail, used;
42774277
bool idle;
42784278

4279-
rq_lock(rq, &rf);
4279+
rq_lock_irqsave(rq, &rf);
42804280

42814281
idle = list_empty(&rq->scx.runnable_list) &&
42824282
rq->curr->sched_class == &idle_sched_class;
@@ -4345,7 +4345,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
43454345
list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
43464346
scx_dump_task(&s, &dctx, p, ' ');
43474347
next:
4348-
rq_unlock(rq, &rf);
4348+
rq_unlock_irqrestore(rq, &rf);
43494349
}
43504350

43514351
dump_newline(&s);
@@ -5321,8 +5321,8 @@ void __init init_sched_ext_class(void)
53215321
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
53225322
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
53235323
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
5324-
init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
5325-
init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
5324+
rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
5325+
rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
53265326

53275327
if (cpu_online(cpu))
53285328
cpu_rq(cpu)->scx.flags |= SCX_RQ_ONLINE;
@@ -6401,7 +6401,7 @@ __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
64016401

64026402
guard(rcu)();
64036403

6404-
sch = rcu_dereference(sch);
6404+
sch = rcu_dereference(scx_root);
64056405
if (unlikely(!sch))
64066406
return;
64076407

0 commit comments

Comments (0)