
Commit 1602165

sched_ext: Decouple locks in scx_ops_disable_workfn()
The disable path uses three big locks - scx_fork_rwsem, scx_cgroup_rwsem and
cpus_read_lock. Currently, the locks are grabbed together, which is prone to
locking order problems.

With the preceding scx_cgroup_enabled change, we can decouple them:

- As cgroup disabling no longer requires modifying a static_key, which
  requires cpus_read_lock(), there is no need to grab cpus_read_lock()
  before grabbing scx_cgroup_rwsem.

- cgroup can now be independently disabled before tasks are moved back to
  the fair class.

Relocate the scx_cgroup_exit() invocation before scx_fork_rwsem is grabbed,
drop the now unnecessary cpus_read_lock() and move the static_key operations
out of scx_fork_rwsem. This decouples all three locks in the disable path.

Signed-off-by: Tejun Heo <[email protected]>
Reported-and-tested-by: Aboorva Devarajan <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
1 parent 568894e commit 1602165
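For orientation, the change reshapes the disable path's lock scoping roughly as follows. This is a condensed sketch of scx_ops_disable_workfn() before and after, using only the calls visible in the diff below; it is kernel context, not a standalone program:

	/* Before: all three locks nested together, cgroup teardown last */
	percpu_down_write(&scx_fork_rwsem);
	cpus_read_lock();
	scx_cgroup_lock();
	/* ... exit tasks, flip static keys via *_cpuslocked helpers ... */
	scx_cgroup_exit();
	scx_cgroup_unlock();
	cpus_read_unlock();
	percpu_up_write(&scx_fork_rwsem);

	/* After: each lock held only around the section that needs it */
	scx_cgroup_lock();
	scx_cgroup_exit();		/* no static_key flip, so no cpus_read_lock() */
	scx_cgroup_unlock();

	percpu_down_write(&scx_fork_rwsem);
	/* ... move all tasks back to the fair class ... */
	percpu_up_write(&scx_fork_rwsem);

	static_branch_disable(&__scx_ops_enabled);	/* takes cpus_read_lock() internally */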


kernel/sched/ext.c

Lines changed: 17 additions & 20 deletions
@@ -4456,21 +4456,23 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 	WRITE_ONCE(scx_switching_all, false);
 
 	/*
-	 * Avoid racing against fork and cgroup changes. See scx_ops_enable()
-	 * for explanation on the locking order.
+	 * Shut down cgroup support before tasks so that the cgroup attach path
+	 * doesn't race against scx_ops_exit_task().
 	 */
-	percpu_down_write(&scx_fork_rwsem);
-	cpus_read_lock();
 	scx_cgroup_lock();
+	scx_cgroup_exit();
+	scx_cgroup_unlock();
 
-	scx_ops_init_task_enabled = false;
-
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
 	/*
 	 * The BPF scheduler is going away. All tasks including %TASK_DEAD ones
 	 * must be switched out and exited synchronously.
 	 */
+	percpu_down_write(&scx_fork_rwsem);
+
+	scx_ops_init_task_enabled = false;
+
+	spin_lock_irq(&scx_tasks_lock);
+	scx_task_iter_init(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		const struct sched_class *old_class = p->sched_class;
 		struct sched_enq_and_set_ctx ctx;
@@ -4488,23 +4490,18 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 	}
 	scx_task_iter_exit(&sti);
 	spin_unlock_irq(&scx_tasks_lock);
+	percpu_up_write(&scx_fork_rwsem);
 
 	/* no task is on scx, turn off all the switches and flush in-progress calls */
-	static_branch_disable_cpuslocked(&__scx_ops_enabled);
+	static_branch_disable(&__scx_ops_enabled);
 	for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
-		static_branch_disable_cpuslocked(&scx_has_op[i]);
-	static_branch_disable_cpuslocked(&scx_ops_enq_last);
-	static_branch_disable_cpuslocked(&scx_ops_enq_exiting);
-	static_branch_disable_cpuslocked(&scx_ops_cpu_preempt);
-	static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
+		static_branch_disable(&scx_has_op[i]);
+	static_branch_disable(&scx_ops_enq_last);
+	static_branch_disable(&scx_ops_enq_exiting);
+	static_branch_disable(&scx_ops_cpu_preempt);
+	static_branch_disable(&scx_builtin_idle_enabled);
 	synchronize_rcu();
 
-	scx_cgroup_exit();
-
-	scx_cgroup_unlock();
-	cpus_read_unlock();
-	percpu_up_write(&scx_fork_rwsem);
-
 	if (ei->kind >= SCX_EXIT_ERROR) {
 		pr_err("sched_ext: BPF scheduler \"%s\" disabled (%s)\n",
 		       scx_ops.name, ei->reason);
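A note on the static-key change in the second hunk: the *_cpuslocked variants of the jump-label API are for callers that already hold cpus_read_lock(), while the plain variants acquire it internally. With the external cpus_read_lock() gone from the disable path, the plain forms are the right ones. A minimal sketch of the two patterns, using a hypothetical placeholder key named example_key (the helper names themselves are the real <linux/jump_label.h> API):

	#include <linux/jump_label.h>
	#include <linux/cpu.h>

	/* example_key is a placeholder for illustration only */
	static DEFINE_STATIC_KEY_FALSE(example_key);

	/* Caller already holds cpus_read_lock(): use the _cpuslocked variant. */
	static void disable_under_cpus_lock(void)
	{
		cpus_read_lock();
		static_branch_disable_cpuslocked(&example_key);
		cpus_read_unlock();
	}

	/* No surrounding cpus_read_lock(): the plain variant takes it itself. */
	static void disable_standalone(void)
	{
		static_branch_disable(&example_key);
	}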
