
Commit efe231d

sched_ext: Decouple locks in scx_ops_enable()
The enable path uses three big locks - scx_fork_rwsem, scx_cgroup_rwsem and
cpus_read_lock. Currently, the locks are grabbed together which is prone to
locking order problems.

For example, currently, there is a possible deadlock involving scx_fork_rwsem
and cpus_read_lock. cpus_read_lock has to nest inside scx_fork_rwsem due to
locking order existing in other subsystems. However, there exists a dependency
in the other direction during hotplug if hotplug needs to fork a new task,
which happens in some cases. This leads to the following deadlock:

        scx_ops_enable()                     hotplug
                                             percpu_down_write(&cpu_hotplug_lock)
        percpu_down_write(&scx_fork_rwsem)
        block on cpu_hotplug_lock
                                             kthread_create() waits for kthreadd
                                             kthreadd blocks on scx_fork_rwsem

Note that this doesn't trigger lockdep because the hotplug side dependency
bounces through kthreadd.

With the preceding scx_cgroup_enabled change, this can be solved by decoupling
cpus_read_lock, which is needed for static_key manipulations, from the other
two locks.

- Move the first block of static_key manipulations outside of scx_fork_rwsem
  and scx_cgroup_rwsem. This is now safe with the preceding scx_cgroup_enabled
  change.

- Drop scx_cgroup_rwsem and scx_fork_rwsem between the two task iteration
  blocks so that __scx_ops_enabled static_key enabling is outside the two
  rwsems.

Signed-off-by: Tejun Heo <[email protected]>
Reported-and-tested-by: Aboorva Devarajan <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
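For orientation, the enable path's lock sequence after this change can be condensed from the diff below roughly as follows. This is a sketch only: error handling and everything unrelated to locking are elided, and the cpus_read_unlock() on the success path lives in unchanged code that the diff does not show.

        cpus_read_lock();
        /* ops->init(); on failure: cpus_read_unlock(); goto err_disable; */
        cpus_read_unlock();

        scx_ops_bypass(true);

        /*
         * static_keys are now flipped with neither rwsem held; plain
         * static_branch_enable() takes cpus_read_lock internally
         */
        for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
                if (((void (**)(void))ops)[i])
                        static_branch_enable(&scx_has_op[i]);
        /* likewise scx_ops_enq_last, scx_ops_enq_exiting, scx_ops_cpu_preempt
         * and scx_builtin_idle_enabled */

        /* first task iteration: cgroup init and per-task init under both locks */
        percpu_down_write(&scx_fork_rwsem);
        scx_cgroup_lock();
        /* scx_cgroup_init(); scx_ops_init_task() for every task */
        scx_cgroup_unlock();
        percpu_up_write(&scx_fork_rwsem);

        /* __scx_ops_enabled is flipped outside both rwsems */
        static_branch_enable(&__scx_ops_enabled);

        /* second task iteration: READY -> ENABLED, forks locked out again */
        percpu_down_write(&scx_fork_rwsem);
        /* switch all eligible tasks */
        percpu_up_write(&scx_fork_rwsem);

        scx_ops_bypass(false);

cpus_read_lock is thus never held while either rwsem is write-held, so the hotplug-through-kthreadd dependency can no longer close the loop described above.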
1 parent 1602165 commit efe231d

File tree: 1 file changed, +27 -40 lines changed

kernel/sched/ext.c

Lines changed: 27 additions & 40 deletions
@@ -5049,7 +5049,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
                 if (ret) {
                         ret = ops_sanitize_err("init", ret);
-                        goto err_disable_unlock_cpus;
+                        cpus_read_unlock();
+                        goto err_disable;
                 }
         }
 
@@ -5092,54 +5093,30 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
          */
         scx_ops_bypass(true);
 
-        /*
-         * Lock out forks, cgroup on/offlining and moves before opening the
-         * floodgate so that they don't wander into the operations prematurely.
-         *
-         * We don't need to keep the CPUs stable but static_branch_*() requires
-         * cpus_read_lock() and scx_cgroup_rwsem must nest inside
-         * cpu_hotplug_lock because of the following dependency chain:
-         *
-         *   cpu_hotplug_lock --> cgroup_threadgroup_rwsem --> scx_cgroup_rwsem
-         *
-         * So, we need to do cpus_read_lock() before scx_cgroup_lock() and use
-         * static_branch_*_cpuslocked().
-         *
-         * Note that cpu_hotplug_lock must nest inside scx_fork_rwsem due to the
-         * following dependency chain:
-         *
-         *   scx_fork_rwsem --> pernet_ops_rwsem --> cpu_hotplug_lock
-         */
-        percpu_down_write(&scx_fork_rwsem);
-        cpus_read_lock();
-        scx_cgroup_lock();
-
         for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
                 if (((void (**)(void))ops)[i])
-                        static_branch_enable_cpuslocked(&scx_has_op[i]);
+                        static_branch_enable(&scx_has_op[i]);
 
         if (ops->flags & SCX_OPS_ENQ_LAST)
-                static_branch_enable_cpuslocked(&scx_ops_enq_last);
+                static_branch_enable(&scx_ops_enq_last);
 
         if (ops->flags & SCX_OPS_ENQ_EXITING)
-                static_branch_enable_cpuslocked(&scx_ops_enq_exiting);
+                static_branch_enable(&scx_ops_enq_exiting);
         if (scx_ops.cpu_acquire || scx_ops.cpu_release)
-                static_branch_enable_cpuslocked(&scx_ops_cpu_preempt);
+                static_branch_enable(&scx_ops_cpu_preempt);
 
         if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
                 reset_idle_masks();
-                static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
+                static_branch_enable(&scx_builtin_idle_enabled);
         } else {
-                static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
+                static_branch_disable(&scx_builtin_idle_enabled);
         }
 
         /*
-         * All cgroups should be initialized before letting in tasks. cgroup
-         * on/offlining and task migrations are already locked out.
+         * Lock out forks, cgroup on/offlining and moves before opening the
+         * floodgate so that they don't wander into the operations prematurely.
          */
-        ret = scx_cgroup_init();
-        if (ret)
-                goto err_disable_unlock_all;
+        percpu_down_write(&scx_fork_rwsem);
 
         WARN_ON_ONCE(scx_ops_init_task_enabled);
         scx_ops_init_task_enabled = true;
@@ -5150,7 +5127,18 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
          * leaving as sched_ext_free() can handle both prepped and enabled
          * tasks. Prep all tasks first and then enable them with preemption
          * disabled.
+         *
+         * All cgroups should be initialized before scx_ops_init_task() so that
+         * the BPF scheduler can reliably track each task's cgroup membership
+         * from scx_ops_init_task(). Lock out cgroup on/offlining and task
+         * migrations while tasks are being initialized so that
+         * scx_cgroup_can_attach() never sees uninitialized tasks.
          */
+        scx_cgroup_lock();
+        ret = scx_cgroup_init();
+        if (ret)
+                goto err_disable_unlock_all;
+
         spin_lock_irq(&scx_tasks_lock);
         scx_task_iter_init(&sti);
         while ((p = scx_task_iter_next_locked(&sti))) {
@@ -5183,19 +5171,22 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
         }
         scx_task_iter_exit(&sti);
         spin_unlock_irq(&scx_tasks_lock);
+        scx_cgroup_unlock();
+        percpu_up_write(&scx_fork_rwsem);
 
         /*
          * All tasks are READY. It's safe to turn on scx_enabled() and switch
          * all eligible tasks.
          */
         WRITE_ONCE(scx_switching_all, !(ops->flags & SCX_OPS_SWITCH_PARTIAL));
-        static_branch_enable_cpuslocked(&__scx_ops_enabled);
+        static_branch_enable(&__scx_ops_enabled);
 
         /*
          * We're fully committed and can't fail. The task READY -> ENABLED
          * transitions here are synchronized against sched_ext_free() through
          * scx_tasks_lock.
          */
+        percpu_down_write(&scx_fork_rwsem);
         spin_lock_irq(&scx_tasks_lock);
         scx_task_iter_init(&sti);
         while ((p = scx_task_iter_next_locked(&sti))) {
@@ -5213,10 +5204,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
         }
         scx_task_iter_exit(&sti);
         spin_unlock_irq(&scx_tasks_lock);
-
-        scx_cgroup_unlock();
-        cpus_read_unlock();
         percpu_up_write(&scx_fork_rwsem);
+
         scx_ops_bypass(false);
 
         /*
@@ -5259,8 +5248,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
         scx_cgroup_unlock();
         percpu_up_write(&scx_fork_rwsem);
         scx_ops_bypass(false);
-err_disable_unlock_cpus:
-        cpus_read_unlock();
 err_disable:
         mutex_unlock(&scx_ops_enable_mutex);
         /* must be fully disabled before returning */
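A side effect visible in the first and last hunks: with cpus_read_lock decoupled from the rwsems, the only error path that still has to drop it is the ops->init() failure, where the unlock is now done inline, so the err_disable_unlock_cpus label disappears. Assuming the err_disable_unlock_all label sits directly above the context lines shown (it is outside the hunk), the remaining unwinding reads:

        err_disable_unlock_all:
                scx_cgroup_unlock();
                percpu_up_write(&scx_fork_rwsem);
                scx_ops_bypass(false);
        err_disable:
                mutex_unlock(&scx_ops_enable_mutex);
                /* must be fully disabled before returning */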
