@@ -1276,76 +1276,86 @@ struct scx_task_iter {
 };
 
 /**
- * scx_task_iter_init - Initialize a task iterator
+ * scx_task_iter_start - Lock scx_tasks_lock and start a task iteration
  * @iter: iterator to init
  *
- * Initialize @iter. Must be called with scx_tasks_lock held. Once initialized,
- * @iter must eventually be exited with scx_task_iter_exit().
+ * Initialize @iter and return with scx_tasks_lock held. Once initialized, @iter
+ * must eventually be stopped with scx_task_iter_stop().
  *
- * scx_tasks_lock may be released between this and the first next() call or
- * between any two next() calls. If scx_tasks_lock is released between two
- * next() calls, the caller is responsible for ensuring that the task being
- * iterated remains accessible either through RCU read lock or obtaining a
- * reference count.
+ * scx_tasks_lock and the rq lock may be released using scx_task_iter_unlock()
+ * between this and the first next() call or between any two next() calls. If
+ * the locks are released between two next() calls, the caller is responsible
+ * for ensuring that the task being iterated remains accessible either through
+ * RCU read lock or obtaining a reference count.
  *
  * All tasks which existed when the iteration started are guaranteed to be
  * visited as long as they still exist.
  */
-static void scx_task_iter_init(struct scx_task_iter *iter)
+static void scx_task_iter_start(struct scx_task_iter *iter)
 {
-	lockdep_assert_held(&scx_tasks_lock);
-
 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
 
+	spin_lock_irq(&scx_tasks_lock);
+
 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
 	list_add(&iter->cursor.tasks_node, &scx_tasks);
 	iter->locked = NULL;
 }
 
+static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
+{
+	if (iter->locked) {
+		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
+		iter->locked = NULL;
+	}
+}
+
 /**
- * scx_task_iter_rq_unlock - Unlock rq locked by a task iterator
- * @iter: iterator to unlock rq for
+ * scx_task_iter_unlock - Unlock rq and scx_tasks_lock held by a task iterator
+ * @iter: iterator to unlock
  *
  * If @iter is in the middle of a locked iteration, it may be locking the rq of
- * the task currently being visited. Unlock the rq if so. This function can be
- * safely called anytime during an iteration.
+ * the task currently being visited in addition to scx_tasks_lock. Unlock both.
+ * This function can be safely called anytime during an iteration.
+ */
+static void scx_task_iter_unlock(struct scx_task_iter *iter)
+{
+	__scx_task_iter_rq_unlock(iter);
+	spin_unlock_irq(&scx_tasks_lock);
+}
+
+/**
+ * scx_task_iter_relock - Lock scx_tasks_lock released by scx_task_iter_unlock()
+ * @iter: iterator to re-lock
  *
- * Returns %true if the rq @iter was locking is unlocked. %false if @iter was
- * not locking an rq.
+ * Re-lock scx_tasks_lock unlocked by scx_task_iter_unlock(). Note that it
+ * doesn't re-lock the rq lock. Must be called before other iterator operations.
  */
-static bool scx_task_iter_rq_unlock(struct scx_task_iter *iter)
+static void scx_task_iter_relock(struct scx_task_iter *iter)
 {
-	if (iter->locked) {
-		task_rq_unlock(iter->rq, iter->locked, &iter->rf);
-		iter->locked = NULL;
-		return true;
-	} else {
-		return false;
-	}
+	spin_lock_irq(&scx_tasks_lock);
 }
 
 /**
- * scx_task_iter_exit - Exit a task iterator
+ * scx_task_iter_stop - Stop a task iteration and unlock scx_tasks_lock
  * @iter: iterator to exit
  *
- * Exit a previously initialized @iter. Must be called with scx_tasks_lock held.
- * If the iterator holds a task's rq lock, that rq lock is released. See
- * scx_task_iter_init() for details.
+ * Exit a previously initialized @iter. Must be called with scx_tasks_lock held
+ * which is released on return. If the iterator holds a task's rq lock, that rq
+ * lock is also released. See scx_task_iter_start() for details.
  */
-static void scx_task_iter_exit(struct scx_task_iter *iter)
+static void scx_task_iter_stop(struct scx_task_iter *iter)
 {
-	lockdep_assert_held(&scx_tasks_lock);
-
-	scx_task_iter_rq_unlock(iter);
 	list_del_init(&iter->cursor.tasks_node);
+	scx_task_iter_unlock(iter);
 }
 
 /**
  * scx_task_iter_next - Next task
  * @iter: iterator to walk
  *
- * Visit the next task. See scx_task_iter_init() for details.
+ * Visit the next task. See scx_task_iter_start() for details.
  */
 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
 {
@@ -1373,14 +1383,14 @@ static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
  * @include_dead: Whether we should include dead tasks in the iteration
  *
  * Visit the non-idle task with its rq lock held. Allows callers to specify
- * whether they would like to filter out dead tasks. See scx_task_iter_init()
+ * whether they would like to filter out dead tasks. See scx_task_iter_start()
  * for details.
  */
 static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
 {
 	struct task_struct *p;
 
-	scx_task_iter_rq_unlock(iter);
+	__scx_task_iter_rq_unlock(iter);
 
 	while ((p = scx_task_iter_next(iter))) {
 		/*
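
A minimal usage sketch of the simple pattern, assuming only the helpers introduced in the hunk above (the walker function itself is hypothetical and not part of the change); this is the shape the call sites in the hunks below switch to:

	/* Sketch: iterate all tasks with each task's rq locked. */
	static void example_walk_tasks(void)
	{
		struct scx_task_iter sti;
		struct task_struct *p;

		scx_task_iter_start(&sti);	/* acquires scx_tasks_lock */
		while ((p = scx_task_iter_next_locked(&sti))) {
			/* @p's rq lock is held here; must not sleep */
		}
		scx_task_iter_stop(&sti);	/* drops any held rq lock and scx_tasks_lock */
	}
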
@@ -4462,8 +4472,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 
 	scx_ops_init_task_enabled = false;
 
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
+	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		const struct sched_class *old_class = p->sched_class;
 		struct sched_enq_and_set_ctx ctx;
@@ -4478,8 +4487,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 		check_class_changed(task_rq(p), p, old_class, p->prio);
 		scx_ops_exit_task(p);
 	}
-	scx_task_iter_exit(&sti);
-	spin_unlock_irq(&scx_tasks_lock);
+	scx_task_iter_stop(&sti);
 	percpu_up_write(&scx_fork_rwsem);
 
 	/* no task is on scx, turn off all the switches and flush in-progress calls */
@@ -5130,8 +5138,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	if (ret)
 		goto err_disable_unlock_all;
 
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
+	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		/*
 		 * @p may already be dead, have lost all its usages counts and
@@ -5141,15 +5148,13 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
51415148 if (!tryget_task_struct (p ))
51425149 continue ;
51435150
5144- scx_task_iter_rq_unlock (& sti );
5145- spin_unlock_irq (& scx_tasks_lock );
5151+ scx_task_iter_unlock (& sti );
51465152
51475153 ret = scx_ops_init_task (p , task_group (p ), false);
51485154 if (ret ) {
51495155 put_task_struct (p );
5150- spin_lock_irq (& scx_tasks_lock );
5151- scx_task_iter_exit (& sti );
5152- spin_unlock_irq (& scx_tasks_lock );
5156+ scx_task_iter_relock (& sti );
5157+ scx_task_iter_stop (& sti );
51535158 scx_ops_error ("ops.init_task() failed (%d) for %s[%d]" ,
51545159 ret , p -> comm , p -> pid );
51555160 goto err_disable_unlock_all ;
@@ -5158,10 +5163,9 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		scx_set_task_state(p, SCX_TASK_READY);
 
 		put_task_struct(p);
-		spin_lock_irq(&scx_tasks_lock);
+		scx_task_iter_relock(&sti);
 	}
-	scx_task_iter_exit(&sti);
-	spin_unlock_irq(&scx_tasks_lock);
+	scx_task_iter_stop(&sti);
 	scx_cgroup_unlock();
 	percpu_up_write(&scx_fork_rwsem);
 
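
The hunk above is the subtle part of the change: scx_task_iter_stop() expects scx_tasks_lock to be held, so a caller that dropped the locks with scx_task_iter_unlock() must call scx_task_iter_relock() before the next iterator operation, on the error path as well as the success path. A hedged sketch of that discipline, with sleepable_init() as a purely hypothetical stand-in for a sleepable callback such as ops.init_task():

	static int sleepable_init(struct task_struct *p) { return 0; }	/* placeholder */

	static int example_init_all_tasks(void)
	{
		struct scx_task_iter sti;
		struct task_struct *p;
		int ret;

		scx_task_iter_start(&sti);
		while ((p = scx_task_iter_next_locked(&sti))) {
			get_task_struct(p);		/* keep @p valid while unlocked */
			scx_task_iter_unlock(&sti);	/* drops the rq lock and scx_tasks_lock */

			ret = sleepable_init(p);	/* safe to sleep here */
			put_task_struct(p);
			if (ret) {
				scx_task_iter_relock(&sti);	/* stop requires scx_tasks_lock */
				scx_task_iter_stop(&sti);
				return ret;
			}
			scx_task_iter_relock(&sti);	/* resume iteration under the lock */
		}
		scx_task_iter_stop(&sti);
		return 0;
	}
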
@@ -5178,8 +5182,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	 * scx_tasks_lock.
 	 */
 	percpu_down_write(&scx_fork_rwsem);
-	spin_lock_irq(&scx_tasks_lock);
-	scx_task_iter_init(&sti);
+	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
 		const struct sched_class *old_class = p->sched_class;
 		struct sched_enq_and_set_ctx ctx;
@@ -5194,8 +5197,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
 		check_class_changed(task_rq(p), p, old_class, p->prio);
 	}
-	scx_task_iter_exit(&sti);
-	spin_unlock_irq(&scx_tasks_lock);
+	scx_task_iter_stop(&sti);
 	percpu_up_write(&scx_fork_rwsem);
 
 	scx_ops_bypass(false);