Commit 989442d

Lai Jiangshan authored and htejun committed
workqueue: Move the code of waking a worker up in unbind_workers()
In unbind_workers(), there are two pool->lock held sections separated by the code that zaps nr_running. wake_up_worker() needs to be inside a pool->lock held section and after nr_running has been zapped. And zapping nr_running had to happen after schedule() while the local wake-up functionality was in use. Now that the call to schedule() has been removed along with the local wake-up functionality, the code can be merged into the same pool->lock held section.

The diffstat shows other code being moved down because the diff tools cannot recognize that two lock sections were merged by swapping two code blocks.

Signed-off-by: Lai Jiangshan <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
1 parent b4ac938 commit 989442d

File tree: 1 file changed (+15, -23 lines)


kernel/workqueue.c

Lines changed: 15 additions & 23 deletions
@@ -1810,14 +1810,8 @@ static void worker_enter_idle(struct worker *worker)
         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
-        /*
-         * Sanity check nr_running. Because unbind_workers() releases
-         * pool->lock between setting %WORKER_UNBOUND and zapping
-         * nr_running, the warning may trigger spuriously. Check iff
-         * unbind is not in progress.
-         */
-        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
-                     pool->nr_workers == pool->nr_idle &&
+        /* Sanity check nr_running. */
+        WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
                      atomic_read(&pool->nr_running));
 }
 
@@ -4988,21 +4982,12 @@ static void unbind_workers(int cpu)
 
                 pool->flags |= POOL_DISASSOCIATED;
 
-                raw_spin_unlock_irq(&pool->lock);
-
-                for_each_pool_worker(worker, pool) {
-                        kthread_set_per_cpu(worker->task, -1);
-                        WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
-                }
-
-                mutex_unlock(&wq_pool_attach_mutex);
-
                 /*
-                 * Sched callbacks are disabled now. Zap nr_running.
-                 * After this, nr_running stays zero and need_more_worker()
-                 * and keep_working() are always true as long as the
-                 * worklist is not empty. This pool now behaves as an
-                 * unbound (in terms of concurrency management) pool which
+                 * The handling of nr_running in sched callbacks are disabled
+                 * now. Zap nr_running. After this, nr_running stays zero and
+                 * need_more_worker() and keep_working() are always true as
+                 * long as the worklist is not empty. This pool now behaves as
+                 * an unbound (in terms of concurrency management) pool which
                  * are served by workers tied to the pool.
                  */
                 atomic_set(&pool->nr_running, 0);
@@ -5012,9 +4997,16 @@ static void unbind_workers(int cpu)
                  * worker blocking could lead to lengthy stalls. Kick off
                  * unbound chain execution of currently pending work items.
                  */
-                raw_spin_lock_irq(&pool->lock);
                 wake_up_worker(pool);
+
                 raw_spin_unlock_irq(&pool->lock);
+
+                for_each_pool_worker(worker, pool) {
+                        kthread_set_per_cpu(worker->task, -1);
+                        WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+                }
+
+                mutex_unlock(&wq_pool_attach_mutex);
         }
 }
 
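For orientation, the following is a simplified sketch of how the relevant part of unbind_workers() reads after this commit. It is reconstructed from the hunks above rather than copied verbatim from kernel/workqueue.c (the surrounding per-CPU pool loop, the lock acquisition placement, the WORKER_UNBOUND flagging of the workers, and the original comments are paraphrased or abbreviated), so treat it only as an illustration of the new ordering: zap nr_running and wake a worker inside the single pool->lock section, then let the workers loose on the other CPUs after the lock is dropped.

/* Simplified sketch, reconstructed from the diff above (not verbatim kernel code). */
static void unbind_workers(int cpu)
{
        struct worker_pool *pool;
        struct worker *worker;

        for_each_cpu_worker_pool(pool, cpu) {
                mutex_lock(&wq_pool_attach_mutex);
                raw_spin_lock_irq(&pool->lock);

                /* ... mark the pool's workers WORKER_UNBOUND ... */
                pool->flags |= POOL_DISASSOCIATED;

                /*
                 * Zap nr_running while still holding pool->lock; with the
                 * local wake-up path (and its schedule() call) gone, this
                 * no longer needs its own locked section.
                 */
                atomic_set(&pool->nr_running, 0);

                /* Wake a worker after the zap, still under pool->lock. */
                wake_up_worker(pool);

                raw_spin_unlock_irq(&pool->lock);

                /* Allow the (now unbound) workers to run on any possible CPU. */
                for_each_pool_worker(worker, pool) {
                        kthread_set_per_cpu(worker->task, -1);
                        WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                          cpu_possible_mask) < 0);
                }

                mutex_unlock(&wq_pool_attach_mutex);
        }
}

Compared with the old layout, nothing is dropped and retaken in the middle of the section: the unlock, the set_cpus_allowed_ptr() loop, and the mutex_unlock() simply move below wake_up_worker(), which is exactly what the second and third hunks show.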