
Commit 2b60145

Merge tag 'wq-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:

 - Rescuer affinity management: affinity is now updated only while the
   rescuer is detached, using wq_unbound_cpumask consistently.
   DISASSOCIATED workers also follow unbound cpumask changes to avoid
   breaking CPU isolation

 - Rescuer cleanups preparing for fetching work items one by one from
   the pool list: work assignment factored out, optimized to skip pwqs
   that no longer need rescue, and shutdown logic simplified

 - Unused assert_rcu_or_wq_mutex_or_pool_mutex() removed

* tag 'wq-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Don't rely on wq->rescuer to stop rescuer
  workqueue: Only assign rescuer work when really needed
  workqueue: Factor out assign_rescuer_work()
  workqueue: Init rescuer's affinities as wq_unbound_cpumask
  workqueue: Let DISASSOCIATED workers follow unbound wq cpumask changes
  workqueue: Update the rescuer's affinity only when it is detached
  workqueue: Remove unused assert_rcu_or_wq_mutex_or_pool_mutex
2 parents 4d38b88 + 6d90215

1 file changed

kernel/workqueue.c

Lines changed: 50 additions & 36 deletions
@@ -541,12 +541,6 @@ static void show_one_worker_pool(struct worker_pool *pool);
 			 !lockdep_is_held(&wq_pool_mutex),		\
 			 "RCU or wq_pool_mutex should be held")
 
-#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
-	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&			\
-			 !lockdep_is_held(&wq->mutex) &&		\
-			 !lockdep_is_held(&wq_pool_mutex),		\
-			 "RCU, wq->mutex or wq_pool_mutex should be held")
-
 #define for_each_bh_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(bh_worker_pools, cpu)[0];		\
 	     (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
@@ -3443,6 +3437,27 @@ static int worker_thread(void *__worker)
 	goto woke_up;
 }
 
+static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer)
+{
+	struct worker_pool *pool = pwq->pool;
+	struct work_struct *work, *n;
+
+	/* need rescue? */
+	if (!pwq->nr_active || !need_to_create_worker(pool))
+		return false;
+
+	/*
+	 * Slurp in all works issued via this workqueue and
+	 * process'em.
+	 */
+	list_for_each_entry_safe(work, n, &pool->worklist, entry) {
+		if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n))
+			pwq->stats[PWQ_STAT_RESCUED]++;
+	}
+
+	return !list_empty(&rescuer->scheduled);
+}
+
 /**
  * rescuer_thread - the rescuer thread function
  * @__rescuer: self
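Note on the factored-out helper: the two-cursor walk matters because assign_work() can detach @work from pool->worklist, and (via its &n argument) advance the cursor past any linked works dragged along with it. Below is a minimal, self-contained userspace sketch of the same deletion-safe idiom; the list helpers and the work/mine names are hand-rolled stand-ins for illustration, not kernel code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	/* Hand-rolled userspace stand-ins for the kernel's list_head API. */
	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void list_init(struct list_head *h) { h->next = h->prev = h; }

	static void list_add_tail(struct list_head *e, struct list_head *head)
	{
		e->prev = head->prev;
		e->next = head;
		head->prev->next = e;
		head->prev = e;
	}

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	struct work { int id; int mine; struct list_head entry; };

	int main(void)
	{
		struct list_head worklist;
		struct work *pos, *n;

		list_init(&worklist);
		for (int i = 0; i < 6; i++) {
			struct work *w = malloc(sizeof(*w));
			w->id = i;
			w->mine = (i % 2 == 0);	/* pretend even ids target our pwq */
			list_add_tail(&w->entry, &worklist);
		}

		/*
		 * Two-cursor walk: 'n' is captured before 'pos' can be
		 * unlinked, so deleting 'pos' mid-iteration never breaks the
		 * traversal -- the guarantee list_for_each_entry_safe()
		 * gives the rescuer.
		 */
		for (pos = container_of(worklist.next, struct work, entry),
		     n = container_of(pos->entry.next, struct work, entry);
		     &pos->entry != &worklist;
		     pos = n, n = container_of(n->entry.next, struct work, entry)) {
			if (pos->mine) {
				list_del(&pos->entry);	/* "assign" it to the rescuer */
				printf("rescued work %d\n", pos->id);
				free(pos);
			}
		}
		return 0;
	}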
@@ -3497,7 +3512,6 @@ static int rescuer_thread(void *__rescuer)
 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
 					struct pool_workqueue, mayday_node);
 		struct worker_pool *pool = pwq->pool;
-		struct work_struct *work, *n;
 
 		__set_current_state(TASK_RUNNING);
 		list_del_init(&pwq->mayday_node);
@@ -3508,18 +3522,9 @@ static int rescuer_thread(void *__rescuer)
 
 		raw_spin_lock_irq(&pool->lock);
 
-		/*
-		 * Slurp in all works issued via this workqueue and
-		 * process'em.
-		 */
 		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
-		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
-			if (get_work_pwq(work) == pwq &&
-			    assign_work(work, rescuer, &n))
-				pwq->stats[PWQ_STAT_RESCUED]++;
-		}
 
-		if (!list_empty(&rescuer->scheduled)) {
+		if (assign_rescuer_work(pwq, rescuer)) {
 			process_scheduled_works(rescuer);
 
 			/*
@@ -3534,10 +3539,9 @@ static int rescuer_thread(void *__rescuer)
 			if (pwq->nr_active && need_to_create_worker(pool)) {
 				raw_spin_lock(&wq_mayday_lock);
 				/*
-				 * Queue iff we aren't racing destruction
-				 * and somebody else hasn't queued it already.
+				 * Queue iff somebody else hasn't queued it already.
 				 */
-				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
+				if (list_empty(&pwq->mayday_node)) {
 					get_pwq(pwq);
 					list_add_tail(&pwq->mayday_node, &wq->maydays);
 				}
@@ -5376,11 +5380,6 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
 	/* update node_nr_active->max */
 	wq_update_node_max_active(ctx->wq, -1);
 
-	/* rescuer needs to respect wq cpumask changes */
-	if (ctx->wq->rescuer)
-		set_cpus_allowed_ptr(ctx->wq->rescuer->task,
-				     unbound_effective_cpumask(ctx->wq));
-
 	mutex_unlock(&ctx->wq->mutex);
 }

@@ -5614,10 +5613,13 @@ static int init_rescuer(struct workqueue_struct *wq)
 	}
 
 	wq->rescuer = rescuer;
-	if (wq->flags & WQ_UNBOUND)
-		kthread_bind_mask(rescuer->task, unbound_effective_cpumask(wq));
+
+	/* initial cpumask is consistent with the detached rescuer and unbind_worker() */
+	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+		kthread_bind_mask(rescuer->task, wq_unbound_cpumask);
 	else
 		kthread_bind_mask(rescuer->task, cpu_possible_mask);
+
 	wake_up_process(rescuer->task);
 
 	return 0;
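The new comment keys the initial binding to unbind_worker(): the rescuer starts out detached, so init_rescuer() must pick the same mask that unbind_worker() would. For context, a paraphrase of that helper as it appears in mainline kernel/workqueue.c (not part of this diff; details may differ across versions):

	/* Paraphrased from kernel/workqueue.c, for reference only. */
	static void unbind_worker(struct worker *worker)
	{
		lockdep_assert_held(&wq_pool_attach_mutex);

		/* same fallback as init_rescuer() above: prefer the unbound
		 * mask, but only if it still intersects an active CPU */
		kthread_set_per_cpu(worker->task, -1);
		if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask));
		else
			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask));
	}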
@@ -5902,16 +5904,10 @@ void destroy_workqueue(struct workqueue_struct *wq)
 
 	/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
 	if (wq->rescuer) {
-		struct worker *rescuer = wq->rescuer;
-
-		/* this prevents new queueing */
-		raw_spin_lock_irq(&wq_mayday_lock);
-		wq->rescuer = NULL;
-		raw_spin_unlock_irq(&wq_mayday_lock);
-
 		/* rescuer will empty maydays list before exiting */
-		kthread_stop(rescuer->task);
-		kfree(rescuer);
+		kthread_stop(wq->rescuer->task);
+		kfree(wq->rescuer);
+		wq->rescuer = NULL;
 	}
 
 	/*
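This simplification leans on the series' earlier patch ("workqueue: Don't rely on wq->rescuer to stop rescuer"): the rescuer drains wq->maydays before acting on kthread_should_stop(), so destroy_workqueue() no longer needs to clear wq->rescuer under wq_mayday_lock to fence new mayday queueing. Paraphrasing the top of the rescuer loop (mainline; not part of this diff):

	/* Paraphrased from rescuer_thread(), for reference only. */
	repeat:
		set_current_state(TASK_IDLE);

		/*
		 * Go through wq->maydays before acting on should_stop, so
		 * the list is guaranteed empty by the time the rescuer
		 * exits, even if stop is requested while pwqs are queued.
		 */
		should_stop = kthread_should_stop();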
@@ -6937,8 +6933,26 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
 	}
 
 	if (!ret) {
+		int cpu;
+		struct worker_pool *pool;
+		struct worker *worker;
+
 		mutex_lock(&wq_pool_attach_mutex);
 		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
+		/* rescuer needs to respect cpumask changes when it is not attached */
+		list_for_each_entry(wq, &workqueues, list) {
+			if (wq->rescuer && !wq->rescuer->pool)
+				unbind_worker(wq->rescuer);
+		}
+		/* DISASSOCIATED worker needs to respect wq_unbound_cpumask */
+		for_each_possible_cpu(cpu) {
+			for_each_cpu_worker_pool(pool, cpu) {
+				if (!(pool->flags & POOL_DISASSOCIATED))
+					continue;
+				for_each_pool_worker(worker, pool)
+					unbind_worker(worker);
+			}
+		}
 		mutex_unlock(&wq_pool_attach_mutex);
 	}
 	return ret;
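For orientation: workqueue_apply_unbound_cpumask() runs when the global unbound mask changes, e.g. via a write to /sys/devices/virtual/workqueue/cpumask. A paraphrased sketch of that sysfs-side caller from mainline (not part of this diff; exact guards vary by version):

	/* Paraphrased from kernel/workqueue.c, for reference only. */
	static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
	{
		int ret = -EINVAL;

		cpumask_and(cpumask, cpumask, cpu_possible_mask);
		if (!cpumask_empty(cpumask)) {
			ret = 0;
			apply_wqattrs_lock();
			if (!cpumask_equal(cpumask, wq_unbound_cpumask))
				ret = workqueue_apply_unbound_cpumask(cpumask);
			apply_wqattrs_unlock();
		}
		return ret;
	}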
