
Commit cb2c84b

Merge tag 'wq-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fixes from Tejun Heo:
 "Nothing too interesting. One patch to remove spurious warning and
  others to address static checker warnings"

* tag 'wq-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Correct declaration of cpu_pwq in struct workqueue_struct
  workqueue: Fix spruious data race in __flush_work()
  workqueue: Remove incorrect "WARN_ON_ONCE(!list_empty(&worker->entry));" from dying worker
  workqueue: Fix UBSAN 'subtraction overflow' error in shift_and_mask()
  workqueue: doc: Fix function name, remove markers
2 parents: 5bd6cf0 + c4c8f36


2 files changed: 28 additions & 24 deletions


Documentation/core-api/workqueue.rst

Lines changed: 1 addition & 1 deletion
@@ -260,7 +260,7 @@ Some users depend on strict execution ordering where only one work item
 is in flight at any given time and the work items are processed in
 queueing order. While the combination of ``@max_active`` of 1 and
 ``WQ_UNBOUND`` used to achieve this behavior, this is no longer the
-case. Use ``alloc_ordered_queue()`` instead.
+case. Use alloc_ordered_workqueue() instead.
 
 
 Example Execution Scenarios
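
As a hedged illustration of the API the documentation now points to (not part of the commit; my_ordered_wq, my_work and my_work_fn are invented names), an ordered workqueue that keeps one work item in flight and preserves queueing order can be created like this:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_ordered_wq;	/* hypothetical example queue */

static void my_work_fn(struct work_struct *work)
{
	pr_info("ordered work item ran\n");
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	/* Ordered: at most one work item in flight, executed in queueing order. */
	my_ordered_wq = alloc_ordered_workqueue("my_ordered_wq", 0);
	if (!my_ordered_wq)
		return -ENOMEM;

	queue_work(my_ordered_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_ordered_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

alloc_ordered_workqueue() is the documented replacement for the old @max_active of 1 plus WQ_UNBOUND combination described above.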

kernel/workqueue.c

Lines changed: 27 additions & 23 deletions
@@ -377,7 +377,7 @@ struct workqueue_struct {
 
 	/* hot fields used during command issue, aligned to cacheline */
 	unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
-	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
+	struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
 	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
 };
 
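
Read inside out, the corrected declaration says that cpu_pwq points to a per-CPU slot and that each slot holds an RCU-protected pointer to a struct pool_workqueue; the old form attached both address-space attributes to the pointed-to type, which is what the static checker flagged. Below is a hedged sketch, not taken from the patch, of how such a field is typically dereferenced; it would only compile inside kernel/workqueue.c where these types are visible, and show_cpu_pwq() is an invented name:

/* Hedged illustration only; show_cpu_pwq() is not a kernel function. */
static void show_cpu_pwq(struct workqueue_struct *wq, int cpu)
{
	struct pool_workqueue *pwq;

	rcu_read_lock();
	/* per_cpu_ptr() resolves the __percpu level, rcu_dereference() the __rcu one. */
	pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
	if (pwq)
		pr_debug("cpu %d pwq at %p\n", cpu, pwq);
	rcu_read_unlock();
}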

@@ -897,7 +897,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
 
 static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
 {
-	return (v >> shift) & ((1 << bits) - 1);
+	return (v >> shift) & ((1U << bits) - 1);
 }
 
 static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
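
The UBSAN complaint comes from signed arithmetic: with a plain 1, the shift and the subtraction are evaluated as int, so a large bits value (31, for example) makes the intermediate result overflow a signed integer, while the 1U form keeps the whole expression unsigned, where wraparound is well defined. A hedged, userspace-style illustration of the difference (not kernel code; mask_signed() and mask_unsigned() are invented names):

#include <stdio.h>

/* Invented helpers that mirror the two variants of the mask expression. */
static unsigned long mask_signed(unsigned long v, unsigned int shift, unsigned int bits)
{
	return (v >> shift) & ((1 << bits) - 1);	/* int arithmetic: signed overflow for bits == 31 */
}

static unsigned long mask_unsigned(unsigned long v, unsigned int shift, unsigned int bits)
{
	return (v >> shift) & ((1U << bits) - 1);	/* unsigned arithmetic: well defined */
}

int main(void)
{
	unsigned long v = 0xdeadbeefUL;

	/* Same numeric result in practice, but only the 1U form avoids signed
	 * overflow; building with -fsanitize=undefined flags mask_signed(). */
	printf("%#lx\n", mask_unsigned(v, 0, 31));
	printf("%#lx\n", mask_signed(v, 0, 31));
	return 0;
}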
@@ -3351,7 +3351,6 @@ static int worker_thread(void *__worker)
 		set_pf_worker(false);
 
 		ida_free(&pool->worker_ida, worker->id);
-		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
 
@@ -4167,7 +4166,6 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 static bool __flush_work(struct work_struct *work, bool from_cancel)
 {
 	struct wq_barrier barr;
-	unsigned long data;
 
 	if (WARN_ON(!wq_online))
 		return false;
@@ -4185,29 +4183,35 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	 * was queued on a BH workqueue, we also know that it was running in the
 	 * BH context and thus can be busy-waited.
 	 */
-	data = *work_data_bits(work);
-	if (from_cancel &&
-	    !WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_BH)) {
-		/*
-		 * On RT, prevent a live lock when %current preempted soft
-		 * interrupt processing or prevents ksoftirqd from running by
-		 * keeping flipping BH. If the BH work item runs on a different
-		 * CPU then this has no effect other than doing the BH
-		 * disable/enable dance for nothing. This is copied from
-		 * kernel/softirq.c::tasklet_unlock_spin_wait().
-		 */
-		while (!try_wait_for_completion(&barr.done)) {
-			if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-				local_bh_disable();
-				local_bh_enable();
-			} else {
-				cpu_relax();
+	if (from_cancel) {
+		unsigned long data = *work_data_bits(work);
+
+		if (!WARN_ON_ONCE(data & WORK_STRUCT_PWQ) &&
+		    (data & WORK_OFFQ_BH)) {
+			/*
+			 * On RT, prevent a live lock when %current preempted
+			 * soft interrupt processing or prevents ksoftirqd from
+			 * running by keeping flipping BH. If the BH work item
+			 * runs on a different CPU then this has no effect other
+			 * than doing the BH disable/enable dance for nothing.
+			 * This is copied from
+			 * kernel/softirq.c::tasklet_unlock_spin_wait().
+			 */
+			while (!try_wait_for_completion(&barr.done)) {
+				if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+					local_bh_disable();
+					local_bh_enable();
+				} else {
+					cpu_relax();
+				}
 			}
+			goto out_destroy;
 		}
-	} else {
-		wait_for_completion(&barr.done);
 	}
 
+	wait_for_completion(&barr.done);
+
+out_destroy:
 	destroy_work_on_stack(&barr.work);
 	return true;
 }
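
For context on the two wait paths above: when flushing on behalf of a cancel, a BH work item may be busy-waited as shown, while an ordinary flush always sleeps in wait_for_completion(). A hedged caller-side sketch, not part of the commit, with the invented names my_bh_wq, my_bh_work and my_bh_fn:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_bh_wq;	/* hypothetical BH workqueue */
static struct work_struct my_bh_work;

static void my_bh_fn(struct work_struct *work)
{
	pr_info("ran in BH context\n");
}

static int __init my_init(void)
{
	my_bh_wq = alloc_workqueue("my_bh_wq", WQ_BH, 0);
	if (!my_bh_wq)
		return -ENOMEM;

	INIT_WORK(&my_bh_work, my_bh_fn);
	queue_work(my_bh_wq, &my_bh_work);

	/* from_cancel path: a BH work item may be busy-waited as shown above. */
	cancel_work_sync(&my_bh_work);

	/* Plain flush path: sleeps in wait_for_completion(). */
	queue_work(my_bh_wq, &my_bh_work);
	flush_work(&my_bh_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_bh_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");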
