Skip to content

Commit 2c1aca4

Browse files
committed
Merge branch 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fixes from Tejun Heo:

 "Workqueue has been incorrectly round-robining per-cpu work items.
  Hillf's patch fixes that.

  The other patch documents memory-ordering properties of workqueue
  operations"

* 'for-5.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: don't use wq_select_unbound_cpu() for bound works
  workqueue: Document (some) memory-ordering properties of {queue,schedule}_work()
2 parents 30bb557 + aa202f1 commit 2c1aca4

File tree

2 files changed

+24
-6
lines changed

2 files changed

+24
-6
lines changed

include/linux/workqueue.h

Lines changed: 16 additions & 0 deletions
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties: If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ *   CPU0				CPU1
+ *
+ *   WRITE_ONCE(x, 1);			[ @work is being executed ]
+ *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
 			      struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
  */
 static inline bool schedule_work(struct work_struct *work)
 {

kernel/workqueue.c

Lines changed: 8 additions & 6 deletions
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		return;
 	rcu_read_lock();
 retry:
-	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
 	/* pwq which will be used unless @work is executing elsewhere */
-	if (!(wq->flags & WQ_UNBOUND))
-		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-	else
+	if (wq->flags & WQ_UNBOUND) {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+	} else {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = raw_smp_processor_id();
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+	}
 
 	/*
 	 * If @work was previously on a different pool, it might still be

0 commit comments

Comments
 (0)