@@ -308,7 +308,7 @@ struct workqueue_struct {
 	int			saved_max_active; /* WQ: saved max_active */
 
 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
-	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
+	struct pool_workqueue __rcu *dfl_pwq;	/* PW: only for unbound wqs */
 
 #ifdef CONFIG_SYSFS
 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
@@ -639,6 +639,23 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	return ret;
 }
 
+static struct pool_workqueue __rcu **
+unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
+{
+	if (cpu >= 0)
+		return per_cpu_ptr(wq->cpu_pwq, cpu);
+	else
+		return &wq->dfl_pwq;
+}
+
+/* @cpu < 0 for dfl_pwq */
+static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
+{
+	return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
+				     lockdep_is_held(&wq_pool_mutex) ||
+				     lockdep_is_held(&wq->mutex));
+}
+
 static unsigned int work_color_to_flags(int color)
 {
 	return color << WORK_STRUCT_COLOR_SHIFT;
@@ -4328,10 +4345,11 @@ static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
 			"possible intersect\n");
 }
 
-/* install @pwq into @wq's cpu_pwq and return the old pwq */
+/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
 					int cpu, struct pool_workqueue *pwq)
 {
+	struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
 	struct pool_workqueue *old_pwq;
 
 	lockdep_assert_held(&wq_pool_mutex);
@@ -4340,8 +4358,8 @@ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
 	/* link_pwq() can handle duplicate calls */
 	link_pwq(pwq);
 
-	old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
-	rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
+	old_pwq = rcu_access_pointer(*slot);
+	rcu_assign_pointer(*slot, pwq);
 	return old_pwq;
 }
 
@@ -4441,14 +4459,11 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4441
4459
4442
4460
copy_workqueue_attrs (ctx -> wq -> unbound_attrs , ctx -> attrs );
4443
4461
4444
- /* save the previous pwq and install the new one */
4462
+ /* save the previous pwqs and install the new ones */
4445
4463
for_each_possible_cpu (cpu )
4446
4464
ctx -> pwq_tbl [cpu ] = install_unbound_pwq (ctx -> wq , cpu ,
4447
4465
ctx -> pwq_tbl [cpu ]);
4448
-
4449
- /* @dfl_pwq might not have been used, ensure it's linked */
4450
- link_pwq (ctx -> dfl_pwq );
4451
- swap (ctx -> wq -> dfl_pwq , ctx -> dfl_pwq );
4466
+ ctx -> dfl_pwq = install_unbound_pwq (ctx -> wq , -1 , ctx -> dfl_pwq );
4452
4467
4453
4468
mutex_unlock (& ctx -> wq -> mutex );
4454
4469
}
@@ -4558,9 +4573,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
 
 	/* nothing to do if the target cpumask matches the current pwq */
 	wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
-	pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
-					lockdep_is_held(&wq_pool_mutex));
-	if (wqattrs_equal(target_attrs, pwq->pool->attrs))
+	if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
 		return;
 
 	/* create a new pwq */
@@ -4578,10 +4591,11 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
 
 use_dfl_pwq:
 	mutex_lock(&wq->mutex);
-	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
-	get_pwq(wq->dfl_pwq);
-	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
-	old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
+	pwq = unbound_pwq(wq, -1);
+	raw_spin_lock_irq(&pwq->pool->lock);
+	get_pwq(pwq);
+	raw_spin_unlock_irq(&pwq->pool->lock);
+	old_pwq = install_unbound_pwq(wq, cpu, pwq);
 out_unlock:
 	mutex_unlock(&wq->mutex);
 	put_pwq_unlocked(old_pwq);
@@ -4619,10 +4633,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 
 	cpus_read_lock();
 	if (wq->flags & __WQ_ORDERED) {
+		struct pool_workqueue *dfl_pwq;
+
 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
 		/* there should only be single pwq for ordering guarantee */
-		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
-			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+		dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
+		WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
+			      wq->pwqs.prev != &dfl_pwq->pwqs_node),
 		     "ordering guarantee broken for workqueue %s\n", wq->name);
 	} else {
 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
@@ -4856,7 +4873,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
 		if (pwq->nr_in_flight[i])
 			return true;
 
-	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
+	if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
 		return true;
 	if (!pwq_is_empty(pwq))
 		return true;
@@ -4940,13 +4957,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	rcu_read_lock();
 
 	for_each_possible_cpu(cpu) {
-		pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
-		RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
-		put_pwq_unlocked(pwq);
+		put_pwq_unlocked(unbound_pwq(wq, cpu));
+		RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
 	}
 
-	put_pwq_unlocked(wq->dfl_pwq);
-	wq->dfl_pwq = NULL;
+	put_pwq_unlocked(unbound_pwq(wq, -1));
+	RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL);
 
 	rcu_read_unlock();
 }
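
The slot-plus-accessor pattern the patch introduces can be illustrated outside the kernel. The sketch below is a minimal user-space analogue (an assumption for illustration, not kernel code): a fixed array stands in for the per-CPU pointers, an index of -1 selects the default slot as in unbound_pwq_slot()/unbound_pwq(), and C11 acquire/release atomics stand in for rcu_dereference()/rcu_assign_pointer(). All names in it (pwq_slot, pwq_deref, pwq_install) are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

struct pwq { int id; };

struct wq {
	_Atomic(struct pwq *) cpu_pwq[NR_CPUS];	/* per-CPU slots */
	_Atomic(struct pwq *) dfl_pwq;		/* default slot */
};

/* Return the address of the slot for @cpu; @cpu < 0 selects the default slot. */
static _Atomic(struct pwq *) *pwq_slot(struct wq *wq, int cpu)
{
	return cpu >= 0 ? &wq->cpu_pwq[cpu] : &wq->dfl_pwq;
}

/* Dereference a slot; acquire ordering plays the role of rcu_dereference(). */
static struct pwq *pwq_deref(struct wq *wq, int cpu)
{
	return atomic_load_explicit(pwq_slot(wq, cpu), memory_order_acquire);
}

/* Publish @new_pwq and hand back the old pointer, like install_unbound_pwq(). */
static struct pwq *pwq_install(struct wq *wq, int cpu, struct pwq *new_pwq)
{
	_Atomic(struct pwq *) *slot = pwq_slot(wq, cpu);
	struct pwq *old = atomic_load_explicit(slot, memory_order_relaxed);

	atomic_store_explicit(slot, new_pwq, memory_order_release);
	return old;
}

int main(void)
{
	struct wq wq = { 0 };
	struct pwq dfl = { .id = -1 }, p2 = { .id = 2 };

	pwq_install(&wq, -1, &dfl);	/* default slot */
	pwq_install(&wq, 2, &p2);	/* per-CPU slot */

	printf("cpu 2 -> %d, default -> %d\n",
	       pwq_deref(&wq, 2)->id, pwq_deref(&wq, -1)->id);
	return 0;
}

The point of funneling both the per-CPU and default pointers through one slot helper is the same as in the patch: every writer and reader goes through a single dereference path, so the default pwq gets the same publication and teardown treatment as the per-CPU ones instead of being special-cased at each call site.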