@@ -99,6 +99,7 @@ enum worker_flags {
 
 enum work_cancel_flags {
 	WORK_CANCEL_DELAYED	= 1 << 0,	/* canceling a delayed_work */
+	WORK_CANCEL_DISABLE	= 1 << 1,	/* canceling to disable */
 };
 
 enum wq_internal_consts {
@@ -394,6 +395,7 @@ struct wq_pod_type {
 
 struct work_offq_data {
 	u32			pool_id;
+	u32			disable;
 	u32			flags;
 };
 
@@ -908,12 +910,15 @@ static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
 
 	offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT,
 					WORK_OFFQ_POOL_BITS);
+	offqd->disable = shift_and_mask(data, WORK_OFFQ_DISABLE_SHIFT,
+					WORK_OFFQ_DISABLE_BITS);
 	offqd->flags = data & WORK_OFFQ_FLAG_MASK;
 }
 
 static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd)
 {
-	return (unsigned long)offqd->flags;
+	return ((unsigned long)offqd->disable << WORK_OFFQ_DISABLE_SHIFT) |
+		((unsigned long)offqd->flags);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -2408,6 +2413,21 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	rcu_read_unlock();
 }
 
+static bool clear_pending_if_disabled(struct work_struct *work)
+{
+	unsigned long data = *work_data_bits(work);
+	struct work_offq_data offqd;
+
+	if (likely((data & WORK_STRUCT_PWQ) ||
+		   !(data & WORK_OFFQ_DISABLE_MASK)))
+		return false;
+
+	work_offqd_unpack(&offqd, data);
+	set_work_pool_and_clear_pending(work, offqd.pool_id,
+					work_offqd_pack_flags(&offqd));
+	return true;
+}
+
 /**
  * queue_work_on - queue work on specific cpu
  * @cpu: CPU number to execute work on
@@ -2430,7 +2450,8 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
 
 	local_irq_save(irq_flags);
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+	    !clear_pending_if_disabled(work)) {
 		__queue_work(cpu, wq, work);
 		ret = true;
 	}
@@ -2508,7 +2529,8 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
 
 	local_irq_save(irq_flags);
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+	    !clear_pending_if_disabled(work)) {
 		int cpu = select_numa_node_cpu(node);
 
 		__queue_work(cpu, wq, work);
@@ -2590,7 +2612,8 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	/* read the comment in __queue_work() */
 	local_irq_save(irq_flags);
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+	    !clear_pending_if_disabled(work)) {
 		__queue_delayed_work(cpu, wq, dwork, delay);
 		ret = true;
 	}
@@ -2663,7 +2686,12 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
 {
 	struct work_struct *work = &rwork->work;
 
-	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+	/*
+	 * rcu_work can't be canceled or disabled. Warn if the user reached
+	 * inside @rwork and disabled the inner work.
+	 */
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+	    !WARN_ON_ONCE(clear_pending_if_disabled(work))) {
 		rwork->wq = wq;
 		call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
 		return true;
@@ -4268,20 +4296,46 @@ bool flush_rcu_work(struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(flush_rcu_work);
 
+static void work_offqd_disable(struct work_offq_data *offqd)
+{
+	const unsigned long max = (1lu << WORK_OFFQ_DISABLE_BITS) - 1;
+
+	if (likely(offqd->disable < max))
+		offqd->disable++;
+	else
+		WARN_ONCE(true, "workqueue: work disable count overflowed\n");
+}
+
+static void work_offqd_enable(struct work_offq_data *offqd)
+{
+	if (likely(offqd->disable > 0))
+		offqd->disable--;
+	else
+		WARN_ONCE(true, "workqueue: work disable count underflowed\n");
+}
+
 static bool __cancel_work(struct work_struct *work, u32 cflags)
 {
 	struct work_offq_data offqd;
 	unsigned long irq_flags;
 	int ret;
 
-	do {
-		ret = try_to_grab_pending(work, cflags, &irq_flags);
-	} while (unlikely(ret == -EAGAIN));
+	if (cflags & WORK_CANCEL_DISABLE) {
+		ret = work_grab_pending(work, cflags, &irq_flags);
+	} else {
+		do {
+			ret = try_to_grab_pending(work, cflags, &irq_flags);
+		} while (unlikely(ret == -EAGAIN));
 
-	if (unlikely(ret < 0))
-		return false;
+		if (unlikely(ret < 0))
+			return false;
+	}
 
 	work_offqd_unpack(&offqd, *work_data_bits(work));
+
+	if (cflags & WORK_CANCEL_DISABLE)
+		work_offqd_disable(&offqd);
+
 	set_work_pool_and_clear_pending(work, offqd.pool_id,
 					work_offqd_pack_flags(&offqd));
 	local_irq_restore(irq_flags);
@@ -4298,6 +4352,10 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	ret = work_grab_pending(work, cflags, &irq_flags);
 
 	work_offqd_unpack(&offqd, *work_data_bits(work));
+
+	if (cflags & WORK_CANCEL_DISABLE)
+		work_offqd_disable(&offqd);
+
 	offqd.flags |= WORK_OFFQ_CANCELING;
 	set_work_pool_and_keep_pending(work, offqd.pool_id,
 				       work_offqd_pack_flags(&offqd));
@@ -4397,6 +4455,105 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
+/**
+ * disable_work - Disable and cancel a work item
+ * @work: work item to disable
+ *
+ * Disable @work by incrementing its disable count and cancel it if currently
+ * pending. As long as the disable count is non-zero, any attempt to queue @work
+ * will fail and return %false. The maximum supported disable depth is 2 to the
+ * power of %WORK_OFFQ_DISABLE_BITS, currently 65536.
+ *
+ * Must be called from a sleepable context. Returns %true if @work was pending,
+ * %false otherwise.
+ */
+bool disable_work(struct work_struct *work)
+{
+	return __cancel_work(work, WORK_CANCEL_DISABLE);
+}
+EXPORT_SYMBOL_GPL(disable_work);
+
+/**
+ * disable_work_sync - Disable, cancel and drain a work item
+ * @work: work item to disable
+ *
+ * Similar to disable_work() but also wait for @work to finish if currently
+ * executing.
+ *
+ * Must be called from a sleepable context. Returns %true if @work was pending,
+ * %false otherwise.
+ */
+bool disable_work_sync(struct work_struct *work)
+{
+	return __cancel_work_sync(work, WORK_CANCEL_DISABLE);
+}
+EXPORT_SYMBOL_GPL(disable_work_sync);
+
+/**
+ * enable_work - Enable a work item
+ * @work: work item to enable
+ *
+ * Undo disable_work[_sync]() by decrementing @work's disable count. @work can
+ * only be queued if its disable count is 0.
+ *
+ * Must be called from a sleepable context. Returns %true if the disable count
+ * reached 0. Otherwise, %false.
+ */
+bool enable_work(struct work_struct *work)
+{
+	struct work_offq_data offqd;
+	unsigned long irq_flags;
+
+	work_grab_pending(work, 0, &irq_flags);
+
+	work_offqd_unpack(&offqd, *work_data_bits(work));
+	work_offqd_enable(&offqd);
+	set_work_pool_and_clear_pending(work, offqd.pool_id,
+					work_offqd_pack_flags(&offqd));
+	local_irq_restore(irq_flags);
+
+	return !offqd.disable;
+}
+EXPORT_SYMBOL_GPL(enable_work);
+
+/**
+ * disable_delayed_work - Disable and cancel a delayed work item
+ * @dwork: delayed work item to disable
+ *
+ * disable_work() for delayed work items.
+ */
+bool disable_delayed_work(struct delayed_work *dwork)
+{
+	return __cancel_work(&dwork->work,
+			     WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE);
+}
+EXPORT_SYMBOL_GPL(disable_delayed_work);
+
+/**
+ * disable_delayed_work_sync - Disable, cancel and drain a delayed work item
+ * @dwork: delayed work item to disable
+ *
+ * disable_work_sync() for delayed work items.
+ */
+bool disable_delayed_work_sync(struct delayed_work *dwork)
+{
+	return __cancel_work_sync(&dwork->work,
+				  WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE);
+}
+EXPORT_SYMBOL_GPL(disable_delayed_work_sync);
+
+/**
+ * enable_delayed_work - Enable a delayed work item
+ * @dwork: delayed work item to enable
+ *
+ * enable_work() for delayed work items.
+ */
+bool enable_delayed_work(struct delayed_work *dwork)
+{
+	return enable_work(&dwork->work);
+}
+EXPORT_SYMBOL_GPL(enable_delayed_work);
+
 /**
  * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
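
Usage note (not part of the patch): a minimal sketch of how a caller might use the new disable/enable interface once this change is applied. The driver structure and function names below (my_dev, my_refresh_fn, my_dev_reconfigure) are hypothetical, chosen only for illustration; the only workqueue calls assumed are the existing queue_work() plus the disable_work_sync()/enable_work() pair added above.

#include <linux/workqueue.h>

/* Hypothetical per-device state; refresh_work is a normal work item. */
struct my_dev {
	struct work_struct	refresh_work;
};

static void my_refresh_fn(struct work_struct *work)
{
	/* refresh device state; normally queued with queue_work() */
}

static void my_dev_reconfigure(struct my_dev *dev)
{
	/*
	 * Block further queueing and wait for any in-flight execution to
	 * finish. While the disable count is non-zero, queue_work() on
	 * this item returns false instead of queueing it.
	 */
	disable_work_sync(&dev->refresh_work);

	/* ... reconfigure the device without the work racing in ... */

	/* Drop the disable count; queueing works again once it reaches 0. */
	enable_work(&dev->refresh_work);
}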