@@ -180,6 +180,9 @@ static void __init rcu_tasks_bootup_oddness(void)
180
180
else
181
181
pr_info ("\tTasks RCU enabled.\n" );
182
182
#endif /* #ifdef CONFIG_TASKS_RCU */
183
+ #ifdef CONFIG_TASKS_RUDE_RCU
184
+ pr_info ("\tRude variant of Tasks RCU enabled.\n" );
185
+ #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
183
186
}
184
187
185
188
#endif /* #ifndef CONFIG_TINY_RCU */
@@ -410,3 +413,98 @@ static int __init rcu_spawn_tasks_kthread(void)
410
413
core_initcall (rcu_spawn_tasks_kthread );
411
414
412
415
#endif /* #ifdef CONFIG_TASKS_RCU */
416
+
417
+ #ifdef CONFIG_TASKS_RUDE_RCU
418
+
419
+ ////////////////////////////////////////////////////////////////////////
420
+ //
421
+ // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
422
+ // passing an empty function to schedule_on_each_cpu(). This approach
423
+ // provides an asynchronous call_rcu_tasks_rude() API and batching
424
+ // of concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
425
+ // This sends IPIs far and wide and induces otherwise unnecessary context
426
+ // switches on all online CPUs, whether idle or not.
427
+
428
// Empty work handler: it exists only so that queueing it on every CPU
// via schedule_on_each_cpu() (see rcu_tasks_rude_wait_gp()) forces a
// context switch on each CPU, which is the quiescent state this rude
// flavor relies on.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}
432
+
433
// Wait for one rude RCU-tasks grace period: schedule_on_each_cpu()
// queues the empty rcu_tasks_be_rude() work item on every online CPU
// and waits for all of them to run, thereby inducing a context switch
// (and hence a quiescent state) on each CPU.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	schedule_on_each_cpu(rcu_tasks_be_rude);
}
438
+
439
// Forward declaration: DEFINE_RCU_TASKS() is passed call_rcu_tasks_rude
// by name, and that function is not defined until further below.
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude);
441
+
442
/**
 * call_rcu_tasks_rude() - Queue a callback for a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution. As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
465
+
466
/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
489
+
490
/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
502
+
503
// Boot-time initcall that spawns the grace-period kthread for the
// rude Tasks RCU flavor.  Always reports success.
static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}
core_initcall(rcu_spawn_tasks_rude_kthread);
509
+
510
+ #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
0 commit comments