 struct rcu_tasks;
 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
+typedef void (*pregp_func_t)(void);
+typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
+typedef void (*postscan_func_t)(void);
+typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
+typedef void (*postgp_func_t)(void);
 
 /**
  * Definition for a Tasks-RCU-like mechanism.
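
The five new typedefs are the pluggable phases of a Tasks-RCU grace period. As a rough sketch (not part of the patch; the function name is invented for illustration), a flavor's hooks are invoked in the following order by the generic grace-period code, which the refactored rcu_tasks_wait_gp() hunk further down implements:

/* Illustrative sketch only -- not in the patch.  Shows the order in which
 * the generic code invokes a flavor's optional hooks. */
static void example_tasks_gp(struct rcu_tasks *rtp)
{
	LIST_HEAD(holdouts);			/* tasks still in a reader */
	struct task_struct *g, *t;
	bool firstreport = true;

	rtp->pregp_func();			/* pre-GP ordering, e.g. synchronize_rcu() */

	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);	/* mark and queue holdouts */
	rcu_read_unlock();

	rtp->postscan_func();			/* wait for concurrently exiting tasks */

	while (!list_empty(&holdouts))		/* the real loop sleeps and rate-limits */
		rtp->holdouts_func(&holdouts, false, &firstreport);

	rtp->postgp_func();			/* post-GP ordering, e.g. synchronize_rcu() */
}
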
@@ -21,6 +26,11 @@ typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
  * @cbs_lock: Lock protecting callback list.
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  * @gp_func: This flavor's grace-period-wait function.
+ * @pregp_func: This flavor's pre-grace-period function (optional).
+ * @pertask_func: This flavor's per-task scan function (optional).
+ * @postscan_func: This flavor's post-task scan function (optional).
+ * @holdout_func: This flavor's holdout-list scan function (optional).
+ * @postgp_func: This flavor's post-grace-period function (optional).
  * @call_func: This flavor's call_rcu()-equivalent function.
  * @name: This flavor's textual name.
  * @kname: This flavor's kthread name.
@@ -32,6 +42,11 @@ struct rcu_tasks {
 	raw_spinlock_t cbs_lock;
 	struct task_struct *kthread_ptr;
 	rcu_tasks_gp_func_t gp_func;
+	pregp_func_t pregp_func;
+	pertask_func_t pertask_func;
+	postscan_func_t postscan_func;
+	holdouts_func_t holdouts_func;
+	postgp_func_t postgp_func;
 	call_rcu_func_t call_func;
 	char *name;
 	char *kname;
@@ -113,6 +128,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		/* Pick up any new callbacks. */
 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
+		smp_mb__after_unlock_lock(); // Order updates vs. GP.
 		list = rtp->cbs_head;
 		rtp->cbs_head = NULL;
 		rtp->cbs_tail = &rtp->cbs_head;
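
For context (the enqueue path is elsewhere in this file and not shown in this hunk), ->cbs_head/->cbs_tail form a conventional tail-pointer callback list, which is why resetting ->cbs_tail to &->cbs_head above empties it; the added smp_mb__after_unlock_lock() upgrades the enqueuer's unlock plus this lock of ->cbs_lock to a full barrier, ordering updates made before a callback was queued ahead of the grace-period work ("Order updates vs. GP."). A simplified, hypothetical enqueue looks roughly like this:

/* Hypothetical, simplified enqueue for a cbs_head/cbs_tail style list;
 * not the patch's actual enqueue code. */
static void example_enqueue_cb(struct rcu_tasks *rtp, struct rcu_head *rhp)
{
	unsigned long flags;

	rhp->next = NULL;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	*rtp->cbs_tail = rhp;		/* append at the tail... */
	rtp->cbs_tail = &rhp->next;	/* ...and advance the tail pointer */
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
}
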
@@ -207,6 +223,49 @@ static void __init rcu_tasks_bootup_oddness(void)
 // rates from multiple CPUs. If this is required, per-CPU callback lists
 // will be needed.
 
+/* Pre-grace-period preparation. */
+static void rcu_tasks_pregp_step(void)
+{
+	/*
+	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
+	 * to complete. Invoking synchronize_rcu() suffices because all
+	 * these transitions occur with interrupts disabled. Without this
+	 * synchronize_rcu(), a read-side critical section that started
+	 * before the grace period might be incorrectly seen as having
+	 * started after the grace period.
+	 *
+	 * This synchronize_rcu() also dispenses with the need for a
+	 * memory barrier on the first store to t->rcu_tasks_holdout,
+	 * as it forces the store to happen after the beginning of the
+	 * grace period.
+	 */
+	synchronize_rcu();
+}
+
+/* Per-task initial processing. */
+static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
+{
+	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+		get_task_struct(t);
+		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+		WRITE_ONCE(t->rcu_tasks_holdout, true);
+		list_add(&t->rcu_tasks_holdout_list, hop);
+	}
+}
+
+/* Processing between scanning taskslist and draining the holdout list. */
+void rcu_tasks_postscan(void)
+{
+	/*
+	 * Wait for tasks that are in the process of exiting. This
+	 * does only part of the job, ensuring that all tasks that were
+	 * previously exiting reach the point where they have disabled
+	 * preemption, allowing the later synchronize_rcu() to finish
+	 * the job.
+	 */
+	synchronize_srcu(&tasks_rcu_exit_srcu);
+}
+
 /* See if tasks are still holding out, complain if so. */
 static void check_holdout_task(struct task_struct *t,
 			       bool needreport, bool *firstreport)
@@ -239,55 +298,63 @@ static void check_holdout_task(struct task_struct *t,
 	sched_show_task(t);
 }
 
+/* Scan the holdout lists for tasks no longer holding out. */
+static void check_all_holdout_tasks(struct list_head *hop,
+				    bool needreport, bool *firstreport)
+{
+	struct task_struct *t, *t1;
+
+	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
+		check_holdout_task(t, needreport, firstreport);
+		cond_resched();
+	}
+}
+
+/* Finish off the Tasks-RCU grace period. */
+static void rcu_tasks_postgp(void)
+{
+	/*
+	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
+	 * memory barriers prior to them in the schedule() path, memory
+	 * reordering on other CPUs could cause their RCU-tasks read-side
+	 * critical sections to extend past the end of the grace period.
+	 * However, because these ->nvcsw updates are carried out with
+	 * interrupts disabled, we can use synchronize_rcu() to force the
+	 * needed ordering on all such CPUs.
+	 *
+	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
+	 * accesses to be within the grace period, avoiding the need for
+	 * memory barriers for ->rcu_tasks_holdout accesses.
+	 *
+	 * In addition, this synchronize_rcu() waits for exiting tasks
+	 * to complete their final preempt_disable() region of execution,
+	 * cleaning up after the synchronize_srcu() above.
+	 */
+	synchronize_rcu();
+}
+
 /* Wait for one RCU-tasks grace period. */
 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 {
 	struct task_struct *g, *t;
 	unsigned long lastreport;
-	LIST_HEAD(rcu_tasks_holdouts);
+	LIST_HEAD(holdouts);
 	int fract;
 
-	/*
-	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
-	 * to complete. Invoking synchronize_rcu() suffices because all
-	 * these transitions occur with interrupts disabled. Without this
-	 * synchronize_rcu(), a read-side critical section that started
-	 * before the grace period might be incorrectly seen as having
-	 * started after the grace period.
-	 *
-	 * This synchronize_rcu() also dispenses with the need for a
-	 * memory barrier on the first store to t->rcu_tasks_holdout,
-	 * as it forces the store to happen after the beginning of the
-	 * grace period.
-	 */
-	synchronize_rcu();
+	rtp->pregp_func();
 
 	/*
 	 * There were callbacks, so we need to wait for an RCU-tasks
 	 * grace period. Start off by scanning the task list for tasks
 	 * that are not already voluntarily blocked. Mark these tasks
-	 * and make a list of them in rcu_tasks_holdouts.
+	 * and make a list of them in holdouts.
 	 */
 	rcu_read_lock();
-	for_each_process_thread(g, t) {
-		if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
-			get_task_struct(t);
-			t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
-			WRITE_ONCE(t->rcu_tasks_holdout, true);
-			list_add(&t->rcu_tasks_holdout_list,
-				 &rcu_tasks_holdouts);
-		}
-	}
+	for_each_process_thread(g, t)
+		rtp->pertask_func(t, &holdouts);
 	rcu_read_unlock();
 
-	/*
-	 * Wait for tasks that are in the process of exiting. This
-	 * does only part of the job, ensuring that all tasks that were
-	 * previously exiting reach the point where they have disabled
-	 * preemption, allowing the later synchronize_rcu() to finish
-	 * the job.
-	 */
-	synchronize_srcu(&tasks_rcu_exit_srcu);
+	rtp->postscan_func();
 
 	/*
 	 * Each pass through the following loop scans the list of holdout
@@ -303,9 +370,8 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 		bool firstreport;
 		bool needreport;
 		int rtst;
-		struct task_struct *t1;
 
-		if (list_empty(&rcu_tasks_holdouts))
+		if (list_empty(&holdouts))
 			break;
 
 		/* Slowly back off waiting for holdouts */
@@ -320,31 +386,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 		lastreport = jiffies;
 		firstreport = true;
 		WARN_ON(signal_pending(current));
-		list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
-					 rcu_tasks_holdout_list) {
-			check_holdout_task(t, needreport, &firstreport);
-			cond_resched();
-		}
+		rtp->holdouts_func(&holdouts, needreport, &firstreport);
 	}
 
-	/*
-	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
-	 * memory barriers prior to them in the schedule() path, memory
-	 * reordering on other CPUs could cause their RCU-tasks read-side
-	 * critical sections to extend past the end of the grace period.
-	 * However, because these ->nvcsw updates are carried out with
-	 * interrupts disabled, we can use synchronize_rcu() to force the
-	 * needed ordering on all such CPUs.
-	 *
-	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
-	 * accesses to be within the grace period, avoiding the need for
-	 * memory barriers for ->rcu_tasks_holdout accesses.
-	 *
-	 * In addition, this synchronize_rcu() waits for exiting tasks
-	 * to complete their final preempt_disable() region of execution,
-	 * cleaning up after the synchronize_srcu() above.
-	 */
-	synchronize_rcu();
+	rtp->postgp_func();
 }
 
 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
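
The declaration above is the classic flavor's public entry point. As a usage sketch (only call_rcu_tasks() itself is the real API here; the structure and helper names are invented for illustration), a caller embeds an rcu_head in the object it wants to free after an RCU-tasks grace period:

/* Hypothetical example object; only call_rcu_tasks() comes from the API above. */
struct example_tramp {
	struct rcu_head rh;
	void *text;
};

static void example_tramp_free_cb(struct rcu_head *rhp)
{
	struct example_tramp *tp = container_of(rhp, struct example_tramp, rh);

	kfree(tp);	/* safe: no task can still be executing in it */
}

static void example_tramp_retire(struct example_tramp *tp)
{
	/* Frees tp only after all tasks have voluntarily context-switched. */
	call_rcu_tasks(&tp->rh, example_tramp_free_cb);
}
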
@@ -413,6 +458,11 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
 
 static int __init rcu_spawn_tasks_kthread(void)
 {
+	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
+	rcu_tasks.pertask_func = rcu_tasks_pertask;
+	rcu_tasks.postscan_func = rcu_tasks_postscan;
+	rcu_tasks.holdouts_func = check_all_holdout_tasks;
+	rcu_tasks.postgp_func = rcu_tasks_postgp;
 	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
 	return 0;
 }
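
The assignments above wire the classic Tasks-RCU implementation into the now-generic machinery. A new flavor added on top of this refactoring would, presumably, follow the same pattern: define its own hook functions and its own struct rcu_tasks instance, then hand that instance to rcu_spawn_tasks_kthread_generic(). A rough sketch, with every "example_" identifier invented for illustration (only the struct rcu_tasks hook fields and rcu_spawn_tasks_kthread_generic() come from the patch itself, and the lock/list/name setup of the instance is omitted):

static void example_pregp_step(void)
{
	synchronize_rcu();		/* flavor-specific pre-GP ordering */
}

static void example_pertask(struct task_struct *t, struct list_head *hop)
{
	/* flavor-specific per-task scan; may add t to the hop list */
}

static void example_postscan(void)
{
	/* flavor-specific wait after the task-list scan */
}

static void example_check_holdouts(struct list_head *hop,
				   bool needreport, bool *firstreport)
{
	/* flavor-specific re-scan of the holdout list */
}

static void example_postgp(void)
{
	synchronize_rcu();		/* flavor-specific post-GP ordering */
}

static struct rcu_tasks example_rcu_tasks;	/* lock/list/name setup omitted */

static int __init example_spawn_tasks_kthread(void)
{
	example_rcu_tasks.pregp_func = example_pregp_step;
	example_rcu_tasks.pertask_func = example_pertask;
	example_rcu_tasks.postscan_func = example_postscan;
	example_rcu_tasks.holdouts_func = example_check_holdouts;
	example_rcu_tasks.postgp_func = example_postgp;
	rcu_spawn_tasks_kthread_generic(&example_rcu_tasks);
	return 0;
}
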