Commit e4fe5dd

rcu-tasks: Further refactor RCU-tasks to allow adding more variants
This commit refactors RCU tasks to allow variants to be added. These variants will share the current Tasks-RCU tasklist scan and the holdout list processing.

Signed-off-by: Paul E. McKenney <[email protected]>
1 parent c97d12a commit e4fe5dd
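
For orientation before reading the diff: after this refactoring, the generic grace-period wait drives the whole sequence through five per-flavor hooks. The sketch below is an editorial simplification, not part of the patch; the hook names and calling sequence match the patch, but the local-variable handling, adaptive backoff, and stall-warning bookkeeping are omitted.

/*
 * Simplified sketch (not the literal patched code) of the generic
 * grace-period wait after this refactoring.
 */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	bool firstreport = true;
	bool needreport = false;	/* The real code sets this from a stall timeout. */
	LIST_HEAD(holdouts);

	rtp->pregp_func();			/* Flavor-specific grace-period setup. */

	rcu_read_lock();			/* Shared tasklist scan. */
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	rtp->postscan_func();			/* E.g., wait for exiting tasks. */

	while (!list_empty(&holdouts)) {	/* Shared holdout-list processing. */
		schedule_timeout_interruptible(HZ / 10);	/* Real code backs off adaptively. */
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	rtp->postgp_func();			/* Flavor-specific grace-period cleanup. */
}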

File tree: 1 file changed (+108 lines, -58 lines)

kernel/rcu/tasks.h

Lines changed: 108 additions & 58 deletions
@@ -12,6 +12,11 @@
 
 struct rcu_tasks;
 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
+typedef void (*pregp_func_t)(void);
+typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
+typedef void (*postscan_func_t)(void);
+typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
+typedef void (*postgp_func_t)(void);
 
 /**
  * Definition for a Tasks-RCU-like mechanism.
@@ -21,6 +26,11 @@ typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
  * @cbs_lock: Lock protecting callback list.
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  * @gp_func: This flavor's grace-period-wait function.
+ * @pregp_func: This flavor's pre-grace-period function (optional).
+ * @pertask_func: This flavor's per-task scan function (optional).
+ * @postscan_func: This flavor's post-task scan function (optional).
+ * @holdout_func: This flavor's holdout-list scan function (optional).
+ * @postgp_func: This flavor's post-grace-period function (optional).
  * @call_func: This flavor's call_rcu()-equivalent function.
  * @name: This flavor's textual name.
  * @kname: This flavor's kthread name.
@@ -32,6 +42,11 @@ struct rcu_tasks {
 	raw_spinlock_t cbs_lock;
 	struct task_struct *kthread_ptr;
 	rcu_tasks_gp_func_t gp_func;
+	pregp_func_t pregp_func;
+	pertask_func_t pertask_func;
+	postscan_func_t postscan_func;
+	holdouts_func_t holdouts_func;
+	postgp_func_t postgp_func;
 	call_rcu_func_t call_func;
 	char *name;
 	char *kname;
@@ -113,6 +128,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 
 		/* Pick up any new callbacks. */
 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
+		smp_mb__after_unlock_lock(); // Order updates vs. GP.
 		list = rtp->cbs_head;
 		rtp->cbs_head = NULL;
 		rtp->cbs_tail = &rtp->cbs_head;
@@ -207,6 +223,49 @@ static void __init rcu_tasks_bootup_oddness(void)
 // rates from multiple CPUs. If this is required, per-CPU callback lists
 // will be needed.
 
+/* Pre-grace-period preparation. */
+static void rcu_tasks_pregp_step(void)
+{
+	/*
+	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
+	 * to complete. Invoking synchronize_rcu() suffices because all
+	 * these transitions occur with interrupts disabled. Without this
+	 * synchronize_rcu(), a read-side critical section that started
+	 * before the grace period might be incorrectly seen as having
+	 * started after the grace period.
+	 *
+	 * This synchronize_rcu() also dispenses with the need for a
+	 * memory barrier on the first store to t->rcu_tasks_holdout,
+	 * as it forces the store to happen after the beginning of the
+	 * grace period.
+	 */
+	synchronize_rcu();
+}
+
+/* Per-task initial processing. */
+static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
+{
+	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+		get_task_struct(t);
+		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+		WRITE_ONCE(t->rcu_tasks_holdout, true);
+		list_add(&t->rcu_tasks_holdout_list, hop);
+	}
+}
+
+/* Processing between scanning taskslist and draining the holdout list. */
+void rcu_tasks_postscan(void)
+{
+	/*
+	 * Wait for tasks that are in the process of exiting. This
+	 * does only part of the job, ensuring that all tasks that were
+	 * previously exiting reach the point where they have disabled
+	 * preemption, allowing the later synchronize_rcu() to finish
+	 * the job.
+	 */
+	synchronize_srcu(&tasks_rcu_exit_srcu);
+}
+
 /* See if tasks are still holding out, complain if so. */
 static void check_holdout_task(struct task_struct *t,
 			       bool needreport, bool *firstreport)
@@ -239,55 +298,63 @@ static void check_holdout_task(struct task_struct *t,
 	sched_show_task(t);
 }
 
+/* Scan the holdout lists for tasks no longer holding out. */
+static void check_all_holdout_tasks(struct list_head *hop,
+				    bool needreport, bool *firstreport)
+{
+	struct task_struct *t, *t1;
+
+	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
+		check_holdout_task(t, needreport, firstreport);
+		cond_resched();
+	}
+}
+
+/* Finish off the Tasks-RCU grace period. */
+static void rcu_tasks_postgp(void)
+{
+	/*
+	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
+	 * memory barriers prior to them in the schedule() path, memory
+	 * reordering on other CPUs could cause their RCU-tasks read-side
+	 * critical sections to extend past the end of the grace period.
+	 * However, because these ->nvcsw updates are carried out with
+	 * interrupts disabled, we can use synchronize_rcu() to force the
+	 * needed ordering on all such CPUs.
+	 *
+	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
+	 * accesses to be within the grace period, avoiding the need for
+	 * memory barriers for ->rcu_tasks_holdout accesses.
+	 *
+	 * In addition, this synchronize_rcu() waits for exiting tasks
+	 * to complete their final preempt_disable() region of execution,
+	 * cleaning up after the synchronize_srcu() above.
+	 */
+	synchronize_rcu();
+}
+
 /* Wait for one RCU-tasks grace period. */
 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 {
 	struct task_struct *g, *t;
 	unsigned long lastreport;
-	LIST_HEAD(rcu_tasks_holdouts);
+	LIST_HEAD(holdouts);
 	int fract;
 
-	/*
-	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
-	 * to complete. Invoking synchronize_rcu() suffices because all
-	 * these transitions occur with interrupts disabled. Without this
-	 * synchronize_rcu(), a read-side critical section that started
-	 * before the grace period might be incorrectly seen as having
-	 * started after the grace period.
-	 *
-	 * This synchronize_rcu() also dispenses with the need for a
-	 * memory barrier on the first store to t->rcu_tasks_holdout,
-	 * as it forces the store to happen after the beginning of the
-	 * grace period.
-	 */
-	synchronize_rcu();
+	rtp->pregp_func();
 
 	/*
 	 * There were callbacks, so we need to wait for an RCU-tasks
 	 * grace period. Start off by scanning the task list for tasks
 	 * that are not already voluntarily blocked. Mark these tasks
-	 * and make a list of them in rcu_tasks_holdouts.
+	 * and make a list of them in holdouts.
 	 */
 	rcu_read_lock();
-	for_each_process_thread(g, t) {
-		if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
-			get_task_struct(t);
-			t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
-			WRITE_ONCE(t->rcu_tasks_holdout, true);
-			list_add(&t->rcu_tasks_holdout_list,
-				 &rcu_tasks_holdouts);
-		}
-	}
+	for_each_process_thread(g, t)
+		rtp->pertask_func(t, &holdouts);
 	rcu_read_unlock();
 
-	/*
-	 * Wait for tasks that are in the process of exiting. This
-	 * does only part of the job, ensuring that all tasks that were
-	 * previously exiting reach the point where they have disabled
-	 * preemption, allowing the later synchronize_rcu() to finish
-	 * the job.
-	 */
-	synchronize_srcu(&tasks_rcu_exit_srcu);
+	rtp->postscan_func();
 
 	/*
 	 * Each pass through the following loop scans the list of holdout
@@ -303,9 +370,8 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 		bool firstreport;
 		bool needreport;
 		int rtst;
-		struct task_struct *t1;
 
-		if (list_empty(&rcu_tasks_holdouts))
+		if (list_empty(&holdouts))
 			break;
 
 		/* Slowly back off waiting for holdouts */
@@ -320,31 +386,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 		lastreport = jiffies;
 		firstreport = true;
 		WARN_ON(signal_pending(current));
-		list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
-					rcu_tasks_holdout_list) {
-			check_holdout_task(t, needreport, &firstreport);
-			cond_resched();
-		}
+		rtp->holdouts_func(&holdouts, needreport, &firstreport);
 	}
 
-	/*
-	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
-	 * memory barriers prior to them in the schedule() path, memory
-	 * reordering on other CPUs could cause their RCU-tasks read-side
-	 * critical sections to extend past the end of the grace period.
-	 * However, because these ->nvcsw updates are carried out with
-	 * interrupts disabled, we can use synchronize_rcu() to force the
-	 * needed ordering on all such CPUs.
-	 *
-	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
-	 * accesses to be within the grace period, avoiding the need for
-	 * memory barriers for ->rcu_tasks_holdout accesses.
-	 *
-	 * In addition, this synchronize_rcu() waits for exiting tasks
-	 * to complete their final preempt_disable() region of execution,
-	 * cleaning up after the synchronize_srcu() above.
-	 */
-	synchronize_rcu();
+	rtp->postgp_func();
 }
 
 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
@@ -413,6 +458,11 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
 
 static int __init rcu_spawn_tasks_kthread(void)
 {
+	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
+	rcu_tasks.pertask_func = rcu_tasks_pertask;
+	rcu_tasks.postscan_func = rcu_tasks_postscan;
+	rcu_tasks.holdouts_func = check_all_holdout_tasks;
+	rcu_tasks.postgp_func = rcu_tasks_postgp;
 	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
 	return 0;
 }
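
With all flavor-specific behavior now behind these five pointers, a later variant only has to provide its own hook functions and register them before spawning its kthread, in the same way rcu_spawn_tasks_kthread() does above. The following is a purely hypothetical sketch: the rcu_tasks_example instance and every *_example function are invented names for illustration and do not appear in this patch (the instance itself would be declared by the same machinery that declares rcu_tasks).

/* Hypothetical wiring of a future flavor; all "example" identifiers are invented. */
static void rcu_tasks_example_pregp(void)
{
	/* Flavor-specific pre-grace-period work. */
}

static void rcu_tasks_example_pertask(struct task_struct *t, struct list_head *hop)
{
	/* Decide whether t is a holdout and, if so, add it to hop. */
}

static void rcu_tasks_example_postscan(void)
{
	/* Work between the tasklist scan and holdout-list processing. */
}

static void rcu_tasks_example_holdouts(struct list_head *hop, bool ndrpt, bool *frptp)
{
	/* Scan hop for tasks that are no longer holding out. */
}

static void rcu_tasks_example_postgp(void)
{
	/* Flavor-specific post-grace-period cleanup. */
}

static int __init rcu_spawn_tasks_example_kthread(void)
{
	rcu_tasks_example.pregp_func = rcu_tasks_example_pregp;
	rcu_tasks_example.pertask_func = rcu_tasks_example_pertask;
	rcu_tasks_example.postscan_func = rcu_tasks_example_postscan;
	rcu_tasks_example.holdouts_func = rcu_tasks_example_holdouts;
	rcu_tasks_example.postgp_func = rcu_tasks_example_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_example);
	return 0;
}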
