Commit 07e1051

rcu-tasks: Create struct to hold state information
This commit creates an rcu_tasks struct to hold state information for RCU Tasks. This is a preparation commit for adding additional flavors of Tasks RCU, each of which would have its own rcu_tasks struct.

Signed-off-by: Paul E. McKenney <[email protected]>
1 parent eacd6f0 commit 07e1051

File tree

1 file changed: +46, -27 lines

kernel/rcu/tasks.h

Lines changed: 46 additions & 27 deletions
@@ -7,6 +7,30 @@
 
 #ifdef CONFIG_TASKS_RCU
 
+/**
+ * Definition for a Tasks-RCU-like mechanism.
+ * @cbs_head: Head of callback list.
+ * @cbs_tail: Tail pointer for callback list.
+ * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
+ * @cbs_lock: Lock protecting callback list.
+ * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
+ */
+struct rcu_tasks {
+	struct rcu_head *cbs_head;
+	struct rcu_head **cbs_tail;
+	struct wait_queue_head cbs_wq;
+	raw_spinlock_t cbs_lock;
+	struct task_struct *kthread_ptr;
+};
+
+#define DEFINE_RCU_TASKS(name)						\
+static struct rcu_tasks name =						\
+{									\
+	.cbs_tail = &name.cbs_head,					\
+	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(name.cbs_wq),		\
+	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(name.cbs_lock),		\
+}
+
 /*
  * Simple variant of RCU whose quiescent states are voluntary context
  * switch, cond_resched_rcu_qs(), user-space execution, and idle.
@@ -18,12 +42,7 @@
  * rates from multiple CPUs. If this is required, per-CPU callback lists
  * will be needed.
  */
-
-/* Global list of callbacks and associated lock. */
-static struct rcu_head *rcu_tasks_cbs_head;
-static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
-static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
+DEFINE_RCU_TASKS(rcu_tasks);
 
 /* Track exiting tasks in order to allow them to be waited for. */
 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
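
Editorial note (not part of the commit): per the macro defined in the previous hunk, the DEFINE_RCU_TASKS(rcu_tasks) invocation above expands to roughly the following statically initialized instance, in which cbs_head starts out NULL and cbs_tail points at it, giving an empty callback list:

	static struct rcu_tasks rcu_tasks = {
		/* .cbs_head is implicitly NULL: the callback list starts out empty. */
		.cbs_tail = &rcu_tasks.cbs_head,	/* tail pointer aims at the (NULL) head */
		.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_tasks.cbs_wq),
		.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_tasks.cbs_lock),
	};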
@@ -33,8 +52,6 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
-static struct task_struct *rcu_tasks_kthread_ptr;
-
 /**
  * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
  * @rhp: structure to be used for queueing the RCU updates.
@@ -57,17 +74,18 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
 	unsigned long flags;
 	bool needwake;
+	struct rcu_tasks *rtp = &rcu_tasks;
 
 	rhp->next = NULL;
 	rhp->func = func;
-	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
-	needwake = !rcu_tasks_cbs_head;
-	WRITE_ONCE(*rcu_tasks_cbs_tail, rhp);
-	rcu_tasks_cbs_tail = &rhp->next;
-	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
+	needwake = !rtp->cbs_head;
+	WRITE_ONCE(*rtp->cbs_tail, rhp);
+	rtp->cbs_tail = &rhp->next;
+	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
 	/* We can't create the thread unless interrupts are enabled. */
-	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
-		wake_up(&rcu_tasks_cbs_wq);
+	if (needwake && READ_ONCE(rtp->kthread_ptr))
+		wake_up(&rtp->cbs_wq);
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
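Editorial usage sketch (not from this commit; struct my_obj and its callback are hypothetical names invented for illustration): callers are unaffected by this change; they still pass a struct rcu_head and a callback, and the flavor's rcu_tasks instance is only consulted internally:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_obj {
		struct rcu_head rh;
		int payload;
	};

	static void my_obj_free_cb(struct rcu_head *rhp)
	{
		/* Runs after a Tasks-RCU grace period has elapsed. */
		kfree(container_of(rhp, struct my_obj, rh));
	}

	static void my_obj_release(struct my_obj *p)
	{
		/* Queue p for freeing once a Tasks-RCU grace period completes. */
		call_rcu_tasks(&p->rh, my_obj_free_cb);
	}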
@@ -169,10 +187,12 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	struct rcu_head *list;
 	struct rcu_head *next;
 	LIST_HEAD(rcu_tasks_holdouts);
+	struct rcu_tasks *rtp = arg;
 	int fract;
 
 	/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
 	housekeeping_affine(current, HK_FLAG_RCU);
+	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
 
 	/*
 	 * Each pass through the following loop makes one check for
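
Editorial commentary (not part of the diff): the line added here pairs with the wakeup check in call_rcu_tasks() above. The kthread publishes itself through ->kthread_ptr, and enqueuers only issue a wakeup once that pointer is seen as non-NULL, so callbacks queued before the kthread exists are simply picked up on its first pass through the scan loop:

	/* Grace-period kthread, once at startup: */
	WRITE_ONCE(rtp->kthread_ptr, current);		/* publish: grace periods may now start */

	/* call_rcu_tasks(), after appending the new callback: */
	if (needwake && READ_ONCE(rtp->kthread_ptr))	/* skip the wakeup if there is no kthread yet */
		wake_up(&rtp->cbs_wq);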
@@ -183,17 +203,17 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	for (;;) {
 
 		/* Pick up any new callbacks. */
-		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
-		list = rcu_tasks_cbs_head;
-		rcu_tasks_cbs_head = NULL;
-		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
-		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
+		list = rtp->cbs_head;
+		rtp->cbs_head = NULL;
+		rtp->cbs_tail = &rtp->cbs_head;
+		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
 
 		/* If there were none, wait a bit and start over. */
 		if (!list) {
-			wait_event_interruptible(rcu_tasks_cbs_wq,
-						 READ_ONCE(rcu_tasks_cbs_head));
-			if (!rcu_tasks_cbs_head) {
+			wait_event_interruptible(rtp->cbs_wq,
+						 READ_ONCE(rtp->cbs_head));
+			if (!rtp->cbs_head) {
 				WARN_ON(signal_pending(current));
 				schedule_timeout_interruptible(HZ/10);
 			}
@@ -211,7 +231,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 *
 		 * This synchronize_rcu() also dispenses with the
 		 * need for a memory barrier on the first store to
-		 * ->rcu_tasks_holdout, as it forces the store to happen
+		 * t->rcu_tasks_holdout, as it forces the store to happen
 		 * after the beginning of the grace period.
 		 */
 		synchronize_rcu();
@@ -278,7 +298,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			firstreport = true;
 			WARN_ON(signal_pending(current));
 			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
-						rcu_tasks_holdout_list) {
+						 rcu_tasks_holdout_list) {
 				check_holdout_task(t, needreport, &firstreport);
 				cond_resched();
 			}
@@ -325,11 +345,10 @@ static int __init rcu_spawn_tasks_kthread(void)
 {
 	struct task_struct *t;
 
-	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
+	t = kthread_run(rcu_tasks_kthread, &rcu_tasks, "rcu_tasks_kthread");
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
 		return 0;
 	smp_mb(); /* Ensure others see full kthread. */
-	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
 	return 0;
 }
 core_initcall(rcu_spawn_tasks_kthread);
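
Editorial sketch of where this is headed (the rcu_tasks_other names below are invented for illustration and do not appear in this commit): given the commit message's stated goal of additional Tasks-RCU flavors, a later flavor could in principle reuse the same struct and kthread body just by declaring its own instance and passing it as the kthread argument:

	/* Hypothetical second flavor sharing struct rcu_tasks. */
	DEFINE_RCU_TASKS(rcu_tasks_other);

	static int __init rcu_spawn_tasks_other_kthread(void)
	{
		struct task_struct *t;

		/* Same kthread body; the per-flavor state travels via the arg pointer. */
		t = kthread_run(rcu_tasks_kthread, &rcu_tasks_other, "rcu_tasks_other_kthread");
		if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread\n", __func__))
			return 0;
		return 0;
	}
	core_initcall(rcu_spawn_tasks_other_kthread);

In this commit the kthread body still contains Tasks-RCU-specific holdout scanning, so further factoring would be needed before a second flavor could actually share it; the sketch only shows the intended shape of the per-flavor state.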
