 
 #ifdef CONFIG_TASKS_RCU
 
+/**
+ * Definition for a Tasks-RCU-like mechanism.
+ * @cbs_head: Head of callback list.
+ * @cbs_tail: Tail pointer for callback list.
+ * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
+ * @cbs_lock: Lock protecting callback list.
+ * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
+ */
+struct rcu_tasks {
+	struct rcu_head *cbs_head;
+	struct rcu_head **cbs_tail;
+	struct wait_queue_head cbs_wq;
+	raw_spinlock_t cbs_lock;
+	struct task_struct *kthread_ptr;
+};
+
+#define DEFINE_RCU_TASKS(name)					\
+static struct rcu_tasks name =					\
+{								\
+	.cbs_tail = &name.cbs_head,				\
+	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(name.cbs_wq),	\
+	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(name.cbs_lock),	\
+}
+
 /*
  * Simple variant of RCU whose quiescent states are voluntary context
  * switch, cond_resched_rcu_qs(), user-space execution, and idle.
@@ -18,12 +42,7 @@
  * rates from multiple CPUs.  If this is required, per-CPU callback lists
  * will be needed.
  */
-
-/* Global list of callbacks and associated lock. */
-static struct rcu_head *rcu_tasks_cbs_head;
-static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
-static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
+DEFINE_RCU_TASKS(rcu_tasks);
 
 /* Track exiting tasks in order to allow them to be waited for. */
 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
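
The DEFINE_RCU_TASKS(rcu_tasks) line above replaces the four deleted file-scope variables with a single statically initialized instance. For reference, the invocation expands to roughly the following; cbs_head and kthread_ptr are left to static zero-initialization, and cbs_tail starts out pointing at cbs_head, so the list is empty yet still supports O(1) append:

	/* Approximate expansion of DEFINE_RCU_TASKS(rcu_tasks) above. */
	static struct rcu_tasks rcu_tasks = {
		.cbs_tail = &rcu_tasks.cbs_head,
		.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_tasks.cbs_wq),
		.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_tasks.cbs_lock),
	};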
@@ -33,8 +52,6 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
-static struct task_struct *rcu_tasks_kthread_ptr;
-
 /**
  * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
  * @rhp: structure to be used for queueing the RCU updates.
@@ -57,17 +74,18 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
 	unsigned long flags;
 	bool needwake;
+	struct rcu_tasks *rtp = &rcu_tasks;
 
 	rhp->next = NULL;
 	rhp->func = func;
-	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
-	needwake = !rcu_tasks_cbs_head;
-	WRITE_ONCE(*rcu_tasks_cbs_tail, rhp);
-	rcu_tasks_cbs_tail = &rhp->next;
-	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
+	needwake = !rtp->cbs_head;
+	WRITE_ONCE(*rtp->cbs_tail, rhp);
+	rtp->cbs_tail = &rhp->next;
+	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
 	/* We can't create the thread unless interrupts are enabled. */
-	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
-		wake_up(&rcu_tasks_cbs_wq);
+	if (needwake && READ_ONCE(rtp->kthread_ptr))
+		wake_up(&rtp->cbs_wq);
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
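
The queueing logic in call_rcu_tasks() is unchanged by this hunk; only the storage moves from the old globals into the rcu_tasks instance reached through rtp. For readers unfamiliar with the API, a minimal caller-side sketch follows; it is not part of this patch, and my_data/my_data_free_cb are hypothetical names. The usual pattern embeds a struct rcu_head in the object and recovers the object with container_of() in the callback:

	/* Hypothetical caller of call_rcu_tasks(); not part of this patch. */
	struct my_data {
		struct rcu_head rh;
		/* ... payload ... */
	};

	static void my_data_free_cb(struct rcu_head *rhp)
	{
		struct my_data *p = container_of(rhp, struct my_data, rh);

		kfree(p);
	}

	/* Defer freeing until every task has passed through a voluntary
	 * context switch, usermode execution, or idle. */
	static void my_data_defer_free(struct my_data *p)
	{
		call_rcu_tasks(&p->rh, my_data_free_cb);
	}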
@@ -169,10 +187,12 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	struct rcu_head *list;
 	struct rcu_head *next;
 	LIST_HEAD(rcu_tasks_holdouts);
+	struct rcu_tasks *rtp = arg;
 	int fract;
 
 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
 	housekeeping_affine(current, HK_FLAG_RCU);
+	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
 
 	/*
 	 * Each pass through the following loop makes one check for
@@ -183,17 +203,17 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	for (;;) {
 
 		/* Pick up any new callbacks. */
-		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
-		list = rcu_tasks_cbs_head;
-		rcu_tasks_cbs_head = NULL;
-		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
-		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
+		list = rtp->cbs_head;
+		rtp->cbs_head = NULL;
+		rtp->cbs_tail = &rtp->cbs_head;
+		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
 
 		/* If there were none, wait a bit and start over. */
 		if (!list) {
-			wait_event_interruptible(rcu_tasks_cbs_wq,
-						 READ_ONCE(rcu_tasks_cbs_head));
-			if (!rcu_tasks_cbs_head) {
+			wait_event_interruptible(rtp->cbs_wq,
+						 READ_ONCE(rtp->cbs_head));
+			if (!rtp->cbs_head) {
 				WARN_ON(signal_pending(current));
 				schedule_timeout_interruptible(HZ/10);
 			}
@@ -211,7 +231,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 *
 		 * This synchronize_rcu() also dispenses with the
 		 * need for a memory barrier on the first store to
-		 * ->rcu_tasks_holdout, as it forces the store to happen
+		 * t->rcu_tasks_holdout, as it forces the store to happen
 		 * after the beginning of the grace period.
 		 */
 		synchronize_rcu();
@@ -278,7 +298,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			firstreport = true;
 			WARN_ON(signal_pending(current));
 			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
-						rcu_tasks_holdout_list) {
+						 rcu_tasks_holdout_list) {
 				check_holdout_task(t, needreport, &firstreport);
 				cond_resched();
 			}
@@ -325,11 +345,10 @@ static int __init rcu_spawn_tasks_kthread(void)
 {
 	struct task_struct *t;
 
-	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
+	t = kthread_run(rcu_tasks_kthread, &rcu_tasks, "rcu_tasks_kthread");
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
 		return 0;
 	smp_mb(); /* Ensure others see full kthread. */
-	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
 	return 0;
 }
 core_initcall(rcu_spawn_tasks_kthread);
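
Two details of this final hunk are worth noting. First, the grace-period kthread now receives its rcu_tasks structure as the kthread argument and publishes itself with WRITE_ONCE(rtp->kthread_ptr, current) once it is running, so the old WRITE_ONCE(rcu_tasks_kthread_ptr, t) after kthread_run() is no longer needed; call_rcu_tasks() still skips the wakeup until that pointer is non-NULL. Second, since the hunks above route all of rcu_tasks_kthread()'s list handling through the structure passed in as its argument, the same kthread function could presumably back additional Tasks-RCU-like flavors, each with its own DEFINE_RCU_TASKS() instance and its own enqueue function (call_rcu_tasks() itself still names the global rcu_tasks). A purely hypothetical sketch, not part of this patch:

	/* Hypothetical second flavor reusing the same kthread function. */
	DEFINE_RCU_TASKS(rcu_tasks_example);

	static int __init rcu_spawn_tasks_example_kthread(void)
	{
		struct task_struct *t;

		t = kthread_run(rcu_tasks_kthread, &rcu_tasks_example,
				"rcu_tasks_example_kthread");
		WARN_ONCE(IS_ERR(t),
			  "%s: Could not start example Tasks-RCU kthread\n", __func__);
		return 0;
	}
	core_initcall(rcu_spawn_tasks_example_kthread);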