@@ -72,79 +72,79 @@ For example, if it uses call_rcu(), call_srcu() on srcu_struct_1, and
call_srcu() on srcu_struct_2, then the following three lines of code
will be required when unloading::

-  1 rcu_barrier();
-  2 srcu_barrier(&srcu_struct_1);
-  3 srcu_barrier(&srcu_struct_2);
+  1 rcu_barrier();
+  2 srcu_barrier(&srcu_struct_1);
+  3 srcu_barrier(&srcu_struct_2);

If latency is of the essence, workqueues could be used to run these
three functions concurrently.
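
One minimal sketch of that approach, using the regular system workqueue
(the barrier_work structure and the do_barrier_work() and
wait_for_all_callbacks() names below are illustrative, not part of the
original text), might look like this::

  /*
   * Assumes <linux/workqueue.h>, <linux/kernel.h>, <linux/rcupdate.h>,
   * and <linux/srcu.h> have been included.
   */
  struct barrier_work {
          struct work_struct work;
          struct srcu_struct *sp;         /* NULL means plain rcu_barrier(). */
  };

  static void do_barrier_work(struct work_struct *work)
  {
          struct barrier_work *bw = container_of(work, struct barrier_work, work);

          if (bw->sp)
                  srcu_barrier(bw->sp);
          else
                  rcu_barrier();
  }

  static void wait_for_all_callbacks(void)
  {
          struct barrier_work bw[] = {
                  { .sp = NULL },                 /* rcu_barrier() */
                  { .sp = &srcu_struct_1 },
                  { .sp = &srcu_struct_2 },
          };
          int i;

          /* Queue all three barriers, then wait for each in turn. */
          for (i = 0; i < ARRAY_SIZE(bw); i++) {
                  INIT_WORK_ONSTACK(&bw[i].work, do_barrier_work);
                  schedule_work(&bw[i].work);
          }
          for (i = 0; i < ARRAY_SIZE(bw); i++) {
                  flush_work(&bw[i].work);
                  destroy_work_on_stack(&bw[i].work);
          }
  }

With the three barrier operations overlapping, the total wait is bounded
by the slowest single flavor rather than by the sum of all three.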

An ancient version of the rcutorture module makes use of rcu_barrier()
in its exit function as follows::

-  1 static void
-  2 rcu_torture_cleanup(void)
-  3 {
-  4   int i;
-  5
-  6   fullstop = 1;
-  7   if (shuffler_task != NULL) {
-  8     VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
-  9     kthread_stop(shuffler_task);
- 10   }
- 11   shuffler_task = NULL;
+  1 static void
+  2 rcu_torture_cleanup(void)
+  3 {
+  4   int i;
+  5
+  6   fullstop = 1;
+  7   if (shuffler_task != NULL) {
+  8     VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
+  9     kthread_stop(shuffler_task);
+ 10   }
+ 11   shuffler_task = NULL;
  12
- 13   if (writer_task != NULL) {
- 14     VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
- 15     kthread_stop(writer_task);
- 16   }
- 17   writer_task = NULL;
+ 13   if (writer_task != NULL) {
+ 14     VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
+ 15     kthread_stop(writer_task);
+ 16   }
+ 17   writer_task = NULL;
  18
- 19   if (reader_tasks != NULL) {
- 20     for (i = 0; i < nrealreaders; i++) {
- 21       if (reader_tasks[i] != NULL) {
- 22         VERBOSE_PRINTK_STRING(
- 23           "Stopping rcu_torture_reader task");
- 24         kthread_stop(reader_tasks[i]);
- 25       }
- 26       reader_tasks[i] = NULL;
- 27     }
- 28     kfree(reader_tasks);
- 29     reader_tasks = NULL;
- 30   }
- 31   rcu_torture_current = NULL;
+ 19   if (reader_tasks != NULL) {
+ 20     for (i = 0; i < nrealreaders; i++) {
+ 21       if (reader_tasks[i] != NULL) {
+ 22         VERBOSE_PRINTK_STRING(
+ 23           "Stopping rcu_torture_reader task");
+ 24         kthread_stop(reader_tasks[i]);
+ 25       }
+ 26       reader_tasks[i] = NULL;
+ 27     }
+ 28     kfree(reader_tasks);
+ 29     reader_tasks = NULL;
+ 30   }
+ 31   rcu_torture_current = NULL;
  32
- 33   if (fakewriter_tasks != NULL) {
- 34     for (i = 0; i < nfakewriters; i++) {
- 35       if (fakewriter_tasks[i] != NULL) {
- 36         VERBOSE_PRINTK_STRING(
- 37           "Stopping rcu_torture_fakewriter task");
- 38         kthread_stop(fakewriter_tasks[i]);
- 39       }
- 40       fakewriter_tasks[i] = NULL;
- 41     }
- 42     kfree(fakewriter_tasks);
- 43     fakewriter_tasks = NULL;
- 44   }
+ 33   if (fakewriter_tasks != NULL) {
+ 34     for (i = 0; i < nfakewriters; i++) {
+ 35       if (fakewriter_tasks[i] != NULL) {
+ 36         VERBOSE_PRINTK_STRING(
+ 37           "Stopping rcu_torture_fakewriter task");
+ 38         kthread_stop(fakewriter_tasks[i]);
+ 39       }
+ 40       fakewriter_tasks[i] = NULL;
+ 41     }
+ 42     kfree(fakewriter_tasks);
+ 43     fakewriter_tasks = NULL;
+ 44   }
  45
- 46   if (stats_task != NULL) {
- 47     VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
- 48     kthread_stop(stats_task);
- 49   }
- 50   stats_task = NULL;
+ 46   if (stats_task != NULL) {
+ 47     VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
+ 48     kthread_stop(stats_task);
+ 49   }
+ 50   stats_task = NULL;
  51
- 52   /* Wait for all RCU callbacks to fire. */
- 53   rcu_barrier();
+ 52   /* Wait for all RCU callbacks to fire. */
+ 53   rcu_barrier();
  54
- 55   rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+ 55   rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
  56
- 57   if (cur_ops->cleanup != NULL)
- 58     cur_ops->cleanup();
- 59   if (atomic_read(&n_rcu_torture_error))
- 60     rcu_torture_print_module_parms("End of test: FAILURE");
- 61   else
- 62     rcu_torture_print_module_parms("End of test: SUCCESS");
- 63 }
+ 57   if (cur_ops->cleanup != NULL)
+ 58     cur_ops->cleanup();
+ 59   if (atomic_read(&n_rcu_torture_error))
+ 60     rcu_torture_print_module_parms("End of test: FAILURE");
+ 61   else
+ 62     rcu_torture_print_module_parms("End of test: SUCCESS");
+ 63 }

Line 6 sets a global variable that prevents any RCU callbacks from
re-posting themselves. This will not be necessary in most cases, since
@@ -193,16 +193,16 @@ which point, all earlier RCU callbacks are guaranteed to have completed.

The original code for rcu_barrier() was roughly as follows::

-  1 void rcu_barrier(void)
-  2 {
-  3   BUG_ON(in_interrupt());
-  4   /* Take cpucontrol mutex to protect against CPU hotplug */
-  5   mutex_lock(&rcu_barrier_mutex);
-  6   init_completion(&rcu_barrier_completion);
-  7   atomic_set(&rcu_barrier_cpu_count, 1);
-  8   on_each_cpu(rcu_barrier_func, NULL, 0, 1);
-  9   if (atomic_dec_and_test(&rcu_barrier_cpu_count))
- 10     complete(&rcu_barrier_completion);
+  1 void rcu_barrier(void)
+  2 {
+  3   BUG_ON(in_interrupt());
+  4   /* Take cpucontrol mutex to protect against CPU hotplug */
+  5   mutex_lock(&rcu_barrier_mutex);
+  6   init_completion(&rcu_barrier_completion);
+  7   atomic_set(&rcu_barrier_cpu_count, 1);
+  8   on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+  9   if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ 10     complete(&rcu_barrier_completion);
  11   wait_for_completion(&rcu_barrier_completion);
  12   mutex_unlock(&rcu_barrier_mutex);
  13 }
@@ -232,16 +232,16 @@ still gives the general idea.
The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
to post an RCU callback, as follows::

-  1 static void rcu_barrier_func(void *notused)
-  2 {
-  3   int cpu = smp_processor_id();
-  4   struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-  5   struct rcu_head *head;
-  6
-  7   head = &rdp->barrier;
-  8   atomic_inc(&rcu_barrier_cpu_count);
-  9   call_rcu(head, rcu_barrier_callback);
- 10 }
+  1 static void rcu_barrier_func(void *notused)
+  2 {
+  3   int cpu = smp_processor_id();
+  4   struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+  5   struct rcu_head *head;
+  6
+  7   head = &rdp->barrier;
+  8   atomic_inc(&rcu_barrier_cpu_count);
+  9   call_rcu(head, rcu_barrier_callback);
+ 10 }

Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure,
which contains the struct rcu_head that is needed for the later call to
@@ -254,11 +254,11 @@ The rcu_barrier_callback() function simply atomically decrements the
rcu_barrier_cpu_count variable and finalizes the completion when it
reaches zero, as follows::

-  1 static void rcu_barrier_callback(struct rcu_head *notused)
-  2 {
-  3   if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-  4     complete(&rcu_barrier_completion);
-  5 }
+  1 static void rcu_barrier_callback(struct rcu_head *notused)
+  2 {
+  3   if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+  4     complete(&rcu_barrier_completion);
+  5 }

.. _rcubarrier_quiz_3: