@@ -17,7 +17,7 @@ typedef void (*pregp_func_t)(void);
 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
 typedef void (*postscan_func_t)(void);
 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
-typedef void (*postgp_func_t)(void);
+typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
 
 /**
  * Definition for a Tasks-RCU-like mechanism.
@@ -27,6 +27,9 @@ typedef void (*postgp_func_t)(void);
  * @cbs_lock: Lock protecting callback list.
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  * @gp_func: This flavor's grace-period-wait function.
+ * @gp_state: Grace period's most recent state transition (debugging).
+ * @gp_jiffies: Time of last @gp_state transition.
+ * @gp_start: Most recent grace-period start in jiffies.
  * @pregp_func: This flavor's pre-grace-period function (optional).
  * @pertask_func: This flavor's per-task scan function (optional).
  * @postscan_func: This flavor's post-task scan function (optional).
@@ -41,6 +44,8 @@ struct rcu_tasks {
 	struct rcu_head **cbs_tail;
 	struct wait_queue_head cbs_wq;
 	raw_spinlock_t cbs_lock;
+	int gp_state;
+	unsigned long gp_jiffies;
 	struct task_struct *kthread_ptr;
 	rcu_tasks_gp_func_t gp_func;
 	pregp_func_t pregp_func;
@@ -73,10 +78,56 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
+/* RCU tasks grace-period state for debugging. */
+#define RTGS_INIT		 0
+#define RTGS_WAIT_WAIT_CBS	 1
+#define RTGS_WAIT_GP		 2
+#define RTGS_PRE_WAIT_GP	 3
+#define RTGS_SCAN_TASKLIST	 4
+#define RTGS_POST_SCAN_TASKLIST	 5
+#define RTGS_WAIT_SCAN_HOLDOUTS	 6
+#define RTGS_SCAN_HOLDOUTS	 7
+#define RTGS_POST_GP		 8
+#define RTGS_WAIT_READERS	 9
+#define RTGS_INVOKE_CBS		10
+#define RTGS_WAIT_CBS		11
+static const char * const rcu_tasks_gp_state_names[] = {
+	"RTGS_INIT",
+	"RTGS_WAIT_WAIT_CBS",
+	"RTGS_WAIT_GP",
+	"RTGS_PRE_WAIT_GP",
+	"RTGS_SCAN_TASKLIST",
+	"RTGS_POST_SCAN_TASKLIST",
+	"RTGS_WAIT_SCAN_HOLDOUTS",
+	"RTGS_SCAN_HOLDOUTS",
+	"RTGS_POST_GP",
+	"RTGS_WAIT_READERS",
+	"RTGS_INVOKE_CBS",
+	"RTGS_WAIT_CBS",
+};
+
 ////////////////////////////////////////////////////////////////////////
 //
 // Generic code.
 
+/* Record grace-period phase and time. */
+static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
+{
+	rtp->gp_state = newstate;
+	rtp->gp_jiffies = jiffies;
+}
+
+/* Return state name. */
+static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
+{
+	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
+	int j = READ_ONCE(i); // Prevent the compiler from reading twice
+
+	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
+		return "???";
+	return rcu_tasks_gp_state_names[j];
+}
+
 // Enqueue a callback for the specified flavor of Tasks RCU.
 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 				   struct rcu_tasks *rtp)
@@ -141,15 +192,18 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 					 READ_ONCE(rtp->cbs_head));
 			if (!rtp->cbs_head) {
 				WARN_ON(signal_pending(current));
+				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
 				schedule_timeout_interruptible(HZ/10);
 			}
 			continue;
 		}
 
 		// Wait for one grace period.
+		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
 		rtp->gp_func(rtp);
 
 		/* Invoke the callbacks. */
+		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
 		while (list) {
 			next = list->next;
 			local_bh_disable();
@@ -160,6 +214,8 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		}
 		/* Paranoid sleep to keep this from entering a tight loop */
 		schedule_timeout_uninterruptible(HZ/10);
+
+		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 	}
 }
 
@@ -222,8 +278,11 @@ static void __init rcu_tasks_bootup_oddness(void)
 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
 {
-	pr_info("%s %c%c %s\n",
+	pr_info("%s: %s(%d) since %lu %c%c %s\n",
 		rtp->kname,
+		tasks_gp_state_getname(rtp),
+		data_race(rtp->gp_state),
+		jiffies - data_race(rtp->gp_jiffies),
 		".k"[!!data_race(rtp->kthread_ptr)],
 		".C"[!!data_race(rtp->cbs_head)],
 		s);
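With this hunk, the rcutorture-relevant status line gains the current grace-period phase name, its numeric value, and the number of jiffies since the last phase transition. For illustration only (values hypothetical), a line that previously read "rcu_tasks k. <s>" would now read something like "rcu_tasks: RTGS_WAIT_CBS(11) since 30 k. <s>".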
@@ -243,6 +302,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 	LIST_HEAD(holdouts);
 	int fract;
 
+	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
 	rtp->pregp_func();
 
 	/*
@@ -251,11 +311,13 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 	 * that are not already voluntarily blocked. Mark these tasks
 	 * and make a list of them in holdouts.
 	 */
+	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
 	rcu_read_lock();
 	for_each_process_thread(g, t)
 		rtp->pertask_func(t, &holdouts);
 	rcu_read_unlock();
 
+	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
 	rtp->postscan_func();
 
 	/*
@@ -277,6 +339,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 			break;
 
 		/* Slowly back off waiting for holdouts */
+		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
 		schedule_timeout_interruptible(HZ/fract);
 
 		if (fract > 1)
@@ -288,10 +351,12 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 			lastreport = jiffies;
 		firstreport = true;
 		WARN_ON(signal_pending(current));
+		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
 		rtp->holdouts_func(&holdouts, needreport, &firstreport);
 	}
 
-	rtp->postgp_func();
+	set_tasks_gp_state(rtp, RTGS_POST_GP);
+	rtp->postgp_func(rtp);
 }
 
 ////////////////////////////////////////////////////////////////////////
@@ -394,7 +459,7 @@ static void check_all_holdout_tasks(struct list_head *hop,
 }
 
 /* Finish off the Tasks-RCU grace period. */
-static void rcu_tasks_postgp(void)
+static void rcu_tasks_postgp(struct rcu_tasks *rtp)
 {
 	/*
 	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
@@ -881,7 +946,7 @@ static void check_all_holdout_tasks_trace(struct list_head *hop,
 }
 
 /* Wait for grace period to complete and provide ordering. */
-static void rcu_tasks_trace_postgp(void)
+static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 {
 	bool firstreport;
 	struct task_struct *g, *t;
@@ -894,13 +959,15 @@ static void rcu_tasks_trace_postgp(void)
 	smp_mb__after_atomic();  // Order vs. later atomics
 
 	// Wait for readers.
+	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
 	for (;;) {
 		ret = wait_event_idle_exclusive_timeout(
 				trc_wait,
 				atomic_read(&trc_n_readers_need_end) == 0,
 				READ_ONCE(rcu_task_stall_timeout));
 		if (ret)
 			break;  // Count reached zero.
+		// Stall warning time, so make a list of the offenders.
 		for_each_process_thread(g, t)
 			if (READ_ONCE(t->trc_reader_need_end))
 				trc_add_holdout(t, &holdouts);
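
For readers unfamiliar with the pattern these hunks introduce, below is a minimal, self-contained user-space sketch (hypothetical names, plain C, no kernel APIs) of the same idea: record a numeric phase plus a timestamp on every transition, and translate the phase to a human-readable name with a bounds check so a stale or bogus value cannot index out of the name table. The kernel version additionally uses data_race()/READ_ONCE() so KCSAN can flag racing updates without letting the compiler reload the value.

#include <stdio.h>
#include <time.h>

/* Hypothetical phases, standing in for the RTGS_* values above. */
#define GP_IDLE    0
#define GP_WAITING 1
#define GP_DONE    2

static const char * const gp_state_names[] = {
	"GP_IDLE",
	"GP_WAITING",
	"GP_DONE",
};

struct gp_debug {
	int state;     /* analogous to rcu_tasks.gp_state */
	time_t stamp;  /* analogous to rcu_tasks.gp_jiffies */
};

/* Record the new phase and when it was entered. */
static void set_gp_state(struct gp_debug *d, int newstate)
{
	d->state = newstate;
	d->stamp = time(NULL);
}

/* Map the numeric phase to a name, guarding against out-of-range values. */
static const char *gp_state_name(const struct gp_debug *d)
{
	int s = d->state;

	if (s < 0 || (size_t)s >= sizeof(gp_state_names) / sizeof(gp_state_names[0]))
		return "???";
	return gp_state_names[s];
}

int main(void)
{
	struct gp_debug d;

	set_gp_state(&d, GP_WAITING);
	printf("%s(%d) entered %ld seconds ago\n",
	       gp_state_name(&d), d.state, (long)(time(NULL) - d.stamp));
	return 0;
}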