
Commit 4a5f133

rcutorture: Add races with task-exit processing
Several variants of Linux-kernel RCU interact with task-exit processing, including preemptible RCU, Tasks RCU, and Tasks Trace RCU. This commit therefore adds testing of this interaction to rcutorture by adding the rcutorture.read_exit_burst and rcutorture.read_exit_delay kernel-boot parameters. These kernel parameters control the frequency and spacing of special read-then-exit kthreads that are spawned.

[ paulmck: Apply feedback from Dan Carpenter's static checker. ]
[ paulmck: Reduce latency to avoid false-positive shutdown hangs. ]

Signed-off-by: Paul E. McKenney <[email protected]>
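The new kernel-boot parameters are given on the kernel command line in the usual rcutorture.<name>= form. For example, the following boot arguments simply restate the defaults that this commit assigns in rcutorture.c (16 bursts per episode, 13 seconds between episodes), shown here purely as an illustration:

    rcutorture.read_exit_burst=16 rcutorture.read_exit_delay=13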
1 parent: d02c6b5

3 files changed: 128 additions & 3 deletions


Documentation/admin-guide/kernel-parameters.txt

Lines changed: 14 additions & 0 deletions
@@ -4258,6 +4258,20 @@
                         Set time (jiffies) between CPU-hotplug operations,
                         or zero to disable CPU-hotplug testing.
 
+        rcutorture.read_exit= [KNL]
+                        Set the number of read-then-exit kthreads used
+                        to test the interaction of RCU updaters and
+                        task-exit processing.
+
+        rcutorture.read_exit_burst= [KNL]
+                        The number of times in a given read-then-exit
+                        episode that a set of read-then-exit kthreads
+                        is spawned.
+
+        rcutorture.read_exit_delay= [KNL]
+                        The delay, in seconds, between successive
+                        read-then-exit testing episodes.
+
         rcutorture.shuffle_interval= [KNL]
                         Set task-shuffle interval (s). Shuffling tasks
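Because torture_param() also registers these settings as module parameters, the same knobs can be supplied at load time when rcutorture is built as a module. A hypothetical invocation (the values here are arbitrary examples, not defaults):

    modprobe rcutorture read_exit_burst=4 read_exit_delay=5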

include/linux/torture.h

Lines changed: 5 additions & 0 deletions
@@ -55,6 +55,11 @@ struct torture_random_state {
 #define DEFINE_TORTURE_RANDOM_PERCPU(name) \
         DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
+static inline void torture_random_init(struct torture_random_state *trsp)
+{
+        trsp->trs_state = 0;
+        trsp->trs_count = 0;
+}
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
 void torture_shuffle_task_register(struct task_struct *tp);
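The new torture_random_init() helper simply resets a torture_random_state to its all-zero initial state. A minimal caller sketch, assuming the DEFINE_TORTURE_RANDOM() and torture_random() declarations this header already provides (the function and variable names below are hypothetical):

    static void example_random_user(void)
    {
            DEFINE_TORTURE_RANDOM(trs);     /* zero-initialized random state */
            unsigned long r;

            torture_random_init(&trs);      /* reset trs_state and trs_count */
            r = torture_random(&trs);       /* then draw pseudo-random values */
            (void)r;                        /* placeholder use of the result */
    }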

kernel/rcu/rcutorture.c

Lines changed: 109 additions & 3 deletions
@@ -109,6 +109,10 @@ torture_param(int, object_debug, 0,
 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 torture_param(int, onoff_interval, 0,
               "Time between CPU hotplugs (jiffies), 0=disable");
+torture_param(int, read_exit_delay, 13,
+              "Delay between read-then-exit episodes (s)");
+torture_param(int, read_exit_burst, 16,
+              "# of read-then-exit bursts per episode, zero to disable");
 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -146,6 +150,7 @@ static struct task_struct *stall_task;
 static struct task_struct *fwd_prog_task;
 static struct task_struct **barrier_cbs_tasks;
 static struct task_struct *barrier_task;
+static struct task_struct *read_exit_task;
 
 #define RCU_TORTURE_PIPE_LEN 10
 
@@ -177,6 +182,7 @@ static long n_rcu_torture_boosts;
 static atomic_long_t n_rcu_torture_timers;
 static long n_barrier_attempts;
 static long n_barrier_successes; /* did rcu_barrier test succeed? */
+static unsigned long n_read_exits;
 static struct list_head rcu_torture_removed;
 static unsigned long shutdown_jiffies;
 
@@ -1539,10 +1545,11 @@ rcu_torture_stats_print(void)
                 n_rcu_torture_boosts,
                 atomic_long_read(&n_rcu_torture_timers));
         torture_onoff_stats();
-        pr_cont("barrier: %ld/%ld:%ld\n",
+        pr_cont("barrier: %ld/%ld:%ld ",
                 data_race(n_barrier_successes),
                 data_race(n_barrier_attempts),
                 data_race(n_rcu_torture_barrier_error));
+        pr_cont("read-exits: %ld\n", data_race(n_read_exits));
 
         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
         if (atomic_read(&n_rcu_torture_mberror) ||
@@ -1634,7 +1641,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                  "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
                  "stall_cpu_block=%d "
                  "n_barrier_cbs=%d "
-                 "onoff_interval=%d onoff_holdoff=%d\n",
+                 "onoff_interval=%d onoff_holdoff=%d "
+                 "read_exit_delay=%d read_exit_burst=%d\n",
                  torture_type, tag, nrealreaders, nfakewriters,
                  stat_interval, verbose, test_no_idle_hz, shuffle_interval,
                  stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
@@ -1643,7 +1651,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                  stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
                  stall_cpu_block,
                  n_barrier_cbs,
-                 onoff_interval, onoff_holdoff);
+                 onoff_interval, onoff_holdoff,
+                 read_exit_delay, read_exit_burst);
 }
 
 static int rcutorture_booster_cleanup(unsigned int cpu)
@@ -2338,6 +2347,99 @@ static bool rcu_torture_can_boost(void)
         return true;
 }
 
+static bool read_exit_child_stop;
+static bool read_exit_child_stopped;
+static wait_queue_head_t read_exit_wq;
+
+// Child kthread which just does an rcutorture reader and exits.
+static int rcu_torture_read_exit_child(void *trsp_in)
+{
+        struct torture_random_state *trsp = trsp_in;
+
+        set_user_nice(current, MAX_NICE);
+        // Minimize time between reading and exiting.
+        while (!kthread_should_stop())
+                schedule_timeout_uninterruptible(1);
+        (void)rcu_torture_one_read(trsp);
+        return 0;
+}
+
+// Parent kthread which creates and destroys read-exit child kthreads.
+static int rcu_torture_read_exit(void *unused)
+{
+        int count = 0;
+        bool errexit = false;
+        int i;
+        struct task_struct *tsp;
+        DEFINE_TORTURE_RANDOM(trs);
+
+        // Allocate and initialize.
+        set_user_nice(current, MAX_NICE);
+        VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
+
+        // Each pass through this loop does one read-exit episode.
+        do {
+                if (++count > read_exit_burst) {
+                        VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
+                        rcu_barrier(); // Wait for task_struct free, avoid OOM.
+                        for (i = 0; i < read_exit_delay; i++) {
+                                schedule_timeout_uninterruptible(HZ);
+                                if (READ_ONCE(read_exit_child_stop))
+                                        break;
+                        }
+                        if (!READ_ONCE(read_exit_child_stop))
+                                VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
+                        count = 0;
+                }
+                if (READ_ONCE(read_exit_child_stop))
+                        break;
+                // Spawn child.
+                tsp = kthread_run(rcu_torture_read_exit_child,
+                                  &trs, "%s",
+                                  "rcu_torture_read_exit_child");
+                if (IS_ERR(tsp)) {
+                        VERBOSE_TOROUT_ERRSTRING("out of memory");
+                        errexit = true;
+                        tsp = NULL;
+                        break;
+                }
+                cond_resched();
+                kthread_stop(tsp);
+                n_read_exits ++;
+                stutter_wait("rcu_torture_read_exit");
+        } while (!errexit && !READ_ONCE(read_exit_child_stop));
+
+        // Clean up and exit.
+        smp_store_release(&read_exit_child_stopped, true); // After reaping.
+        smp_mb(); // Store before wakeup.
+        wake_up(&read_exit_wq);
+        while (!torture_must_stop())
+                schedule_timeout_uninterruptible(1);
+        torture_kthread_stopping("rcu_torture_read_exit");
+        return 0;
+}
+
+static int rcu_torture_read_exit_init(void)
+{
+        if (read_exit_burst <= 0)
+                return -EINVAL;
+        init_waitqueue_head(&read_exit_wq);
+        read_exit_child_stop = false;
+        read_exit_child_stopped = false;
+        return torture_create_kthread(rcu_torture_read_exit, NULL,
+                                      read_exit_task);
+}
+
+static void rcu_torture_read_exit_cleanup(void)
+{
+        if (!read_exit_task)
+                return;
+        WRITE_ONCE(read_exit_child_stop, true);
+        smp_mb(); // Above write before wait.
+        wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
+        torture_stop_kthread(rcutorture_read_exit, read_exit_task);
+}
+
 static enum cpuhp_state rcutor_hp;
 
 static void
@@ -2359,6 +2461,7 @@ rcu_torture_cleanup(void)
         }
 
         show_rcu_gp_kthreads();
+        rcu_torture_read_exit_cleanup();
         rcu_torture_barrier_cleanup();
         torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
         torture_stop_kthread(rcu_torture_stall, stall_task);
@@ -2680,6 +2783,9 @@ rcu_torture_init(void)
         if (firsterr)
                 goto unwind;
         firsterr = rcu_torture_barrier_init();
+        if (firsterr)
+                goto unwind;
+        firsterr = rcu_torture_read_exit_init();
         if (firsterr)
                 goto unwind;
         if (object_debug)

0 commit comments
