
Commit 1b67e03

paulmckrcu authored and Neeraj Upadhyay (AMD) committed
rcutorture: Add tests for SRCU up/down reader primitives
This commit adds a new rcutorture.n_up_down kernel boot parameter that
specifies the number of outstanding SRCU up/down readers, which begin in
kthread context and end in an hrtimer handler. There is a new kthread
("rcu_torture_updown") that scans a per-reader array looking for elements
whose readers have ended. This kthread sleeps between one and two
milliseconds between consecutive scans.

[ paulmck: Apply kernel test robot feedback. ]
[ paulmck: Apply Z qiang feedback. ]
[ joel: Fix build error: hrtimer_init is replaced by hrtimer_setup. ]
[ joel: Apply Boqun bug fix to drop extra up_read() call in rcu_torture_updown(). ]

Signed-off-by: Paul E. McKenney <[email protected]>
Signed-off-by: Joel Fernandes <[email protected]>
Tested-by: kernel test robot <[email protected]>
Signed-off-by: Neeraj Upadhyay (AMD) <[email protected]>
1 parent eec1f94 commit 1b67e03
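Usage note (not part of the commit itself): n_up_down defaults to 32, and a negative value disables the up/down readers entirely, as rcu_torture_updown_init() below shows. A hypothetical invocation exercising the new parameter, either at module load time or on the kernel command line for a built-in rcutorture, might look like this:

    # modular rcutorture, SRCU-dynamic flavor, 16 outstanding up/down readers
    modprobe rcutorture torture_type=srcud n_up_down=16

    # built-in rcutorture, same settings via the kernel command line
    rcutorture.torture_type=srcud rcutorture.n_up_down=16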

File tree

1 file changed, +206 −19 lines


kernel/rcu/rcutorture.c

Lines changed: 206 additions & 19 deletions
@@ -55,22 +55,24 @@ MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <[email protected]> and Josh Triplett <[email protected]>");
 
-/* Bits for ->extendables field, extendables param, and related definitions. */
-#define RCUTORTURE_RDR_SHIFT_1  8     /* Put SRCU index in upper bits. */
-#define RCUTORTURE_RDR_MASK_1   (0xff << RCUTORTURE_RDR_SHIFT_1)
-#define RCUTORTURE_RDR_SHIFT_2  16    /* Put SRCU index in upper bits. */
-#define RCUTORTURE_RDR_MASK_2   (0xff << RCUTORTURE_RDR_SHIFT_2)
-#define RCUTORTURE_RDR_BH       0x01  /* Extend readers by disabling bh. */
-#define RCUTORTURE_RDR_IRQ      0x02  /*  ... disabling interrupts. */
-#define RCUTORTURE_RDR_PREEMPT  0x04  /*  ... disabling preemption. */
-#define RCUTORTURE_RDR_RBH      0x08  /*  ... rcu_read_lock_bh(). */
-#define RCUTORTURE_RDR_SCHED    0x10  /*  ... rcu_read_lock_sched(). */
-#define RCUTORTURE_RDR_RCU_1    0x20  /*  ... entering another RCU reader. */
-#define RCUTORTURE_RDR_RCU_2    0x40  /*  ... entering another RCU reader. */
-#define RCUTORTURE_RDR_NBITS    7     /* Number of bits defined above. */
-#define RCUTORTURE_MAX_EXTEND \
+// Bits for ->extendables field, extendables param, and related definitions.
+#define RCUTORTURE_RDR_SHIFT_1  8     // Put SRCU index in upper bits.
+#define RCUTORTURE_RDR_MASK_1   (0xff << RCUTORTURE_RDR_SHIFT_1)
+#define RCUTORTURE_RDR_SHIFT_2  16    // Put SRCU index in upper bits.
+#define RCUTORTURE_RDR_MASK_2   (0xff << RCUTORTURE_RDR_SHIFT_2)
+#define RCUTORTURE_RDR_BH       0x01  // Extend readers by disabling bh.
+#define RCUTORTURE_RDR_IRQ      0x02  //  ... disabling interrupts.
+#define RCUTORTURE_RDR_PREEMPT  0x04  //  ... disabling preemption.
+#define RCUTORTURE_RDR_RBH      0x08  //  ... rcu_read_lock_bh().
+#define RCUTORTURE_RDR_SCHED    0x10  //  ... rcu_read_lock_sched().
+#define RCUTORTURE_RDR_RCU_1    0x20  //  ... entering another RCU reader.
+#define RCUTORTURE_RDR_RCU_2    0x40  //  ... entering another RCU reader.
+#define RCUTORTURE_RDR_UPDOWN   0x80  //  ... up-read from task, down-read from timer.
+                                      // Note: Manual start, automatic end.
+#define RCUTORTURE_RDR_NBITS    8     // Number of bits defined above.
+#define RCUTORTURE_MAX_EXTEND \
        (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
-        RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
+        RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) // Intentionally omit RCUTORTURE_RDR_UPDOWN.
 #define RCUTORTURE_RDR_ALLBITS \
        (RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
         RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
@@ -110,6 +112,7 @@ torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
 torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
+torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
 torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
@@ -156,6 +159,7 @@ static int nrealfakewriters;
 static struct task_struct *writer_task;
 static struct task_struct **fakewriter_tasks;
 static struct task_struct **reader_tasks;
+static struct task_struct *updown_task;
 static struct task_struct **nocb_tasks;
 static struct task_struct *stats_task;
 static struct task_struct *fqs_task;
@@ -378,6 +382,8 @@ struct rcu_torture_ops {
        void (*readunlock)(int idx);
        int (*readlock_held)(void);    // lockdep.
        int (*readlock_nesting)(void); // actual nesting, if available, -1 if not.
+       int (*down_read)(void);
+       void (*up_read)(int idx);
        unsigned long (*get_gp_seq)(void);
        unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        void (*deferred_free)(struct rcu_torture *p);
@@ -427,6 +433,7 @@ struct rcu_torture_ops {
        int no_pi_lock;
        int debug_objects;
        int start_poll_irqsoff;
+       int have_up_down;
        const char *name;
 };
 
@@ -762,6 +769,50 @@ static int torture_srcu_read_lock_held(void)
        return srcu_read_lock_held(srcu_ctlp);
 }
 
+static bool srcu_torture_have_up_down(void)
+{
+       int rf = reader_flavor;
+
+       if (!rf)
+               rf = SRCU_READ_FLAVOR_NORMAL;
+       return !!(cur_ops->have_up_down & rf);
+}
+
+static int srcu_torture_down_read(void)
+{
+       int idx;
+       struct srcu_ctr __percpu *scp;
+
+       WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);
+       WARN_ON_ONCE(reader_flavor & (reader_flavor - 1));
+
+       if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
+               idx = srcu_down_read(srcu_ctlp);
+               WARN_ON_ONCE(idx & ~0x1);
+               return idx;
+       }
+       if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
+               scp = srcu_down_read_fast(srcu_ctlp);
+               idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
+               WARN_ON_ONCE(idx & ~0x1);
+               return idx << 3;
+       }
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+static void srcu_torture_up_read(int idx)
+{
+       WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
+       if (reader_flavor & SRCU_READ_FLAVOR_FAST)
+               srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
+       else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) ||
+                !(reader_flavor & SRCU_READ_FLAVOR_ALL))
+               srcu_up_read(srcu_ctlp, idx & 0x1);
+       else
+               WARN_ON_ONCE(1);
+}
+
 static unsigned long srcu_torture_completed(void)
 {
        return srcu_batches_completed(srcu_ctlp);
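A reading aid for the two new functions above (a sketch, not code from the patch): srcu_torture_down_read() encodes which reader flavor was used in its return value so that srcu_torture_up_read() can undo the matching operation. The normal flavor keeps the SRCU counter index in bit 0, while the fast flavor shifts it into bit 3; this sketch assumes that bit 0x8 lines up with SRCU_READ_FLAVOR_FAST, which is why the WARN_ON_ONCE() in srcu_torture_up_read() tolerates it.

/* Hypothetical helper illustrating the encoding; not part of the commit. */
#include <assert.h>
#include <stdbool.h>

static int updown_ctr_index(int idx, bool fast_flavor)
{
        if (fast_flavor)
                return (idx & 0x8) >> 3;  /* fast flavor: index parked in bit 3 */
        return idx & 0x1;                 /* normal flavor: index in bit 0 */
}

int main(void)
{
        assert(updown_ctr_index(0x8, true) == 1);   /* srcu_down_read_fast(), period 1 */
        assert(updown_ctr_index(0x1, false) == 1);  /* srcu_down_read(), period 1 */
        return 0;
}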
@@ -819,6 +870,8 @@ static struct rcu_torture_ops srcu_ops = {
        .readlock = srcu_torture_read_lock,
        .read_delay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
+       .down_read = srcu_torture_down_read,
+       .up_read = srcu_torture_up_read,
        .readlock_held = torture_srcu_read_lock_held,
        .get_gp_seq = srcu_torture_completed,
        .gp_diff = rcu_seq_diff,
@@ -839,6 +892,8 @@ static struct rcu_torture_ops srcu_ops = {
        .irq_capable = 1,
        .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
        .debug_objects = 1,
+       .have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
+                       ? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST,
        .name = "srcu"
 };
 
@@ -864,6 +919,8 @@ static struct rcu_torture_ops srcud_ops = {
        .read_delay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .readlock_held = torture_srcu_read_lock_held,
+       .down_read = srcu_torture_down_read,
+       .up_read = srcu_torture_up_read,
        .get_gp_seq = srcu_torture_completed,
        .gp_diff = rcu_seq_diff,
        .deferred_free = srcu_torture_deferred_free,
@@ -883,6 +940,8 @@ static struct rcu_torture_ops srcud_ops = {
        .irq_capable = 1,
        .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
        .debug_objects = 1,
+       .have_up_down = IS_ENABLED(CONFIG_TINY_SRCU)
+                       ? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST,
        .name = "srcud"
 };
 
@@ -1994,7 +2053,7 @@ static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq,
 
        first = idxold1 == 0;
        WARN_ON_ONCE(idxold2 < 0);
-       WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
+       WARN_ON_ONCE(idxold2 & ~(RCUTORTURE_RDR_ALLBITS | RCUTORTURE_RDR_UPDOWN));
        rcutorture_one_extend_check("before change", idxold1, statesnew, statesold, insoftirq);
        rtrsp->rt_readstate = newstate;
 
@@ -2070,6 +2129,11 @@ static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq,
                if (lockit)
                        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
        }
+       if (statesold & RCUTORTURE_RDR_UPDOWN) {
+               cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
+               WARN_ON_ONCE(idxnew1 != -1);
+               idxold1 = 0;
+       }
 
        /* Delay if neither beginning nor end and there was a change. */
        if ((statesnew || statesold) && *readstate && newstate)
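To make the new RCUTORTURE_RDR_UPDOWN handling above easier to follow, here is a sketch of the readstate round trip (the shortened macro names mirror the patch; the helpers themselves are hypothetical): rcu_torture_updown() later in this diff stores the value returned by ->down_read() in the RCUTORTURE_RDR_MASK_1 bits of readstate together with RCUTORTURE_RDR_UPDOWN, and the block above recovers that value and hands it back to ->up_read() when the hrtimer ends the reader.

/* Hypothetical illustration of the readstate packing; not part of the commit. */
#include <assert.h>

#define RDR_SHIFT_1     8
#define RDR_MASK_1      (0xff << RDR_SHIFT_1)
#define RDR_UPDOWN      0x80

static int pack_updown(int rawidx)
{
        return ((rawidx << RDR_SHIFT_1) & RDR_MASK_1) | RDR_UPDOWN;
}

static int unpack_updown(int readstate)
{
        return (readstate & RDR_MASK_1) >> RDR_SHIFT_1;  /* value passed to ->up_read() */
}

int main(void)
{
        assert(unpack_updown(pack_updown(0x8)) == 0x8);  /* fast-flavor index */
        assert(unpack_updown(pack_updown(0x1)) == 0x1);  /* normal-flavor index */
        return 0;
}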
@@ -2210,7 +2274,8 @@ static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp
        rtorsp->started = cur_ops->get_gp_seq();
        rtorsp->ts = rcu_trace_clock_local();
        rtorsp->p = rcu_dereference_check(rcu_torture_current,
-                                         !cur_ops->readlock_held || cur_ops->readlock_held());
+                                         !cur_ops->readlock_held || cur_ops->readlock_held() ||
+                                         (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN));
        if (rtorsp->p == NULL) {
                /* Wait for rcu_torture_writer to get underway */
                rcutorture_one_extend(&rtorsp->readstate, 0, myid < 0, trsp, rtorsp->rtrsp);
@@ -2379,6 +2444,121 @@ rcu_torture_reader(void *arg)
        return 0;
 }
 
+struct rcu_torture_one_read_state_updown {
+       struct hrtimer rtorsu_hrt;
+       bool rtorsu_inuse;
+       struct torture_random_state rtorsu_trs;
+       struct rcu_torture_one_read_state rtorsu_rtors;
+};
+
+static struct rcu_torture_one_read_state_updown *updownreaders;
+static DEFINE_TORTURE_RANDOM(rcu_torture_updown_rand);
+static int rcu_torture_updown(void *arg);
+
+static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp)
+{
+       struct rcu_torture_one_read_state_updown *rtorsup;
+
+       rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt);
+       rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1);
+       smp_store_release(&rtorsup->rtorsu_inuse, false);
+       return HRTIMER_NORESTART;
+}
+
+static int rcu_torture_updown_init(void)
+{
+       int i;
+       struct torture_random_state *rand = &rcu_torture_updown_rand;
+       int ret;
+
+       if (n_up_down < 0)
+               return 0;
+       if (!srcu_torture_have_up_down()) {
+               VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives");
+               return 0;
+       }
+       updownreaders = kcalloc(n_up_down, sizeof(*updownreaders), GFP_KERNEL);
+       if (!updownreaders) {
+               VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests");
+               return -ENOMEM;
+       }
+       for (i = 0; i < n_up_down; i++) {
+               init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, rand);
+               hrtimer_setup(&updownreaders[i].rtorsu_hrt, rcu_torture_updown_hrt, CLOCK_MONOTONIC,
+                             HRTIMER_MODE_REL | HRTIMER_MODE_SOFT);
+               torture_random_init(&updownreaders[i].rtorsu_trs);
+               init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors,
+                                               &updownreaders[i].rtorsu_trs);
+       }
+       ret = torture_create_kthread(rcu_torture_updown, rand, updown_task);
+       if (ret) {
+               kfree(updownreaders);
+               updownreaders = NULL;
+       }
+       return ret;
+}
+
+static void rcu_torture_updown_cleanup(void)
+{
+       struct rcu_torture_one_read_state_updown *rtorsup;
+
+       for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) {
+               if (!smp_load_acquire(&rtorsup->rtorsu_inuse))
+                       continue;
+               if (!hrtimer_cancel(&rtorsup->rtorsu_hrt))
+                       WARN_ON_ONCE(rtorsup->rtorsu_inuse);
+
+       }
+       kfree(updownreaders);
+       updownreaders = NULL;
+}
+
+/*
+ * RCU torture up/down reader kthread, starting RCU readers in kthread
+ * context and ending them in hrtimer handlers.  Otherwise similar to
+ * rcu_torture_reader().
+ */
+static int
+rcu_torture_updown(void *arg)
+{
+       int idx;
+       int rawidx;
+       struct rcu_torture_one_read_state_updown *rtorsup;
+       ktime_t t;
+
+       VERBOSE_TOROUT_STRING("rcu_torture_updown task started");
+       do {
+               for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) {
+                       if (torture_must_stop())
+                               break;
+                       if (smp_load_acquire(&rtorsup->rtorsu_inuse))
+                               continue;
+                       init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors,
+                                                       &rtorsup->rtorsu_trs);
+                       rawidx = cur_ops->down_read();
+                       idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
+                       rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN;
+                       rtorsup->rtorsu_rtors.rtrsp++;
+                       if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors,
+                                                       &rtorsup->rtorsu_trs, -1)) {
+                               schedule_timeout_idle(HZ);
+                               continue;
+                       }
+                       smp_store_release(&rtorsup->rtorsu_inuse, true);
+                       t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // One per million.
+                       if (t < 10 * 1000)
+                               t = 200 * 1000 * 1000;
+                       hrtimer_start(&rtorsup->rtorsu_hrt, t,
+                                     HRTIMER_MODE_REL | HRTIMER_MODE_SOFT);
+               }
+               torture_hrtimeout_ms(1, 1000, &rcu_torture_updown_rand);
+               stutter_wait("rcu_torture_updown");
+       } while (!torture_must_stop());
+       rcu_torture_updown_cleanup();
+       torture_kthread_stopping("rcu_torture_updown");
+       return 0;
+}
+
 /*
  * Randomly Toggle CPUs' callback-offload state.  This uses hrtimers to
  * increase race probabilities and fuzzes the interval between toggling.
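For scale, the reader durations chosen in rcu_torture_updown() above work out as follows (a back-of-the-envelope sketch; the only assumption is that the relative ktime_t handed to hrtimer_start() is in nanoseconds): masking torture_random() with 0xfffff yields at most about 1.05 ms, and any result below 10,000 ns is stretched to 200 ms so that some readers span entire grace periods. The scan loop then sleeps via torture_hrtimeout_ms(1, 1000, ...), which matches the commit message's one-to-two-millisecond interval if the second argument is a fuzz term added on top of the 1 ms base.

/* Illustrative arithmetic only; not part of the commit. */
#include <stdio.h>

int main(void)
{
        printf("max masked delay : %.3f ms\n", 0xfffff / 1e6);             /* ~1.049 ms */
        printf("stretch threshold: %.1f us\n", (10 * 1000) / 1e3);         /* 10 us */
        printf("stretched delay  : %.0f ms\n", (200 * 1000 * 1000) / 1e6); /* 200 ms */
        return 0;
}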
@@ -2633,7 +2813,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                 "reader_flavor=%x "
                 "nocbs_nthreads=%d nocbs_toggle=%d "
                 "test_nmis=%d "
-                "preempt_duration=%d preempt_interval=%d\n",
+                "preempt_duration=%d preempt_interval=%d n_up_down=%d\n",
                 torture_type, tag, nrealreaders, nrealfakewriters,
                 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
                 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
@@ -2647,7 +2827,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                 reader_flavor,
                 nocbs_nthreads, nocbs_toggle,
                 test_nmis,
-                preempt_duration, preempt_interval);
+                preempt_duration, preempt_interval, n_up_down);
 }
 
 static int rcutorture_booster_cleanup(unsigned int cpu)
@@ -3750,6 +3930,10 @@ rcu_torture_cleanup(void)
                nocb_tasks = NULL;
        }
 
+       if (updown_task) {
+               torture_stop_kthread(rcu_torture_updown, updown_task);
+               updown_task = NULL;
+       }
        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_torture_reader,
@@ -4284,6 +4468,9 @@ rcu_torture_init(void)
        if (torture_init_error(firsterr))
                goto unwind;
 
+       firsterr = rcu_torture_updown_init();
+       if (torture_init_error(firsterr))
+               goto unwind;
        nrealnocbers = nocbs_nthreads;
        if (WARN_ON(nrealnocbers < 0))
                nrealnocbers = 1;
