Skip to content

Commit 72ed29f

Browse files
paulmckrcu and Neeraj Upadhyay
authored and committed
rcutorture: Generic test for NUM_ACTIVE_*RCU_POLL*
The rcutorture test suite has specific tests for both of the NUM_ACTIVE_RCU_POLL_OLDSTATE and NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE macros provided for RCU polled grace periods. However, with the advent of NUM_ACTIVE_SRCU_POLL_OLDSTATE, a more generic test is needed. This commit therefore adds ->poll_active and ->poll_active_full fields to the rcu_torture_ops structure and converts the existing specific tests to use these fields, when present. Signed-off-by: Paul E. McKenney <[email protected]> Signed-off-by: Neeraj Upadhyay <[email protected]>
1 parent 1bc5bb9 commit 72ed29f

File tree

1 file changed

+28
-8
lines changed

1 file changed

+28
-8
lines changed

kernel/rcu/rcutorture.c

Lines changed: 28 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -373,6 +373,8 @@ struct rcu_torture_ops {
373373
bool (*poll_need_2gp)(bool poll, bool poll_full);
374374
void (*cond_sync)(unsigned long oldstate);
375375
void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
376+
int poll_active;
377+
int poll_active_full;
376378
call_rcu_func_t call;
377379
void (*cb_barrier)(void);
378380
void (*fqs)(void);
@@ -558,6 +560,8 @@ static struct rcu_torture_ops rcu_ops = {
558560
.poll_need_2gp = rcu_poll_need_2gp,
559561
.cond_sync = cond_synchronize_rcu,
560562
.cond_sync_full = cond_synchronize_rcu_full,
563+
.poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
564+
.poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
561565
.get_gp_state_exp = get_state_synchronize_rcu,
562566
.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
563567
.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
@@ -741,6 +745,7 @@ static struct rcu_torture_ops srcu_ops = {
741745
.get_gp_state = srcu_torture_get_gp_state,
742746
.start_gp_poll = srcu_torture_start_gp_poll,
743747
.poll_gp_state = srcu_torture_poll_gp_state,
748+
.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
744749
.call = srcu_torture_call,
745750
.cb_barrier = srcu_torture_barrier,
746751
.stats = srcu_torture_stats,
@@ -783,6 +788,7 @@ static struct rcu_torture_ops srcud_ops = {
783788
.get_gp_state = srcu_torture_get_gp_state,
784789
.start_gp_poll = srcu_torture_start_gp_poll,
785790
.poll_gp_state = srcu_torture_poll_gp_state,
791+
.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
786792
.call = srcu_torture_call,
787793
.cb_barrier = srcu_torture_barrier,
788794
.stats = srcu_torture_stats,
@@ -1374,13 +1380,15 @@ rcu_torture_writer(void *arg)
13741380
int i;
13751381
int idx;
13761382
int oldnice = task_nice(current);
1377-
struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
1383+
struct rcu_gp_oldstate *rgo = NULL;
1384+
int rgo_size = 0;
13781385
struct rcu_torture *rp;
13791386
struct rcu_torture *old_rp;
13801387
static DEFINE_TORTURE_RANDOM(rand);
13811388
unsigned long stallsdone = jiffies;
13821389
bool stutter_waited;
1383-
unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];
1390+
unsigned long *ulo = NULL;
1391+
int ulo_size = 0;
13841392

13851393
// If a new stall test is added, this must be adjusted.
13861394
if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
@@ -1401,6 +1409,16 @@ rcu_torture_writer(void *arg)
14011409
torture_kthread_stopping("rcu_torture_writer");
14021410
return 0;
14031411
}
1412+
if (cur_ops->poll_active > 0) {
1413+
ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
1414+
if (!WARN_ON(!ulo))
1415+
ulo_size = cur_ops->poll_active;
1416+
}
1417+
if (cur_ops->poll_active_full > 0) {
1418+
rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
1419+
if (!WARN_ON(!rgo))
1420+
rgo_size = cur_ops->poll_active_full;
1421+
}
14041422

14051423
do {
14061424
rcu_torture_writer_state = RTWS_FIXED_DELAY;
@@ -1502,40 +1520,40 @@ rcu_torture_writer(void *arg)
15021520
break;
15031521
case RTWS_POLL_GET:
15041522
rcu_torture_writer_state = RTWS_POLL_GET;
1505-
for (i = 0; i < ARRAY_SIZE(ulo); i++)
1523+
for (i = 0; i < ulo_size; i++)
15061524
ulo[i] = cur_ops->get_comp_state();
15071525
gp_snap = cur_ops->start_gp_poll();
15081526
rcu_torture_writer_state = RTWS_POLL_WAIT;
15091527
while (!cur_ops->poll_gp_state(gp_snap)) {
15101528
gp_snap1 = cur_ops->get_gp_state();
1511-
for (i = 0; i < ARRAY_SIZE(ulo); i++)
1529+
for (i = 0; i < ulo_size; i++)
15121530
if (cur_ops->poll_gp_state(ulo[i]) ||
15131531
cur_ops->same_gp_state(ulo[i], gp_snap1)) {
15141532
ulo[i] = gp_snap1;
15151533
break;
15161534
}
1517-
WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
1535+
WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
15181536
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
15191537
&rand);
15201538
}
15211539
rcu_torture_pipe_update(old_rp);
15221540
break;
15231541
case RTWS_POLL_GET_FULL:
15241542
rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1525-
for (i = 0; i < ARRAY_SIZE(rgo); i++)
1543+
for (i = 0; i < rgo_size; i++)
15261544
cur_ops->get_comp_state_full(&rgo[i]);
15271545
cur_ops->start_gp_poll_full(&gp_snap_full);
15281546
rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
15291547
while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
15301548
cur_ops->get_gp_state_full(&gp_snap1_full);
1531-
for (i = 0; i < ARRAY_SIZE(rgo); i++)
1549+
for (i = 0; i < rgo_size; i++)
15321550
if (cur_ops->poll_gp_state_full(&rgo[i]) ||
15331551
cur_ops->same_gp_state_full(&rgo[i],
15341552
&gp_snap1_full)) {
15351553
rgo[i] = gp_snap1_full;
15361554
break;
15371555
}
1538-
WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
1556+
WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
15391557
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
15401558
&rand);
15411559
}
@@ -1617,6 +1635,8 @@ rcu_torture_writer(void *arg)
16171635
pr_alert("%s" TORTURE_FLAG
16181636
" Dynamic grace-period expediting was disabled.\n",
16191637
torture_type);
1638+
kfree(ulo);
1639+
kfree(rgo);
16201640
rcu_torture_writer_state = RTWS_STOPPING;
16211641
torture_kthread_stopping("rcu_torture_writer");
16221642
return 0;

0 commit comments

Comments (0)