@@ -471,7 +471,7 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
 	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
 		started = cur_ops->get_gp_seq();
 		ts = rcu_trace_clock_local();
-		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
+		if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
 			longdelay_ms = 5; /* Avoid triggering BH limits. */
 		mdelay(longdelay_ms);
 		rtrsp->rt_delay_ms = longdelay_ms;
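Note on this hunk: on CONFIG_PREEMPT_RT kernels, local_bh_disable() does not set the SOFTIRQ_MASK bits in preempt_count(), so the old test could miss BH-disabled sections there; softirq_count() reports BH state on both RT and non-RT builds. A minimal sketch of the difference, paraphrased from memory of include/linux/preempt.h (treat the exact definitions as an assumption, not a quote):

```c
/* Sketch, not verbatim kernel source. */
#ifdef CONFIG_PREEMPT_RT
/* RT tracks BH disabling per task, outside preempt_count(). */
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
/* Non-RT: BH state lives in the preempt counter itself. */
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#endif
```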
@@ -2001,7 +2001,7 @@ static void rcutorture_one_extend_check(char *s, int curstate, int new, int old)
 		return;
 
 	WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
-		  !(preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+		  !softirq_count(), ROEC_ARGS);
 	WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
 		  !(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
 	WARN_ONCE(cur_ops->readlock_nesting &&
@@ -2015,7 +2015,7 @@ static void rcutorture_one_extend_check(char *s, int curstate, int new, int old)
 
 	WARN_ONCE(cur_ops->extendables &&
 		  !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
-		  (preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+		  softirq_count(), ROEC_ARGS);
 
 	/*
 	 * non-preemptible RCU in a preemptible kernel uses preempt_disable()
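The two hunks above apply the same substitution to both directions of the consistency check in rcutorture_one_extend_check(): a reader claiming BH protection must see a nonzero softirq_count(), and a reader not claiming it must not. A hedged illustration of the false positive the old form produced on PREEMPT_RT (the scenario is illustrative, not taken from the commit):

```c
/* Illustrative only: a BH-flavored rcutorture reader on PREEMPT_RT. */
rcu_read_lock_bh();	/* curstate carries RCUTORTURE_RDR_BH */
/*
 * Old check: !(preempt_count() & SOFTIRQ_MASK) is true on RT because
 * BH disabling never touches preempt_count() there, so the first
 * WARN_ONCE() above fired spuriously.  New check: !softirq_count()
 * is nonzero-aware on RT too, so the warning stays quiet.
 */
rcu_read_unlock_bh();
```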
@@ -2036,6 +2036,9 @@ static void rcutorture_one_extend_check(char *s, int curstate, int new, int old)
 	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
 		mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
 
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && softirq_count())
+		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
+
 	WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) &&
 		  cur_ops->readlock_nesting() > 0, ROEC_ARGS);
 }
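The added PREEMPT_RT branch widens the set of states under which a positive readlock_nesting() is tolerated. As I understand the RT implementation in kernel/softirq.c (an assumption worth verifying against the tree), local_bh_disable() there enters an RCU read-side critical section to preserve the RCU-bh guarantee, so nesting can legitimately be nonzero whenever softirq_count() is:

```c
/* Assumed RT behavior, per kernel/softirq.c; not verbatim. */
local_bh_disable();	/* on RT this also does rcu_read_lock() */
/* readlock_nesting() > 0 here is expected, not a torture failure. */
local_bh_enable();	/* on RT this also does rcu_read_unlock() */
```

Without the extra mask bits, the final WARN_ONCE() would flag that expected nesting as an error on RT kernels.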