@@ -651,6 +651,12 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
 	 */
 	if (rdp->dynticks_nmi_nesting != 1) {
 		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
+		if (tick_nohz_full_cpu(rdp->cpu) &&
+		    rdp->dynticks_nmi_nesting == 2 &&
+		    rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+			rdp->rcu_forced_tick = true;
+			tick_dep_set_cpu(rdp->cpu, TICK_DEP_MASK_RCU);
+		}
 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
 		return;
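
A note on this hunk, not part of the diff: the new test fires on interrupt exit on a nohz_full CPU that RCU has marked as urgently needing a quiescent state (->rcu_urgent_qs) and whose tick has not already been forced back on. One detail worth flagging: tick_dep_set_cpu() takes a TICK_DEP_BIT_* index and applies BIT() to it internally, yet this hunk passes TICK_DEP_MASK_RCU while the clear side in the next hunk passes TICK_DEP_BIT_RCU, so the set here appears to target the wrong dependency bit. A small userland sketch of the difference; the _SKETCH names and the index value 4 are stand-ins, not the kernel's definitions:

	/* Toy illustration, not kernel code: a bit index vs. the derived mask. */
	#include <stdio.h>

	#define TICK_DEP_BIT_RCU_SKETCH  4u                              /* stand-in index */
	#define TICK_DEP_MASK_RCU_SKETCH (1u << TICK_DEP_BIT_RCU_SKETCH) /* 0x10 */

	int main(void)
	{
		/* What an atomic_fetch_or(BIT(bit), ...) style callee ends up setting: */
		unsigned int set_with_bit  = 1u << TICK_DEP_BIT_RCU_SKETCH;
		unsigned int set_with_mask = 1u << TICK_DEP_MASK_RCU_SKETCH;

		printf("passing the bit index sets %#x\n", set_with_bit);  /* 0x10 */
		printf("passing the mask sets     %#x\n", set_with_mask);  /* 0x10000 */
		return 0;
	}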
@@ -886,6 +892,18 @@ void rcu_irq_enter_irqson(void)
 	local_irq_restore(flags);
 }
 
+/*
+ * If the scheduler-clock interrupt was enabled on a nohz_full CPU
+ * in order to get to a quiescent state, disable it.
+ */
+void rcu_disable_tick_upon_qs(struct rcu_data *rdp)
+{
+	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
+		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+		rdp->rcu_forced_tick = false;
+	}
+}
+
 /**
  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
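
For orientation, again outside the diff: the tick layer keeps the scheduling-clock interrupt alive on a CPU for as long as any bit in its per-CPU dependency mask is set, and rcu_disable_tick_upon_qs() is the clear-side counterpart of the forcing above. The remaining hunks wire it into each place a quiescent state is reported for a CPU: rcu_report_qs_rdp(), force_qs_rnp(), and rcu_cpu_starting(). A compilable userland model of that handshake, with all names and the bit value invented for the sketch:

	/* Userland model of the TICK_DEP_BIT_RCU handshake; not the kernel API. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define RCU_DEP_BIT 4u              /* invented stand-in value */

	static atomic_uint tick_dep_mask;   /* models ts->tick_dep_mask */
	static bool rcu_forced_tick;        /* models rdp->rcu_forced_tick */

	static bool tick_can_stop(void)
	{
		return atomic_load(&tick_dep_mask) == 0;
	}

	static void rcu_force_tick_sketch(void)
	{
		if (!rcu_forced_tick) {     /* set at most once per forcing episode */
			rcu_forced_tick = true;
			atomic_fetch_or(&tick_dep_mask, 1u << RCU_DEP_BIT);
		}
	}

	static void rcu_disable_tick_upon_qs_sketch(void)
	{
		if (rcu_forced_tick) {      /* clear only what we ourselves set */
			atomic_fetch_and(&tick_dep_mask, ~(1u << RCU_DEP_BIT));
			rcu_forced_tick = false;
		}
	}

	int main(void)
	{
		rcu_force_tick_sketch();
		printf("tick may stop? %d\n", tick_can_stop());  /* 0: forced on */
		rcu_disable_tick_upon_qs_sketch();
		printf("tick may stop? %d\n", tick_can_stop());  /* 1: released */
		return 0;
	}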
@@ -1980,6 +1998,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 	if (!offloaded)
 		needwake = rcu_accelerate_cbs(rnp, rdp);
 
+	rcu_disable_tick_upon_qs(rdp);
 	rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 	/* ^^^ Released rnp->lock */
 	if (needwake)
@@ -2265,6 +2284,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
+	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rnp) {
@@ -2289,8 +2309,11 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
 			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
 			if ((rnp->qsmask & bit) != 0) {
-				if (f(per_cpu_ptr(&rcu_data, cpu)))
+				rdp = per_cpu_ptr(&rcu_data, cpu);
+				if (f(rdp)) {
 					mask |= bit;
+					rcu_disable_tick_upon_qs(rdp);
+				}
 			}
 		}
 		if (mask != 0) {
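
The restructured loop, for context: hoisting rdp out of the f() call lets the quiescent-state scan (f is dyntick_save_progress_counter() or rcu_implicit_dynticks_qs() in this file) release the forced tick for every CPU whose quiescent state it reports on that CPU's behalf. A standalone toy model of that scan-and-report shape; NCPUS, cpu_idle[], and forced_tick[] are invented sample data:

	/* Userland toy model of the force_qs_rnp() scan: apply a predicate to
	 * every CPU still blocking the grace period, accumulate a report mask,
	 * and release the forced tick for each CPU that passes. */
	#include <stdio.h>

	#define NCPUS 4

	static int cpu_idle[NCPUS]    = { 1, 0, 1, 0 };  /* invented sample data */
	static int forced_tick[NCPUS] = { 1, 1, 0, 0 };

	static int f_sketch(int cpu)  /* stands in for f(), e.g. a dyntick check */
	{
		return cpu_idle[cpu];
	}

	int main(void)
	{
		unsigned long qsmask = 0xf;  /* CPUs the grace period still waits on */
		unsigned long mask = 0;

		for (int cpu = 0; cpu < NCPUS; cpu++) {
			if (!(qsmask & (1UL << cpu)))
				continue;
			if (f_sketch(cpu)) {          /* QS observed on this CPU */
				mask |= 1UL << cpu;
				forced_tick[cpu] = 0;     /* mirrors rcu_disable_tick_upon_qs() */
			}
		}
		printf("reporting mask %#lx\n", mask);  /* 0x5 for the data above */
		return 0;
	}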
@@ -2318,7 +2341,7 @@ void rcu_force_quiescent_state(void)
 	rnp = __this_cpu_read(rcu_data.mynode);
 	for (; rnp != NULL; rnp = rnp->parent) {
 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
-		      !raw_spin_trylock(&rnp->fqslock);
+		       !raw_spin_trylock(&rnp->fqslock);
 		if (rnp_old != NULL)
 			raw_spin_unlock(&rnp_old->fqslock);
 		if (ret)
@@ -2851,7 +2874,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 {
 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
 		rcu_barrier_trace(TPS("LastCB"), -1,
-				   rcu_state.barrier_sequence);
+				  rcu_state.barrier_sequence);
 		complete(&rcu_state.barrier_completion);
 	} else {
 		rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
@@ -2875,7 +2898,7 @@ static void rcu_barrier_func(void *unused)
 	} else {
 		debug_rcu_head_unqueue(&rdp->barrier_head);
 		rcu_barrier_trace(TPS("IRQNQ"), -1,
-				   rcu_state.barrier_sequence);
+				  rcu_state.barrier_sequence);
 	}
 	rcu_nocb_unlock(rdp);
 }
@@ -2902,7 +2925,7 @@ void rcu_barrier(void)
 	/* Did someone else do our work for us? */
 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
 		rcu_barrier_trace(TPS("EarlyExit"), -1,
-				   rcu_state.barrier_sequence);
+				  rcu_state.barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rcu_state.barrier_mutex);
 		return;
@@ -2934,11 +2957,11 @@ void rcu_barrier(void)
 			continue;
 		if (rcu_segcblist_n_cbs(&rdp->cblist)) {
 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
-					   rcu_state.barrier_sequence);
+					  rcu_state.barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
 		} else {
 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
-					   rcu_state.barrier_sequence);
+					  rcu_state.barrier_sequence);
 		}
 	}
 	put_online_cpus();
@@ -3160,6 +3183,7 @@ void rcu_cpu_starting(unsigned int cpu)
 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
 	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+		rcu_disable_tick_upon_qs(rdp);
 		/* Report QS -after- changing ->qsmaskinitnext! */
 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
 	} else {