
Commit 66e4c33

rcu: Force tick on for nohz_full CPUs not reaching quiescent states
CPUs running for long time periods in the kernel in nohz_full mode might leave the scheduling-clock interrupt disabled for the full duration of their in-kernel execution. This can (among other things) delay grace periods. This commit therefore forces the tick back on for any nohz_full CPU that is failing to pass through a quiescent state upon return from interrupt, which resched_cpu() will induce.

Reported-by: Joel Fernandes <[email protected]>
[ paulmck: Clear ->rcu_forced_tick as reported by Joel Fernandes testing. ]
[ paulmck: Apply Joel Fernandes TICK_DEP_MASK_RCU->TICK_DEP_BIT_RCU fix. ]
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 79ba7ff commit 66e4c33
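For illustration, here is a minimal standalone C model of the mechanism described above: force the tick on when a grace period urgently needs a quiescent state from a tickless CPU, and drop the forced-tick dependency once that CPU reports one. This is not kernel code; struct cpu_state and the *_stub() helpers are stand-ins for struct rcu_data and the real tick-dependency API, and the interrupt-nesting check and locking from the actual patch are omitted.

/* Standalone model of the force-tick-on / clear-upon-QS pairing.
 * NOT kernel code: the stub helpers below stand in for
 * tick_dep_set_cpu()/tick_dep_clear_cpu(). */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
        int cpu;
        bool nohz_full;        /* CPU runs without the periodic tick. */
        bool rcu_urgent_qs;    /* A grace period is waiting on this CPU. */
        bool rcu_forced_tick;  /* The tick was forced back on by RCU. */
};

static void tick_dep_set_cpu_stub(int cpu)
{
        printf("tick forced on for CPU %d\n", cpu);
}

static void tick_dep_clear_cpu_stub(int cpu)
{
        printf("tick dependency cleared for CPU %d\n", cpu);
}

/* Models the irq-exit check: if a tickless CPU is urgently needed
 * for a quiescent state and its tick is off, turn the tick back on. */
static void maybe_force_tick(struct cpu_state *cs)
{
        if (cs->nohz_full && cs->rcu_urgent_qs && !cs->rcu_forced_tick) {
                cs->rcu_forced_tick = true;
                tick_dep_set_cpu_stub(cs->cpu);
        }
}

/* Models rcu_disable_tick_upon_qs(): once the CPU reports a quiescent
 * state, drop the forced-tick dependency again. */
static void disable_tick_upon_qs(struct cpu_state *cs)
{
        if (cs->nohz_full && cs->rcu_forced_tick) {
                tick_dep_clear_cpu_stub(cs->cpu);
                cs->rcu_forced_tick = false;
        }
}

int main(void)
{
        struct cpu_state cs = { .cpu = 3, .nohz_full = true, .rcu_urgent_qs = true };

        maybe_force_tick(&cs);     /* irq exit notices the stalled grace period */
        disable_tick_upon_qs(&cs); /* later, the CPU reports a quiescent state */
        return 0;
}

The point of the pairing is that the rcu_forced_tick flag records whether this CPU owes a clear of the tick dependency, so the clear happens exactly once per forced set.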

File tree

2 files changed: 32 additions & 7 deletions


kernel/rcu/tree.c

Lines changed: 31 additions & 7 deletions
@@ -651,6 +651,12 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
          */
         if (rdp->dynticks_nmi_nesting != 1) {
                 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
+                if (tick_nohz_full_cpu(rdp->cpu) &&
+                    rdp->dynticks_nmi_nesting == 2 &&
+                    rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+                        rdp->rcu_forced_tick = true;
+                        tick_dep_set_cpu(rdp->cpu, TICK_DEP_MASK_RCU);
+                }
                 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
                            rdp->dynticks_nmi_nesting - 2);
                 return;
@@ -886,6 +892,18 @@ void rcu_irq_enter_irqson(void)
         local_irq_restore(flags);
 }
 
+/*
+ * If the scheduler-clock interrupt was enabled on a nohz_full CPU
+ * in order to get to a quiescent state, disable it.
+ */
+void rcu_disable_tick_upon_qs(struct rcu_data *rdp)
+{
+        if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
+                tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+                rdp->rcu_forced_tick = false;
+        }
+}
+
 /**
  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
@@ -1980,6 +1998,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
                 if (!offloaded)
                         needwake = rcu_accelerate_cbs(rnp, rdp);
 
+                rcu_disable_tick_upon_qs(rdp);
                 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
                 /* ^^^ Released rnp->lock */
                 if (needwake)
@@ -2265,6 +2284,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
         int cpu;
         unsigned long flags;
         unsigned long mask;
+        struct rcu_data *rdp;
         struct rcu_node *rnp;
 
         rcu_for_each_leaf_node(rnp) {
@@ -2289,8 +2309,11 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
                 for_each_leaf_node_possible_cpu(rnp, cpu) {
                         unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
                         if ((rnp->qsmask & bit) != 0) {
-                                if (f(per_cpu_ptr(&rcu_data, cpu)))
+                                rdp = per_cpu_ptr(&rcu_data, cpu);
+                                if (f(rdp)) {
                                         mask |= bit;
+                                        rcu_disable_tick_upon_qs(rdp);
+                                }
                         }
                 }
                 if (mask != 0) {
@@ -2318,7 +2341,7 @@ void rcu_force_quiescent_state(void)
         rnp = __this_cpu_read(rcu_data.mynode);
         for (; rnp != NULL; rnp = rnp->parent) {
                 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
-                       !raw_spin_trylock(&rnp->fqslock);
+                      !raw_spin_trylock(&rnp->fqslock);
                 if (rnp_old != NULL)
                         raw_spin_unlock(&rnp_old->fqslock);
                 if (ret)
@@ -2851,7 +2874,7 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 {
         if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
                 rcu_barrier_trace(TPS("LastCB"), -1,
-                                   rcu_state.barrier_sequence);
+                                  rcu_state.barrier_sequence);
                 complete(&rcu_state.barrier_completion);
         } else {
                 rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
@@ -2875,7 +2898,7 @@ static void rcu_barrier_func(void *unused)
         } else {
                 debug_rcu_head_unqueue(&rdp->barrier_head);
                 rcu_barrier_trace(TPS("IRQNQ"), -1,
-                                   rcu_state.barrier_sequence);
+                                  rcu_state.barrier_sequence);
         }
         rcu_nocb_unlock(rdp);
 }
@@ -2902,7 +2925,7 @@ void rcu_barrier(void)
         /* Did someone else do our work for us? */
         if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
                 rcu_barrier_trace(TPS("EarlyExit"), -1,
-                                   rcu_state.barrier_sequence);
+                                  rcu_state.barrier_sequence);
                 smp_mb(); /* caller's subsequent code after above check. */
                 mutex_unlock(&rcu_state.barrier_mutex);
                 return;
@@ -2934,11 +2957,11 @@ void rcu_barrier(void)
                         continue;
                 if (rcu_segcblist_n_cbs(&rdp->cblist)) {
                         rcu_barrier_trace(TPS("OnlineQ"), cpu,
-                                           rcu_state.barrier_sequence);
+                                          rcu_state.barrier_sequence);
                         smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
                 } else {
                         rcu_barrier_trace(TPS("OnlineNQ"), cpu,
-                                           rcu_state.barrier_sequence);
+                                          rcu_state.barrier_sequence);
                 }
         }
         put_online_cpus();
@@ -3160,6 +3183,7 @@ void rcu_cpu_starting(unsigned int cpu)
         rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
         rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
         if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+                rcu_disable_tick_upon_qs(rdp);
                 /* Report QS -after- changing ->qsmaskinitnext! */
                 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
         } else {

kernel/rcu/tree.h

Lines changed: 1 addition & 0 deletions
@@ -181,6 +181,7 @@ struct rcu_data {
         atomic_t dynticks;              /* Even value for idle, else odd. */
         bool rcu_need_heavy_qs;         /* GP old, so heavy quiescent state! */
         bool rcu_urgent_qs;             /* GP old need light quiescent state. */
+        bool rcu_forced_tick;           /* Forced tick to provide QS. */
 #ifdef CONFIG_RCU_FAST_NO_HZ
         bool all_lazy;                  /* All CPU's CBs lazy at idle start? */
         unsigned long last_accelerate;  /* Last jiffy CBs were accelerated. */
