Commit 127e298 (parent 0f11ad3)

rcu: Make rcu_barrier() account for offline no-CBs CPUs

Currently, rcu_barrier() ignores offline CPUs. However, it is possible
for an offline no-CBs CPU to have callbacks queued, and rcu_barrier()
must wait for those callbacks. This commit therefore makes rcu_barrier()
directly invoke rcu_barrier_func() with interrupts disabled for such
CPUs. This requires passing the CPU number into this function so that
it can entrain the rcu_barrier() callback onto the correct CPU's
callback list, given that the code must instead execute on the current
CPU.

While in the area, this commit fixes a bug where the first CPU's
callback might have been invoked before rcu_segcblist_entrain()
returned, which would also result in an early wakeup.

Fixes: 5d6742b ("rcu/nocb: Use rcu_segcblist for no-CBs CPUs")
Signed-off-by: Paul E. McKenney <[email protected]>
[ paulmck: Apply optimization feedback from Boqun Feng. ]
Cc: <[email protected]> # 5.5.x
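
The early-wakeup fix hinges on starting the counter at two. Below is a
minimal user-space model of the counting scheme, not kernel code:
barrier_cpu_count, invoke_callback(), and the fetch-and-subtract calls
are illustrative stand-ins for the kernel's atomic counter,
rcu_barrier_callback(), and atomic_dec_and_test()/atomic_sub_and_test().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static bool barrier_complete;

/* Models rcu_barrier_callback(): the decrement that reaches zero
 * signals completion, as atomic_dec_and_test() does in the kernel. */
static void invoke_callback(void)
{
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		barrier_complete = true;
}

int main(void)
{
	int ncbs = 3;	/* one barrier callback per CPU with callbacks queued */

	/*
	 * Start at 2, not 1.  Each callback is entrained first and
	 * counted second, so a callback that runs before its own
	 * increment dips the count to 1, never to 0, and the barrier
	 * cannot complete early.  With an initial count of 1, that
	 * same race would reach 0 and wake the waiter too soon.
	 */
	atomic_store(&barrier_cpu_count, 2);

	for (int i = 0; i < ncbs; i++)
		atomic_fetch_add(&barrier_cpu_count, 1);  /* after entraining */

	/* Drop the initial count, mirroring atomic_sub_and_test(2, ...). */
	if (atomic_fetch_sub(&barrier_cpu_count, 2) == 2)
		barrier_complete = true;

	for (int i = 0; i < ncbs; i++)
		invoke_callback();	/* grace period ends; callbacks run */

	printf("barrier complete: %d\n", barrier_complete);  /* prints 1 */
	return 0;
}
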
File tree: 2 files changed, +25 −12 lines

include/trace/events/rcu.h (1 addition, 0 deletions)

@@ -712,6 +712,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
  * "Begin": rcu_barrier() started.
  * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
  * "Inc1": rcu_barrier() piggyback check counter incremented.
+ * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
  * "OnlineQ": rcu_barrier() found online CPU with callbacks.
  * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
  * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.

kernel/rcu/tree.c (24 additions, 12 deletions)

@@ -3097,9 +3097,10 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
 /*
  * Called with preemption disabled, and from cross-cpu IRQ context.
  */
-static void rcu_barrier_func(void *unused)
+static void rcu_barrier_func(void *cpu_in)
 {
-	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
+	uintptr_t cpu = (uintptr_t)cpu_in;
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 
 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
 	rdp->barrier_head.func = rcu_barrier_callback;
@@ -3126,7 +3127,7 @@ static void rcu_barrier_func(void *unused)
  */
 void rcu_barrier(void)
 {
-	int cpu;
+	uintptr_t cpu;
 	struct rcu_data *rdp;
 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
 
@@ -3149,13 +3150,14 @@ void rcu_barrier(void)
 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
 
 	/*
-	 * Initialize the count to one rather than to zero in order to
-	 * avoid a too-soon return to zero in case of a short grace period
-	 * (or preemption of this task). Exclude CPU-hotplug operations
-	 * to ensure that no offline CPU has callbacks queued.
+	 * Initialize the count to two rather than to zero in order
+	 * to avoid a too-soon return to zero in case of an immediate
+	 * invocation of the just-enqueued callback (or preemption of
+	 * this task). Exclude CPU-hotplug operations to ensure that no
+	 * offline non-offloaded CPU has callbacks queued.
 	 */
 	init_completion(&rcu_state.barrier_completion);
-	atomic_set(&rcu_state.barrier_cpu_count, 1);
+	atomic_set(&rcu_state.barrier_cpu_count, 2);
 	get_online_cpus();
 
 	/*
@@ -3165,13 +3167,23 @@ void rcu_barrier(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		if (!cpu_online(cpu) &&
+		if (cpu_is_offline(cpu) &&
 		    !rcu_segcblist_is_offloaded(&rdp->cblist))
 			continue;
-		if (rcu_segcblist_n_cbs(&rdp->cblist)) {
+		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
 					  rcu_state.barrier_sequence);
-			smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
+			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
+		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
+			   cpu_is_offline(cpu)) {
+			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
+					  rcu_state.barrier_sequence);
+			local_irq_disable();
+			rcu_barrier_func((void *)cpu);
+			local_irq_enable();
+		} else if (cpu_is_offline(cpu)) {
+			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
+					  rcu_state.barrier_sequence);
 		} else {
 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
 					  rcu_state.barrier_sequence);
@@ -3183,7 +3195,7 @@ void rcu_barrier(void)
 	 * Now that we have an rcu_barrier_callback() callback on each
 	 * CPU, and thus each counted, remove the initial count.
 	 */
-	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
+	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
		complete(&rcu_state.barrier_completion);
 
 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
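
One small idiom worth noting from the hunks above: the CPU number now
travels through smp_call_function_single()'s void * argument, and the
local variable changes from int to uintptr_t so the integer-to-pointer
round trip is well defined at pointer width. A minimal user-space
sketch of the same idiom follows; func() is an illustrative stand-in
for rcu_barrier_func(), and the direct call models both the cross-CPU
and the new interrupts-disabled invocation paths.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for rcu_barrier_func(): recovers the CPU number that was
 * passed through the void * argument. */
static void func(void *cpu_in)
{
	uintptr_t cpu = (uintptr_t)cpu_in;

	printf("entraining barrier callback for CPU %ju\n", (uintmax_t)cpu);
}

int main(void)
{
	uintptr_t cpu = 3;	/* uintptr_t, so (void *)cpu loses no bits */

	/* Models smp_call_function_single(cpu, func, (void *)cpu, 1)
	 * and the direct rcu_barrier_func((void *)cpu) call alike. */
	func((void *)cpu);
	return 0;
}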
