Commit a773abf
Merge tag 'rcu-urgent.2022.01.26a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull RCU fix from Paul McKenney:

"This fixes a brown-paper-bag bug in RCU tasks that causes things like
BPF and ftrace to fail miserably on systems with non-power-of-two
numbers of CPUs.

It fixes a math error added in 7a30871 ("rcu-tasks: Introduce
->percpu_enqueue_shift for dynamic queue selection") during the v5.17
merge window. That commit works correctly only on systems with a
power-of-two number of CPUs, which just so happens to be the kind that
rcutorture always uses by default.

This pull request fixes the math so that things also work on systems
that don't happen to have a power-of-two number of CPUs."

* tag 'rcu-urgent.2022.01.26a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  rcu-tasks: Fix computation of CPU-to-list shift counts
2 parents: 56a14c6 + da12301
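For context on the math error: RCU Tasks maps a queuing CPU to a
callback-list index by right-shifting the CPU number by
->percpu_enqueue_shift, and the kernel's ilog2() rounds down. A minimal
userspace sketch (ilog2_u() is a stand-in for ilog2(); the CPU count
and list limit are example values, not taken from the commit) shows how
the rounded-down shift from the pre-fix code sends the highest-numbered
CPUs past the last valid list whenever the CPU count is not a power of
two:

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(v)). */
static int ilog2_u(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int nr_cpu_ids = 6;	/* example: non-power-of-two CPU count */
	unsigned int lim = 1;		/* example: single callback list */
	int shift = ilog2_u(nr_cpu_ids / lim);	/* the pre-fix computation */

	for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
		printf("cpu %u -> list %u%s\n", cpu, cpu >> shift,
		       (cpu >> shift) >= lim ? "  <-- out of range!" : "");
	return 0;
}

With six CPUs and a single list, the old computation yields
shift = ilog2(6) = 2, so CPUs 4 and 5 map to list 1 even though only
list 0 exists.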

kernel/rcu/tasks.h

Lines changed: 8 additions & 4 deletions
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name = \
 	.call_func = call,						\
 	.rtpcpu = &rt_name ## __percpu,					\
 	.name = n,							\
-	.percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS),			\
+	.percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS) + 1,		\
 	.percpu_enqueue_lim = 1,					\
 	.percpu_dequeue_lim = 1,					\
 	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),	\
@@ -216,6 +216,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 	int cpu;
 	unsigned long flags;
 	int lim;
+	int shift;
 
 	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 	if (rcu_task_enqueue_lim < 0) {
@@ -229,7 +230,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 
 	if (lim > nr_cpu_ids)
 		lim = nr_cpu_ids;
-	WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
+	shift = ilog2(nr_cpu_ids / lim);
+	if (((nr_cpu_ids - 1) >> shift) >= lim)
+		shift++;
+	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
 	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
 	smp_store_release(&rtp->percpu_enqueue_lim, lim);
 	for_each_possible_cpu(cpu) {
@@ -298,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 	if (unlikely(needadjust)) {
 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
-			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
 			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
 			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
 			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
@@ -413,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
 		if (rtp->percpu_enqueue_lim > 1) {
-			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
 			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
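To see why the patched computation in the cblist_init_generic() hunk is
sufficient, here is a standalone sketch (same hypothetical ilog2_u()
stand-in as above, example CPU counts only) that mirrors the fix:
compute the floor-log shift, bump it once if the highest-numbered CPU
would still index at or past lim, and assert that every CPU then lands
on a valid list:

#include <assert.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(v)). */
static int ilog2_u(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors the patched computation from cblist_init_generic(). */
static int enqueue_shift(unsigned int nr_cpu_ids, unsigned int lim)
{
	int shift = ilog2_u(nr_cpu_ids / lim);

	if (((nr_cpu_ids - 1) >> shift) >= lim)
		shift++;
	return shift;
}

int main(void)
{
	/* Example CPU-count/list-limit pairs, including non-powers of two. */
	static const unsigned int cases[][2] = { {6, 1}, {6, 2}, {12, 3}, {7, 7} };

	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		unsigned int nr = cases[i][0], lim = cases[i][1];
		int shift = enqueue_shift(nr, lim);

		for (unsigned int cpu = 0; cpu < nr; cpu++)
			assert((cpu >> shift) < lim);	/* never past the last list */
	}
	return 0;
}

A single increment always suffices: floor(nr_cpu_ids / lim) < 2^(shift+1)
implies nr_cpu_ids - 1 < lim * 2^(shift+1), so
(nr_cpu_ids - 1) >> (shift + 1) < lim.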
