Commit cb88f7f

rcu-tasks: Permit use of debug-objects with RCU Tasks flavors
Currently, cblist_init_generic() holds a raw spinlock when invoking INIT_WORK(). This fails in kernels built with CONFIG_DEBUG_OBJECTS=y due to memory allocation being forbidden while holding a raw spinlock. But the only reason for holding the raw spinlock is to synchronize with early boot calls to call_rcu_tasks(), call_rcu_tasks_rude(), and, last but not least, call_rcu_tasks_trace(). These calls also invoke cblist_init_generic() in order to support early boot queueing of callbacks.

Except that there are no early boot calls to any of these three functions, and the BPF guys confirm that they have no plans to add any such calls.

This commit therefore removes the synchronization and adds a WARN_ON_ONCE() to catch the case of now-prohibited early boot queueing of RCU Tasks callbacks.

If early boot queueing is needed, an "initialized" flag may be added to the rcu_tasks structure. Then queueing a callback before this flag is set would initialize the callback list (if needed) and queue the callback. The decision as to where to queue the callback given the possibility of non-zero boot CPUs is left as an exercise for the reader.

Reported-by: Jakub Kicinski <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 84dd7f1 commit cb88f7f
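
For the "initialized" flag idea mentioned in the last paragraph of the commit message, a minimal sketch is shown below. It is purely illustrative and not part of this commit: the initialized field, where it would be set, and the choice of which CPU's list to use during early boot are all assumptions that the commit message deliberately leaves open.

/* Hypothetical sketch, not implemented by this commit. */
struct rcu_tasks {
        /* ... existing fields ... */
        bool initialized;       /* Assumed flag: set once cblist_init_generic() completes. */
};

static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
                                   struct rcu_tasks *rtp)
{
        /* ... */
        if (unlikely(!READ_ONCE(rtp->initialized))) {
                /* Early boot: initialize the chosen CPU's list in place ... */
                if (rcu_segcblist_empty(&rtpcp->cblist))
                        rcu_segcblist_init(&rtpcp->cblist);
                /* ... then queue the callback.  Which CPU's list to use given
                   non-zero boot CPUs is the "exercise for the reader" above. */
                rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
        }
        /* ... */
}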

File tree

1 file changed: 6 additions, 10 deletions

kernel/rcu/tasks.h

Lines changed: 6 additions & 10 deletions
@@ -236,15 +236,14 @@ static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
 #endif /* #ifndef CONFIG_TINY_RCU */
 
 // Initialize per-CPU callback lists for the specified flavor of
-// Tasks RCU.
+// Tasks RCU. Do not enqueue callbacks before this function is invoked.
 static void cblist_init_generic(struct rcu_tasks *rtp)
 {
         int cpu;
         unsigned long flags;
         int lim;
         int shift;
 
-        raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
         if (rcu_task_enqueue_lim < 0) {
                 rcu_task_enqueue_lim = 1;
                 rcu_task_cb_adjust = true;
@@ -267,17 +266,16 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
                 WARN_ON_ONCE(!rtpcp);
                 if (cpu)
                         raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
-                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
+                local_irq_save(flags); // serialize initialization
                 if (rcu_segcblist_empty(&rtpcp->cblist))
                         rcu_segcblist_init(&rtpcp->cblist);
+                local_irq_restore(flags);
                 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
                 rtpcp->cpu = cpu;
                 rtpcp->rtpp = rtp;
                 if (!rtpcp->rtp_blkd_tasks.next)
                         INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
-                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
         }
-        raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
 
         pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
                 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
@@ -351,11 +349,9 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
                     READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
                         needadjust = true; // Defer adjustment to avoid deadlock.
         }
-        if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
-                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
-                cblist_init_generic(rtp);
-                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
-        }
+        // Queuing callbacks before initialization not yet supported.
+        if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
+                rcu_segcblist_init(&rtpcp->cblist);
         needwake = (func == wakeme_after_rcu) ||
                    (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
         if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
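
The net effect on cblist_init_generic() is that interrupts are disabled only around the callback-list initialization, while INIT_WORK() now runs with no raw spinlock held. A condensed view assembled from the hunks above (surrounding loop code elided; the comment paraphrases the commit message rather than anything present in the file):

        local_irq_save(flags);                  // serialize initialization
        if (rcu_segcblist_empty(&rtpcp->cblist))
                rcu_segcblist_init(&rtpcp->cblist);
        local_irq_restore(flags);
        /* No raw spinlock is held here, so INIT_WORK() may allocate its
           debug-objects tracking state when CONFIG_DEBUG_OBJECTS=y. */
        INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);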
