
Commit 174caf7

Frederic Weisbecker authored and gregkh committed
rcu/nocb: Make IRQs disablement symmetric
[ Upstream commit b913c3f ]

Currently IRQs are disabled on call_rcu() and then depending on the
context:

* If the CPU is in nocb mode:

  - If the callback is enqueued in the bypass list, IRQs are re-enabled
    implicitly by rcu_nocb_try_bypass()

  - If the callback is enqueued in the normal list, IRQs are re-enabled
    implicitly by __call_rcu_nocb_wake()

* If the CPU is NOT in nocb mode, IRQs are re-enabled explicitly from
  call_rcu()

This makes the code a bit hard to follow, especially as it interleaves
with nocb locking.

To make the IRQ flags coverage clearer and also in order to prepare for
moving all the nocb enqueue code to its own function, always re-enable
the IRQ flags explicitly from call_rcu().

Reviewed-by: Neeraj Upadhyay (AMD) <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
Reviewed-by: Paul E. McKenney <[email protected]>
Signed-off-by: Boqun Feng <[email protected]>
Stable-dep-of: f7345cc ("rcu/nocb: Fix rcuog wake-up from offline softirq")
Signed-off-by: Sasha Levin <[email protected]>
1 parent 8bb79eb commit 174caf7
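To see the shape of the change at a glance, here is a minimal userspace model of the "symmetric IRQ disablement" pattern this commit introduces: whoever disables IRQs also restores them, on every return path, instead of delegating the restore to callees. All names below are illustrative stand-ins, not the kernel's real implementation.

/* Model: one explicit disable in the caller, one explicit restore on
 * every exit path, mirroring __call_rcu_common() after this patch. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long local_irq_save(void)
{
	puts("irqs disabled");
	return 0;	/* stand-in for the saved flags */
}

static void local_irq_restore(unsigned long flags)
{
	(void)flags;
	puts("irqs re-enabled");
}

/* Stand-in for rcu_nocb_try_bypass(): after the patch it no longer
 * re-enables IRQs on behalf of its caller. */
static bool try_bypass(bool enqueue_on_bypass)
{
	return enqueue_on_bypass;
}

static void call_rcu_model(bool enqueue_on_bypass)
{
	unsigned long flags = local_irq_save();	/* one explicit disable... */

	if (try_bypass(enqueue_on_bypass)) {
		local_irq_restore(flags);	/* ...restored here, */
		return;
	}
	/* ...otherwise enqueue on the normal list... */
	local_irq_restore(flags);		/* ...or here, symmetrically. */
}

int main(void)
{
	call_rcu_model(true);	/* bypass path */
	call_rcu_model(false);	/* normal path */
	return 0;
}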

File tree

2 files changed: +15 -14 lines

kernel/rcu/tree.c

Lines changed: 6 additions & 3 deletions

@@ -2727,8 +2727,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 	}
 
 	check_cb_ovld(rdp);
-	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
+	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
+		local_irq_restore(flags);
 		return; // Enqueued onto ->nocb_bypass, so just leave.
+	}
 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
 	rcu_segcblist_enqueue(&rdp->cblist, head);
 	if (__is_kvfree_rcu_offset((unsigned long)func))
@@ -2746,8 +2748,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
 	} else {
 		__call_rcu_core(rdp, head, flags);
-		local_irq_restore(flags);
 	}
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_RCU_LAZY
@@ -4626,8 +4628,9 @@ void rcutree_migrate_callbacks(int cpu)
 		__call_rcu_nocb_wake(my_rdp, true, flags);
 	} else {
 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
-		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
+		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
 	}
+	local_irq_restore(flags);
 	if (needwake)
 		rcu_gp_kthread_wake();
 	lockdep_assert_irqs_enabled();
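Note how the rcutree_migrate_callbacks() hunk applies the same discipline: rcu_nocb_unlock() and raw_spin_unlock_rcu_node() both now leave IRQs disabled, and the single local_irq_restore(flags) after the if/else re-enables them on every path, which is exactly what the trailing lockdep_assert_irqs_enabled() verifies.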

kernel/rcu/tree_nocb.h

Lines changed: 9 additions & 11 deletions

@@ -516,9 +516,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	// 2. Both of these conditions are met:
 	//    a. The bypass list previously had only lazy CBs, and:
 	//    b. The new CB is non-lazy.
-	if (ncbs && (!bypass_is_lazy || lazy)) {
-		local_irq_restore(flags);
-	} else {
+	if (!ncbs || (bypass_is_lazy && !lazy)) {
 		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
 		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
@@ -528,7 +526,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 		} else {
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("FirstBQnoWake"));
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 		}
 	}
 	return true; // Callback already enqueued.
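The rewritten guard above is the De Morgan negation of the old ncbs && (!bypass_is_lazy || lazy) test: the branch whose only work was local_irq_restore() becomes empty and is dropped, so the condition is inverted to keep just the wake-up branch. A quick standalone equivalence check (illustrative only, not part of the commit):

/* Exhaustively confirm that the new guard in rcu_nocb_try_bypass() is
 * exactly the negation of the old one, so dropping the now-empty branch
 * and inverting the condition preserves behavior. */
#include <assert.h>
#include <stdbool.h>

int main(void)
{
	for (int ncbs = 0; ncbs <= 1; ncbs++) {
		for (int bypass_is_lazy = 0; bypass_is_lazy <= 1; bypass_is_lazy++) {
			for (int lazy = 0; lazy <= 1; lazy++) {
				bool old_guard = ncbs && (!bypass_is_lazy || lazy);
				bool new_guard = !ncbs || (bypass_is_lazy && !lazy);
				assert(new_guard == !old_guard);
			}
		}
	}
	return 0;
}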
@@ -554,7 +552,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 	// If we are being polled or there is no kthread, just leave.
 	t = READ_ONCE(rdp->nocb_gp_kthread);
 	if (rcu_nocb_poll || !t) {
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		rcu_nocb_unlock(rdp);
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 				    TPS("WakeNotPoll"));
 		return;
@@ -567,17 +565,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 		rdp->qlen_last_fqs_check = len;
 		// Only lazy CBs in bypass list
 		if (lazy_len && bypass_len == lazy_len) {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
 					   TPS("WakeLazy"));
 		} else if (!irqs_disabled_flags(flags)) {
 			/* ... if queue was empty ... */
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp(rdp, false);
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
 					    TPS("WakeEmpty"));
 		} else {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
 					   TPS("WakeEmptyIsDeferred"));
 		}
@@ -595,15 +593,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
 		if ((rdp->nocb_cb_sleep ||
 		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
 		    !timer_pending(&rdp->nocb_timer)) {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
 					   TPS("WakeOvfIsDeferred"));
 		} else {
-			rcu_nocb_unlock_irqrestore(rdp, flags);
+			rcu_nocb_unlock(rdp);
 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
 		}
 	} else {
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		rcu_nocb_unlock(rdp);
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
 	}
 }
