
Commit b41642c

Joel Fernandes authored and Neeraj Upadhyay (AMD) committed
rcu: Fix rcu_read_unlock() deadloop due to IRQ work
During rcu_read_unlock_special(), if this happens during irq_exit(), we can lock up if an IPI is issued. This is because the IPI itself triggers the irq_exit() path, causing a recursive lockup.

This is precisely what Xiongfeng found when invoking a BPF program on the trace_tick_stop() tracepoint, as shown in the trace below. Fix by managing the irq_work state correctly.

   irq_exit()
     __irq_exit_rcu()
       /* in_hardirq() returns false after this */
       preempt_count_sub(HARDIRQ_OFFSET)
       tick_irq_exit()
         tick_nohz_irq_exit()
           tick_nohz_stop_sched_tick()
             trace_tick_stop()  /* a bpf prog is hooked on this trace point */
               __bpf_trace_tick_stop()
                 bpf_trace_run2()
                   rcu_read_unlock_special()
                     /* will send an IPI to itself */
                     irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);

A simple reproducer can also be obtained by doing the following in tick_irq_exit(). It will hang on boot without the patch:

  static inline void tick_irq_exit(void)
  {
 +	rcu_read_lock();
 +	WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
 +	rcu_read_unlock();
 +

Reported-by: Xiongfeng Wang <[email protected]>
Closes: https://lore.kernel.org/all/[email protected]/
Tested-by: Qi Xi <[email protected]>
Signed-off-by: Joel Fernandes <[email protected]>
Reviewed-by: "Paul E. McKenney" <[email protected]>
Reported-by: Linux Kernel Functional Testing <[email protected]>
[neeraj: Apply Frederic's suggested fix for PREEMPT_RT]
Signed-off-by: Neeraj Upadhyay (AMD) <[email protected]>
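To see why this loops, note that the self-IPI is itself delivered as an interrupt, so its exit path runs irq_exit() again and re-enters the same tracepoint. Below is a minimal userspace C sketch of that cycle; the function names are stand-ins modeled on the trace above, not the kernel implementations, and the depth cap stands in for what would otherwise recurse until the CPU locks up:

  #include <stdio.h>

  static int pending_self_ipi;

  /* Stand-in for rcu_read_unlock_special(): in the buggy flow it queues
   * an IRQ work on the local CPU, i.e. a self-IPI. */
  static void rcu_read_unlock_special_model(void)
  {
  	pending_self_ipi = 1;
  }

  /* Stand-in for irq_exit(): tick_irq_exit() fires trace_tick_stop(),
   * the hooked BPF program does rcu_read_lock()/rcu_read_unlock(), and
   * the unlock queues the self-IPI. Delivering that IPI exits through
   * irq_exit() again, closing the loop. */
  static void irq_exit_model(int depth)
  {
  	if (depth > 3) {
  		printf("... would recurse until lockup\n");
  		return;
  	}
  	printf("irq_exit() nesting %d\n", depth);
  	rcu_read_unlock_special_model();
  	if (pending_self_ipi) {
  		pending_self_ipi = 0;
  		irq_exit_model(depth + 1);	/* the IPI's own exit path */
  	}
  }

  int main(void)
  {
  	irq_exit_model(0);
  	return 0;
  }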
1 parent: d827673 · commit: b41642c

File tree: kernel/rcu/tree.h · kernel/rcu/tree_plugin.h

2 files changed (+38, −12 lines)


kernel/rcu/tree.h

Lines changed: 12 additions & 1 deletion
@@ -174,6 +174,17 @@ struct rcu_snap_record {
 	unsigned long	jiffies;	/* Track jiffies value */
 };
 
+/*
+ * An IRQ work (deferred_qs_iw) is used by RCU to get the scheduler's attention
+ * to report quiescent states at the soonest possible time.
+ * The request can be in one of the following states:
+ * - DEFER_QS_IDLE: An IRQ work is yet to be scheduled.
+ * - DEFER_QS_PENDING: An IRQ work was scheduled but either not yet run, or it
+ *   ran and we still haven't reported a quiescent state.
+ */
+#define DEFER_QS_IDLE		0
+#define DEFER_QS_PENDING	1
+
 /* Per-CPU data for read-copy update. */
 struct rcu_data {
 	/* 1) quiescent-state and grace-period handling : */
@@ -192,7 +203,7 @@ struct rcu_data {
 					/* during and after the last grace */
 					/* period it is aware of. */
 	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
-	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
+	int defer_qs_iw_pending;	/* Scheduler attention pending? */
 	struct work_struct strict_work;	/* Schedule readers for strict GPs. */
 
 	/* 2) batch handling */
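The new DEFER_QS_IDLE/DEFER_QS_PENDING pair is effectively a tiny two-state request. As a quick illustration, here is a hedged userspace C model of the intended transitions; the names mirror the kernel's, but the driver around them is invented for illustration:

  #include <stdio.h>

  #define DEFER_QS_IDLE		0
  #define DEFER_QS_PENDING	1

  static int defer_qs_iw_pending = DEFER_QS_IDLE;

  static const char *state_name(int s)
  {
  	return s == DEFER_QS_PENDING ? "DEFER_QS_PENDING" : "DEFER_QS_IDLE";
  }

  int main(void)
  {
  	/* IDLE -> PENDING: rcu_read_unlock_special() queues the IRQ work. */
  	defer_qs_iw_pending = DEFER_QS_PENDING;
  	printf("after queueing:  %s\n", state_name(defer_qs_iw_pending));

  	/* PENDING -> IDLE: a quiescent state is reported
  	 * (rcu_preempt_deferred_qs_irqrestore()), or the handler ran inside
  	 * a reader and backs off so the next unlock can requeue. */
  	defer_qs_iw_pending = DEFER_QS_IDLE;
  	printf("after QS report: %s\n", state_name(defer_qs_iw_pending));
  	return 0;
  }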

kernel/rcu/tree_plugin.h

Lines changed: 26 additions & 11 deletions
@@ -486,13 +486,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	struct rcu_node *rnp;
 	union rcu_special special;
 
+	rdp = this_cpu_ptr(&rcu_data);
+	if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
+		rdp->defer_qs_iw_pending = DEFER_QS_IDLE;
+
 	/*
 	 * If RCU core is waiting for this CPU to exit its critical section,
 	 * report the fact that it has exited. Because irqs are disabled,
 	 * t->rcu_read_unlock_special cannot change.
 	 */
 	special = t->rcu_read_unlock_special;
-	rdp = this_cpu_ptr(&rcu_data);
 	if (!special.s && !rdp->cpu_no_qs.b.exp) {
 		local_irq_restore(flags);
 		return;
@@ -629,7 +632,23 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
 
 	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
 	local_irq_save(flags);
-	rdp->defer_qs_iw_pending = false;
+
+	/*
+	 * If the IRQ work handler happens to run in the middle of an RCU
+	 * read-side critical section, it could be ineffective in getting the
+	 * scheduler's attention to report a deferred quiescent state (the whole
+	 * point of the IRQ work). So reset the state to IDLE and let a later
+	 * rcu_read_unlock() requeue the IRQ work.
+	 *
+	 * Basically, we want to avoid the following situation:
+	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
+	 * 2. CPU enters new rcu_read_lock()
+	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
+	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
+	 * 5. Deferred QS reporting does not happen.
+	 */
+	if (rcu_preempt_depth() > 0)
+		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
+
 	local_irq_restore(flags);
 }
 
@@ -676,17 +695,13 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		set_tsk_need_resched(current);
 		set_preempt_need_resched();
 		if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
-		    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
+		    expboost && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
+		    cpu_online(rdp->cpu)) {
 			// Get scheduler to re-evaluate and call hooks.
 			// If !IRQ_WORK, FQS scan will eventually IPI.
-			if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
-			    IS_ENABLED(CONFIG_PREEMPT_RT))
-				rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
-						rcu_preempt_deferred_qs_handler);
-			else
-				init_irq_work(&rdp->defer_qs_iw,
-					      rcu_preempt_deferred_qs_handler);
-			rdp->defer_qs_iw_pending = true;
+			rdp->defer_qs_iw =
+				IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler);
+			rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
 			irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
 		}
 	}
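To make the five-step race in the handler comment concrete, here is a hedged userspace C replay of it. The state checks mirror the patch; the surrounding harness and prints are invented for illustration. With the old bool flag, step 4's unlock would find the flag still set and never requeue; with the handler's reset, the requeue goes through:

  #include <stdio.h>

  #define DEFER_QS_IDLE		0
  #define DEFER_QS_PENDING	1

  static int defer_qs_iw_pending = DEFER_QS_IDLE;
  static int reader_depth;	/* models rcu_preempt_depth() */

  /* Models the queueing condition in rcu_read_unlock_special(). */
  static void unlock_special(void)
  {
  	if (defer_qs_iw_pending != DEFER_QS_PENDING) {
  		defer_qs_iw_pending = DEFER_QS_PENDING;
  		printf("unlock: IRQ work queued\n");
  	} else {
  		printf("unlock: still PENDING, nothing queued\n");
  	}
  }

  /* Models rcu_preempt_deferred_qs_handler(): when the work fires inside
   * a reader it cannot report a QS, so drop back to IDLE. */
  static void irq_work_handler(void)
  {
  	if (reader_depth > 0) {
  		defer_qs_iw_pending = DEFER_QS_IDLE;
  		printf("handler: inside reader, state -> IDLE\n");
  	}
  }

  int main(void)
  {
  	unlock_special();	/* 1. state -> DEFER_QS_PENDING        */
  	reader_depth = 1;	/* 2. CPU enters a new rcu_read_lock() */
  	irq_work_handler();	/* 3. work runs, cannot report QS      */
  	reader_depth = 0;
  	unlock_special();	/* 4./5. with the fix, the work IS
  				 *       requeued, so the deferred QS
  				 *       can still be reported         */
  	return 0;
  }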
