
Commit 77339e6

Lai Jiangshan authored and paulmckrcu committed
rcu: Provide wrappers for uses of ->rcu_read_lock_nesting
This commit provides wrapper functions for uses of ->rcu_read_lock_nesting to improve readability and to ease future changes to support inlining of __rcu_read_lock() and __rcu_read_unlock().

Signed-off-by: Lai Jiangshan <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent c51f83c commit 77339e6

2 files changed: +36 -21 lines
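Every call site in the diffs below funnels through the new rcu_preempt_depth() accessor instead of touching ->rcu_read_lock_nesting directly. As a reading aid, here is a standalone sketch of that pattern; the stand-in task_struct and current below are assumptions for illustration, not the kernel's definitions (the real accessor is defined elsewhere in the kernel headers):

	/*
	 * Standalone sketch (illustration only): all readers of the
	 * nesting count go through one helper, so the storage behind
	 * it can change without touching the call sites again.
	 */
	struct task_struct {
		int rcu_read_lock_nesting;	/* read-side critical-section depth */
	};

	static struct task_struct this_task;	/* stand-in for the kernel's current */
	#define current (&this_task)

	static inline int rcu_preempt_depth(void)
	{
		return current->rcu_read_lock_nesting;
	}

With one choke point for reads and a matching set of setters for writes, a later change can move the depth into another representation (the commit message mentions enabling inlining of __rcu_read_lock() and __rcu_read_unlock()) without revisiting these call sites.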

kernel/rcu/tree_exp.h

Lines changed: 2 additions & 2 deletions

@@ -610,7 +610,7 @@ static void rcu_exp_handler(void *unused)
 	 * critical section.  If also enabled or idle, immediately
 	 * report the quiescent state, otherwise defer.
 	 */
-	if (!t->rcu_read_lock_nesting) {
+	if (!rcu_preempt_depth()) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
 			rcu_report_exp_rdp(rdp);
@@ -634,7 +634,7 @@ static void rcu_exp_handler(void *unused)
 	 * can have caused this quiescent state to already have been
 	 * reported, so we really do need to check ->expmask.
 	 */
-	if (t->rcu_read_lock_nesting > 0) {
+	if (rcu_preempt_depth() > 0) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmask & rdp->grpmask) {
 			rdp->exp_deferred_qs = true;
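The first hunk's branch can be restated as a standalone predicate for readability. This is a sketch only; the parameters are stand-ins for rcu_preempt_depth(), the preempt_count() mask test, and rcu_dynticks_curr_cpu_in_eqs(), none of which are modeled here:

	#include <stdbool.h>

	/*
	 * Illustrative restatement of the expedited-handler decision:
	 * report the quiescent state immediately only when the CPU is
	 * not inside an RCU read-side critical section and is either
	 * fully preemptible or in an extended quiescent state;
	 * otherwise the report is deferred.
	 */
	static bool exp_can_report_now(int depth, bool bh_or_preempt_disabled,
				       bool in_eqs)
	{
		if (depth != 0)
			return false;	/* inside rcu_read_lock(): must defer */
		return !bh_or_preempt_disabled || in_eqs;
	}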

kernel/rcu/tree_plugin.h

Lines changed: 34 additions & 19 deletions

@@ -290,8 +290,8 @@ void rcu_note_context_switch(bool preempt)

 	trace_rcu_utilization(TPS("Start context switch"));
 	lockdep_assert_irqs_disabled();
-	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
-	if (t->rcu_read_lock_nesting > 0 &&
+	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+	if (rcu_preempt_depth() > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {

 		/* Possibly blocking in an RCU read-side critical section. */
@@ -348,16 +348,31 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 #define RCU_NEST_NMAX (-INT_MAX / 2)
 #define RCU_NEST_PMAX (INT_MAX / 2)

+static void rcu_preempt_read_enter(void)
+{
+	current->rcu_read_lock_nesting++;
+}
+
+static void rcu_preempt_read_exit(void)
+{
+	current->rcu_read_lock_nesting--;
+}
+
+static void rcu_preempt_depth_set(int val)
+{
+	current->rcu_read_lock_nesting = val;
+}
+
 /*
  * Preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
  * if we block.
  */
 void __rcu_read_lock(void)
 {
-	current->rcu_read_lock_nesting++;
+	rcu_preempt_read_enter();
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
-		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
+		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
 	barrier();  /* critical section after entry code. */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -373,19 +388,19 @@ void __rcu_read_unlock(void)
 {
 	struct task_struct *t = current;

-	if (t->rcu_read_lock_nesting != 1) {
-		--t->rcu_read_lock_nesting;
+	if (rcu_preempt_depth() != 1) {
+		rcu_preempt_read_exit();
 	} else {
 		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
+		rcu_preempt_depth_set(-RCU_NEST_BIAS);
 		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
+		rcu_preempt_depth_set(0);
 	}
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
-		int rrln = t->rcu_read_lock_nesting;
+		int rrln = rcu_preempt_depth();

 		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
 	}
@@ -539,7 +554,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
-	       t->rcu_read_lock_nesting <= 0;
+	       rcu_preempt_depth() <= 0;
 }

 /*
@@ -552,16 +567,16 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 static void rcu_preempt_deferred_qs(struct task_struct *t)
 {
 	unsigned long flags;
-	bool couldrecurse = t->rcu_read_lock_nesting >= 0;
+	bool couldrecurse = rcu_preempt_depth() >= 0;

 	if (!rcu_preempt_need_deferred_qs(t))
 		return;
 	if (couldrecurse)
-		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
 	local_irq_save(flags);
 	rcu_preempt_deferred_qs_irqrestore(t, flags);
 	if (couldrecurse)
-		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
 }

 /*
@@ -672,7 +687,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 	if (user || rcu_is_cpu_rrupt_from_idle()) {
 		rcu_note_voluntary_context_switch(current);
 	}
-	if (t->rcu_read_lock_nesting > 0 ||
+	if (rcu_preempt_depth() > 0 ||
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
 		if (rcu_preempt_need_deferred_qs(t)) {
@@ -682,13 +697,13 @@ static void rcu_flavor_sched_clock_irq(int user)
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
-	} else if (!t->rcu_read_lock_nesting) {
+	} else if (!rcu_preempt_depth()) {
 		rcu_qs(); /* Report immediate QS. */
 		return;
 	}

 	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
-	if (t->rcu_read_lock_nesting > 0 &&
+	if (rcu_preempt_depth() > 0 &&
 	    __this_cpu_read(rcu_data.core_needs_qs) &&
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
 	    !t->rcu_read_unlock_special.b.need_qs &&
@@ -709,11 +724,11 @@ void exit_rcu(void)
 	struct task_struct *t = current;

 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
-		t->rcu_read_lock_nesting = 1;
+		rcu_preempt_depth_set(1);
 		barrier();
 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
-	} else if (unlikely(t->rcu_read_lock_nesting)) {
-		t->rcu_read_lock_nesting = 1;
+	} else if (unlikely(rcu_preempt_depth())) {
+		rcu_preempt_depth_set(1);
 	} else {
 		return;
 	}
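The hunks above preserve the nesting-bias trick that the new setters now wrap. Below is a minimal user-space model of why __rcu_read_unlock() parks the counter at -RCU_NEST_BIAS before calling rcu_read_unlock_special(); it is an illustration only, assuming RCU_NEST_BIAS is INT_MAX (consistent with the RCU_NEST_NMAX/PMAX bounds above), with globals standing in for task_struct state:

	#include <limits.h>
	#include <stdio.h>

	#define RCU_NEST_BIAS INT_MAX	/* assumption: matches the kernel's value */

	static int depth;		/* stand-in for ->rcu_read_lock_nesting */
	static int special_pending = 1;	/* stand-in for ->rcu_read_unlock_special.s */

	static void model_read_lock(void)
	{
		depth++;		/* rcu_preempt_read_enter() */
	}

	static void model_read_unlock(void)
	{
		if (depth != 1) {
			depth--;	/* fast path: rcu_preempt_read_exit() */
		} else {
			/* Park the counter deeply negative, as __rcu_read_unlock() does. */
			depth = -RCU_NEST_BIAS;
			if (special_pending) {
				special_pending = 0;
				/*
				 * Nested critical sections during the special
				 * processing see depth != 1, so they take only
				 * the plain increment/decrement paths and
				 * cannot re-enter this slow path.
				 */
				model_read_lock();	/* depth: -RCU_NEST_BIAS + 1 */
				model_read_unlock();	/* depth: -RCU_NEST_BIAS */
			}
			depth = 0;	/* rcu_preempt_depth_set(0) */
		}
	}

	int main(void)
	{
		model_read_lock();
		model_read_unlock();
		printf("final depth=%d, special_pending=%d\n", depth, special_pending);
		return 0;
	}

rcu_preempt_deferred_qs() uses the same idea in the other direction: it subtracts RCU_NEST_BIAS around the reporting step so the depth looks negative and the unlock path cannot recurse, then adds the bias back afterward.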
