@@ -290,8 +290,8 @@ void rcu_note_context_switch(bool preempt)
 
 	trace_rcu_utilization(TPS("Start context switch"));
 	lockdep_assert_irqs_disabled();
-	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
-	if (t->rcu_read_lock_nesting > 0 &&
+	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+	if (rcu_preempt_depth() > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
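
Note: rcu_preempt_depth() is not defined anywhere in this diff; as best I can tell it is the pre-existing preemptible-RCU accessor (in include/linux/rcupdate.h) that simply reads the same per-task field the old code touched directly. A minimal sketch of such an accessor, under that assumption:

/* Sketch only: the in-tree definition lives outside this diff. */
static inline int rcu_preempt_depth(void)
{
	return current->rcu_read_lock_nesting;
}
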
@@ -348,16 +348,31 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 #define RCU_NEST_NMAX (-INT_MAX / 2)
 #define RCU_NEST_PMAX (INT_MAX / 2)
 
+static void rcu_preempt_read_enter(void)
+{
+	current->rcu_read_lock_nesting++;
+}
+
+static void rcu_preempt_read_exit(void)
+{
+	current->rcu_read_lock_nesting--;
+}
+
+static void rcu_preempt_depth_set(int val)
+{
+	current->rcu_read_lock_nesting = val;
+}
+
 /*
  * Preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
  * if we block.
  */
 void __rcu_read_lock(void)
 {
-	current->rcu_read_lock_nesting++;
+	rcu_preempt_read_enter();
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
-		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
+		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
 	barrier(); /* critical section after entry code. */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
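
The three helpers added above, together with rcu_preempt_depth(), now mediate every access to ->rcu_read_lock_nesting in this file. The apparent point of the indirection is that callers no longer care where the nesting count is stored, so the backing field can later be relocated without touching them. A standalone sketch of that pattern, with the task state mocked as a single static int (hypothetical names, not kernel code):

#include <stdio.h>

/* Mocked backing store; in the kernel this is current->rcu_read_lock_nesting. */
static int mock_nesting;

/* Only these four helpers know where the depth lives. */
static void read_enter(void)   { mock_nesting++; }
static void read_exit(void)    { mock_nesting--; }
static void depth_set(int val) { mock_nesting = val; }
static int  depth(void)        { return mock_nesting; }

/* "Callers" use only the helpers, so moving the store touches four functions. */
int main(void)
{
	read_enter();
	read_enter();
	printf("depth after two enters: %d\n", depth());   /* prints 2 */
	read_exit();
	depth_set(0);
	printf("depth after exit + reset: %d\n", depth()); /* prints 0 */
	return 0;
}
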
@@ -373,19 +388,19 @@ void __rcu_read_unlock(void)
 {
 	struct task_struct *t = current;
 
-	if (t->rcu_read_lock_nesting != 1) {
-		--t->rcu_read_lock_nesting;
+	if (rcu_preempt_depth() != 1) {
+		rcu_preempt_read_exit();
 	} else {
 		barrier(); /* critical section before exit code. */
-		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
+		rcu_preempt_depth_set(-RCU_NEST_BIAS);
 		barrier(); /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier(); /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
+		rcu_preempt_depth_set(0);
 	}
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
-		int rrln = t->rcu_read_lock_nesting;
+		int rrln = rcu_preempt_depth();
 
 		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
 	}
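
A note on rcu_preempt_depth_set(-RCU_NEST_BIAS) above: while the outermost unlock is running rcu_read_unlock_special(), the depth is parked at a large negative value, so a read-side critical section entered from that path sees a depth other than 1 and takes only the cheap decrement branch instead of re-entering the special processing. The toy program below illustrates just that control flow; RCU_NEST_BIAS is defined outside this diff, so a large constant is assumed here, and the task state is reduced to two globals. It is an illustration of the idea, not kernel code.

#include <assert.h>
#include <stdio.h>

#define RCU_NEST_BIAS (1 << 30)  /* assumed value; the real define is outside this diff */

static int nesting;              /* stands in for current->rcu_read_lock_nesting */
static int special_calls;        /* counts entries into the "special" slow path */

static void toy_read_lock(void) { nesting++; }

static void toy_read_unlock(void)
{
	if (nesting != 1) {
		nesting--;                /* nested, or running under the bias: cheap exit */
	} else {
		nesting = -RCU_NEST_BIAS; /* park depth so the slow path cannot recurse */
		special_calls++;          /* stand-in for rcu_read_unlock_special() */
		/* A read-side section entered here takes the cheap branch above. */
		toy_read_lock();
		toy_read_unlock();
		nesting = 0;
	}
}

int main(void)
{
	toy_read_lock();
	toy_read_unlock();
	assert(special_calls == 1);  /* slow path ran once, not recursively */
	assert(nesting == 0);
	printf("special path entered %d time(s), final depth %d\n", special_calls, nesting);
	return 0;
}
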
@@ -539,7 +554,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
-	       t->rcu_read_lock_nesting <= 0;
+	       rcu_preempt_depth() <= 0;
 }
 
 /*
@@ -552,16 +567,16 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 static void rcu_preempt_deferred_qs(struct task_struct *t)
 {
 	unsigned long flags;
-	bool couldrecurse = t->rcu_read_lock_nesting >= 0;
+	bool couldrecurse = rcu_preempt_depth() >= 0;
 
 	if (!rcu_preempt_need_deferred_qs(t))
 		return;
 	if (couldrecurse)
-		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
 	local_irq_save(flags);
 	rcu_preempt_deferred_qs_irqrestore(t, flags);
 	if (couldrecurse)
-		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
 }
 
 /*
@@ -672,7 +687,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 	if (user || rcu_is_cpu_rrupt_from_idle()) {
 		rcu_note_voluntary_context_switch(current);
 	}
-	if (t->rcu_read_lock_nesting > 0 ||
+	if (rcu_preempt_depth() > 0 ||
 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
 		/* No QS, force context switch if deferred. */
 		if (rcu_preempt_need_deferred_qs(t)) {
@@ -682,13 +697,13 @@ static void rcu_flavor_sched_clock_irq(int user)
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
-	} else if (!t->rcu_read_lock_nesting) {
+	} else if (!rcu_preempt_depth()) {
 		rcu_qs(); /* Report immediate QS. */
 		return;
 	}
 
 	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
-	if (t->rcu_read_lock_nesting > 0 &&
+	if (rcu_preempt_depth() > 0 &&
 	    __this_cpu_read(rcu_data.core_needs_qs) &&
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
 	    !t->rcu_read_unlock_special.b.need_qs &&
@@ -709,11 +724,11 @@ void exit_rcu(void)
 	struct task_struct *t = current;
 
 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
-		t->rcu_read_lock_nesting = 1;
+		rcu_preempt_depth_set(1);
 		barrier();
 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
-	} else if (unlikely(t->rcu_read_lock_nesting)) {
-		t->rcu_read_lock_nesting = 1;
+	} else if (unlikely(rcu_preempt_depth())) {
+		rcu_preempt_depth_set(1);
 	} else {
 		return;
 	}