  */
 #define RCU_DYNTICK_CTRL_MASK 0x1
 #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-#ifndef rcu_eqs_special_exit
-#define rcu_eqs_special_exit() do { } while (0)
-#endif
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
 	.dynticks_nesting = 1,
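
The two defines kept above steal bit 0 of the per-CPU ->dynticks value for "special work on the next exit from an extended quiescent state (EQS)", so the counter proper advances in steps of RCU_DYNTICK_CTRL_CTR (2); the rcu_eqs_special_exit() hook being deleted here was the stub invoked when that bit was found set on EQS exit (see the rcu_dynticks_eqs_exit() hunk below). As a hedged illustration of the arithmetic only, here is a userspace toy, not kernel code; only the two defines are copied from the file:

#include <stdio.h>

#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

int main(void)
{
	unsigned int dynticks = RCU_DYNTICK_CTRL_CTR;	/* counter = 1, no special work pending */

	dynticks += RCU_DYNTICK_CTRL_CTR;		/* one EQS transition: counter -> 2 */
	dynticks |= RCU_DYNTICK_CTRL_MASK;		/* request special work on EQS exit */

	printf("counter=%u special=%u\n",
	       dynticks >> 1,				/* divide by RCU_DYNTICK_CTRL_CTR */
	       dynticks & RCU_DYNTICK_CTRL_MASK);	/* prints: counter=2 special=1 */
	return 0;
}
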
@@ -242,7 +239,7 @@ void rcu_softirq_qs(void)
  * RCU is watching prior to the call to this function and is no longer
  * watching upon return.
  */
-static void rcu_dynticks_eqs_enter(void)
+static noinstr void rcu_dynticks_eqs_enter(void)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	int seq;
@@ -267,7 +264,7 @@ static void rcu_dynticks_eqs_enter(void)
  * called from an extended quiescent state, that is, RCU is not watching
  * prior to the call to this function and is watching upon return.
  */
-static void rcu_dynticks_eqs_exit(void)
+static noinstr void rcu_dynticks_eqs_exit(void)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	int seq;
@@ -285,8 +282,6 @@ static void rcu_dynticks_eqs_exit(void)
 	if (seq & RCU_DYNTICK_CTRL_MASK) {
 		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
 		smp_mb__after_atomic(); /* _exit after clearing mask. */
-		/* Prefer duplicate flushes to losing a flush. */
-		rcu_eqs_special_exit();
 	}
 }
 
@@ -314,7 +309,7 @@ static void rcu_dynticks_eqs_online(void)
  *
  * No ordering, as we are sampling CPU-local information.
  */
-static bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
@@ -603,7 +598,7 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
  * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
-static void rcu_eqs_enter(bool user)
+static noinstr void rcu_eqs_enter(bool user)
 {
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
@@ -618,12 +613,14 @@ static void rcu_eqs_enter(bool user)
 	}
 
 	lockdep_assert_irqs_disabled();
+	instrumentation_begin();
 	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 	rdp = this_cpu_ptr(&rcu_data);
 	do_nocb_deferred_wakeup(rdp);
 	rcu_prepare_for_idle();
 	rcu_preempt_deferred_qs(current);
+	instrumentation_end();
 	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
 	// RCU is watching here ...
 	rcu_dynticks_eqs_enter();
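
The pattern applied throughout this patch is: mark the EQS entry/exit paths noinstr so no tracing or probing can run while RCU is not watching, then explicitly re-open instrumentation with an instrumentation_begin()/instrumentation_end() pair around the stretch that still executes while RCU is watching (tracepoints, lockdep, deferred wakeups). Below is a minimal sketch of that shape, not kernel code: my_enter_idle() and my_arch_idle() are hypothetical, and the header locations for noinstr and instrumentation_begin()/end() vary by kernel version.

#include <linux/compiler_types.h>	/* noinstr (assumed location) */
#include <linux/instrumentation.h>	/* instrumentation_begin/end() (assumed location) */
#include <linux/kernel.h>		/* trace_printk() */

static noinstr void my_arch_idle(void)
{
	/* Hypothetical low-level idle entry: must never be instrumented. */
}

static noinstr void my_enter_idle(void)
{
	/*
	 * RCU is still watching at this point, so instrumentable work
	 * (tracing, lockdep, ...) is fenced in explicitly.
	 */
	instrumentation_begin();
	trace_printk("entering idle\n");	/* allowed: inside the fenced section */
	instrumentation_end();

	/* From here on no instrumentation is allowed. */
	my_arch_idle();
}

On builds with objtool's noinstr validation enabled, calls that escape such a fenced region out of a noinstr function are reported at link time, which is what makes the explicit begin/end brackets worthwhile.
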
@@ -660,7 +657,7 @@ void rcu_idle_enter(void)
  * If you add or remove a call to rcu_user_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_user_enter(void)
+noinstr void rcu_user_enter(void)
 {
 	lockdep_assert_irqs_disabled();
 	rcu_eqs_enter(true);
@@ -693,19 +690,23 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
 	 * leave it in non-RCU-idle state.
 	 */
 	if (rdp->dynticks_nmi_nesting != 1) {
+		instrumentation_begin();
 		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
 				  atomic_read(&rdp->dynticks));
 		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
 			   rdp->dynticks_nmi_nesting - 2);
+		instrumentation_end();
 		return;
 	}
 
+	instrumentation_begin();
 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
 	if (irq)
 		rcu_prepare_for_idle();
+	instrumentation_end();
 
 	// RCU is watching here ...
 	rcu_dynticks_eqs_enter();
@@ -721,7 +722,7 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
  * If you add or remove a call to rcu_nmi_exit(), be sure to test
  * with CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_nmi_exit(void)
+void noinstr rcu_nmi_exit(void)
 {
 	rcu_nmi_exit_common(false);
 }
@@ -745,7 +746,7 @@ void rcu_nmi_exit(void)
  * If you add or remove a call to rcu_irq_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_irq_exit(void)
+void noinstr rcu_irq_exit(void)
 {
 	lockdep_assert_irqs_disabled();
 	rcu_nmi_exit_common(true);
@@ -774,7 +775,7 @@ void rcu_irq_exit_irqson(void)
  * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
-static void rcu_eqs_exit(bool user)
+static void noinstr rcu_eqs_exit(bool user)
 {
 	struct rcu_data *rdp;
 	long oldval;
@@ -792,12 +793,14 @@ static void rcu_eqs_exit(bool user)
 	// RCU is not watching here ...
 	rcu_dynticks_eqs_exit();
 	// ... but is watching here.
+	instrumentation_begin();
 	rcu_cleanup_after_idle();
 	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 	WRITE_ONCE(rdp->dynticks_nesting, 1);
 	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+	instrumentation_end();
 }
 
 /**
@@ -828,7 +831,7 @@ void rcu_idle_exit(void)
  * If you add or remove a call to rcu_user_exit(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_user_exit(void)
+void noinstr rcu_user_exit(void)
 {
 	rcu_eqs_exit(1);
 }
@@ -876,28 +879,35 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 			rcu_cleanup_after_idle();
 
 		incby = 1;
-	} else if (irq && tick_nohz_full_cpu(rdp->cpu) &&
-		   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-		   READ_ONCE(rdp->rcu_urgent_qs) &&
-		   !READ_ONCE(rdp->rcu_forced_tick)) {
-		// We get here only if we had already exited the extended
-		// quiescent state and this was an interrupt (not an NMI).
-		// Therefore, (1) RCU is already watching and (2) The fact
-		// that we are in an interrupt handler and that the rcu_node
-		// lock is an irq-disabled lock prevents self-deadlock.
-		// So we can safely recheck under the lock.
-		raw_spin_lock_rcu_node(rdp->mynode);
-		if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-			// A nohz_full CPU is in the kernel and RCU
-			// needs a quiescent state.  Turn on the tick!
-			WRITE_ONCE(rdp->rcu_forced_tick, true);
-			tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+	} else if (irq) {
+		instrumentation_begin();
+		if (tick_nohz_full_cpu(rdp->cpu) &&
+		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
+		    READ_ONCE(rdp->rcu_urgent_qs) &&
+		    !READ_ONCE(rdp->rcu_forced_tick)) {
+			// We get here only if we had already exited the
+			// extended quiescent state and this was an
+			// interrupt (not an NMI).  Therefore, (1) RCU is
+			// already watching and (2) The fact that we are in
+			// an interrupt handler and that the rcu_node lock
+			// is an irq-disabled lock prevents self-deadlock.
+			// So we can safely recheck under the lock.
+			raw_spin_lock_rcu_node(rdp->mynode);
+			if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+				// A nohz_full CPU is in the kernel and RCU
+				// needs a quiescent state.  Turn on the tick!
+				WRITE_ONCE(rdp->rcu_forced_tick, true);
+				tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+			}
+			raw_spin_unlock_rcu_node(rdp->mynode);
 		}
-		raw_spin_unlock_rcu_node(rdp->mynode);
+		instrumentation_end();
 	}
+	instrumentation_begin();
 	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
 			  rdp->dynticks_nmi_nesting,
 			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
+	instrumentation_end();
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
 		   rdp->dynticks_nmi_nesting + incby);
 	barrier();
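
The restructured else-if above keeps the cheap nohz_full tests lock-free and only takes the rcu_node lock when they pass, rechecking rcu_urgent_qs and rcu_forced_tick under the lock before forcing the tick on; hoisting the body under a plain if (irq) also gives one place to put the instrumentation_begin()/end() pair. A generic, hypothetical sketch of that check-then-recheck-under-the-lock shape follows; struct my_dev and maybe_kick() are illustrations, not kernel APIs.

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/spinlock.h>

struct my_dev {
	raw_spinlock_t	lock;		/* initialize with raw_spin_lock_init() */
	bool		needs_kick;	/* set asynchronously elsewhere */
	bool		kicked;		/* one-shot latch */
};

static void maybe_kick(struct my_dev *d)
{
	/* Cheap lockless filter: the common case returns here. */
	if (!READ_ONCE(d->needs_kick) || READ_ONCE(d->kicked))
		return;

	/* Slow path: the flags may have changed, so recheck under the lock. */
	raw_spin_lock(&d->lock);
	if (d->needs_kick && !d->kicked) {
		WRITE_ONCE(d->kicked, true);
		/* ...perform the one-time action here, e.g. enable the tick... */
	}
	raw_spin_unlock(&d->lock);
}

In the kernel code above the same idea is deadlock-safe because, as the retained comment notes, the caller is an interrupt handler and the rcu_node lock is an irq-disabled lock, so the lock can never be taken recursively on this CPU.
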
@@ -906,11 +916,10 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  */
-void rcu_nmi_enter(void)
+noinstr void rcu_nmi_enter(void)
 {
 	rcu_nmi_enter_common(false);
 }
-NOKPROBE_SYMBOL(rcu_nmi_enter);
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -934,7 +943,7 @@ NOKPROBE_SYMBOL(rcu_nmi_enter);
  * If you add or remove a call to rcu_irq_enter(), be sure to test with
  * CONFIG_RCU_EQS_DEBUG=y.
  */
-void rcu_irq_enter(void)
+noinstr void rcu_irq_enter(void)
 {
 	lockdep_assert_irqs_disabled();
 	rcu_nmi_enter_common(true);
@@ -979,7 +988,7 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
  * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 */
-bool notrace rcu_is_watching(void)
+bool rcu_is_watching(void)
 {
 	bool ret;
 
@@ -1031,12 +1040,12 @@ bool rcu_lockdep_current_cpu_online(void)
 
 	if (in_nmi() || !rcu_scheduler_fully_active)
 		return true;
-	preempt_disable();
+	preempt_disable_notrace();
 	rdp = this_cpu_ptr(&rcu_data);
 	rnp = rdp->mynode;
 	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
 		ret = true;
-	preempt_enable();
+	preempt_enable_notrace();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);