@@ -631,17 +631,16 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
631631}
632632EXPORT_SYMBOL_GPL (cleanup_srcu_struct );
633633
634+ #ifdef CONFIG_PROVE_RCU
634635/*
635636 * Check for consistent NMI safety.
636637 */
637- static void srcu_check_nmi_safety (struct srcu_struct * ssp , bool nmi_safe )
638+ void srcu_check_nmi_safety (struct srcu_struct * ssp , bool nmi_safe )
638639{
639640 int nmi_safe_mask = 1 << nmi_safe ;
640641 int old_nmi_safe_mask ;
641642 struct srcu_data * sdp ;
642643
643- if (!IS_ENABLED (CONFIG_PROVE_RCU ))
644- return ;
645644 /* NMI-unsafe use in NMI is a bad sign */
646645 WARN_ON_ONCE (!nmi_safe && in_nmi ());
647646 sdp = raw_cpu_ptr (ssp -> sda );
@@ -652,6 +651,8 @@ static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
652651 }
653652 WARN_ONCE (old_nmi_safe_mask != nmi_safe_mask , "CPU %d old state %d new state %d\n" , sdp -> cpu , old_nmi_safe_mask , nmi_safe_mask );
654653}
654+ EXPORT_SYMBOL_GPL (srcu_check_nmi_safety );
655+ #endif /* CONFIG_PROVE_RCU */
655656
656657/*
657658 * Counts the new reader in the appropriate per-CPU element of the
@@ -665,7 +666,6 @@ int __srcu_read_lock(struct srcu_struct *ssp)
665666 idx = READ_ONCE (ssp -> srcu_idx ) & 0x1 ;
666667 this_cpu_inc (ssp -> sda -> srcu_lock_count [idx ].counter );
667668 smp_mb (); /* B */ /* Avoid leaking the critical section. */
668- srcu_check_nmi_safety (ssp , false);
669669 return idx ;
670670}
671671EXPORT_SYMBOL_GPL (__srcu_read_lock );
/*
 * Counts the old reader out of the appropriate per-CPU element of the
 * srcu_struct.  @idx must be the value returned by the matching
 * __srcu_read_lock().  Note that this may well run on a different CPU
 * than the one that did the corresponding __srcu_read_lock(); lock and
 * unlock counts are summed per index across all CPUs by the grace-period
 * machinery.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
685684
@@ -690,16 +689,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching
 * __srcu_read_unlock_nmisafe().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	int idx;
	/* Per-CPU counters; preemption/migration tolerated, hence raw_. */
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	/* Snapshot the current grace-period index (0 or 1). */
	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	/* Atomic RMW rather than this_cpu_inc() so NMI handlers can nest safely. */
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -709,14 +706,12 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
/*
 * Counts the old reader out of the appropriate per-CPU element of the
 * srcu_struct, in an NMI-safe manner using RMW atomics.  Note that this
 * may well be a different CPU than that which was incremented by the
 * corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	/* Atomic RMW to match __srcu_read_lock_nmisafe()'s NMI-safe increment. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
722717
@@ -1163,7 +1158,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11631158 * SRCU read-side critical section so that the grace-period
11641159 * sequence number cannot wrap around in the meantime.
11651160 */
1166- idx = __srcu_read_lock_nmisafe (ssp , false );
1161+ idx = __srcu_read_lock_nmisafe (ssp );
11671162 ss_state = smp_load_acquire (& ssp -> srcu_size_state );
11681163 if (ss_state < SRCU_SIZE_WAIT_CALL )
11691164 sdp = per_cpu_ptr (ssp -> sda , 0 );
@@ -1196,7 +1191,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11961191 srcu_funnel_gp_start (ssp , sdp , s , do_norm );
11971192 else if (needexp )
11981193 srcu_funnel_exp_start (ssp , sdp_mynode , s );
1199- __srcu_read_unlock_nmisafe (ssp , idx , false );
1194+ __srcu_read_unlock_nmisafe (ssp , idx );
12001195 return s ;
12011196}
12021197
@@ -1500,13 +1495,13 @@ void srcu_barrier(struct srcu_struct *ssp)
15001495 /* Initial count prevents reaching zero until all CBs are posted. */
15011496 atomic_set (& ssp -> srcu_barrier_cpu_cnt , 1 );
15021497
1503- idx = __srcu_read_lock_nmisafe (ssp , false );
1498+ idx = __srcu_read_lock_nmisafe (ssp );
15041499 if (smp_load_acquire (& ssp -> srcu_size_state ) < SRCU_SIZE_WAIT_BARRIER )
15051500 srcu_barrier_one_cpu (ssp , per_cpu_ptr (ssp -> sda , 0 ));
15061501 else
15071502 for_each_possible_cpu (cpu )
15081503 srcu_barrier_one_cpu (ssp , per_cpu_ptr (ssp -> sda , cpu ));
1509- __srcu_read_unlock_nmisafe (ssp , idx , false );
1504+ __srcu_read_unlock_nmisafe (ssp , idx );
15101505
15111506 /* Remove the initial count, at which point reaching zero can happen. */
15121507 if (atomic_dec_and_test (& ssp -> srcu_barrier_cpu_cnt ))