@@ -626,6 +626,26 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
+/*
+ * Check for consistent NMI safety.
+ */
+static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
+{
+	int nmi_safe_mask = 1 << nmi_safe;
+	int old_nmi_safe_mask;
+	struct srcu_data *sdp;
+
+	if (!IS_ENABLED(CONFIG_PROVE_RCU))
+		return;
+	sdp = raw_cpu_ptr(ssp->sda);
+	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
+	if (!old_nmi_safe_mask) {
+		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
+		return;
+	}
+	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
+}
+
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.
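
[Annotation, not part of the patch] srcu_check_nmi_safety() encodes the
reader flavor as a one-hot mask: 1 << false == 0x1 for normal readers and
1 << true == 0x2 for NMI-safe readers. The first reader on a given CPU seeds
->srcu_nmi_safety, and any later reader of the other flavor trips the
WARN_ONCE(). A minimal sketch of the inconsistent usage this catches,
assuming the srcu_read_lock_nmisafe()/srcu_read_unlock_nmisafe() wrappers
added elsewhere in this series:

	DEFINE_SRCU(my_srcu);	/* hypothetical srcu_struct for this example */

	void task_context_reader(void)
	{
		int idx = srcu_read_lock(&my_srcu);	/* records mask 0x1 */

		/* ... read-side critical section ... */
		srcu_read_unlock(&my_srcu, idx);
	}

	void nmi_context_reader(void)	/* imagine this called from an NMI handler */
	{
		int idx = srcu_read_lock_nmisafe(&my_srcu);	/* mask 0x2: WARNs under CONFIG_PROVE_RCU */

		/* ... read-side critical section ... */
		srcu_read_unlock_nmisafe(&my_srcu, idx);
	}
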
@@ -638,6 +658,7 @@ int __srcu_read_lock(struct srcu_struct *ssp)
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
 	smp_mb(); /* B */ /* Avoid leaking the critical section. */
+	srcu_check_nmi_safety(ssp, false);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -651,6 +672,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */ /* Avoid leaking the critical section. */
 	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
+	srcu_check_nmi_safety(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
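
[Annotation] Both non-NMI-safe fast paths above now record "not NMI-safe"
(mask 0x1). The check sits after the counter update and full barrier, so it
stays off the ordering-critical path, and because IS_ENABLED(CONFIG_PROVE_RCU)
is a compile-time constant, srcu_check_nmi_safety() compiles away entirely in
non-debug builds.
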
@@ -659,14 +681,16 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
  * srcu_struct, but in an NMI-safe manner using RMW atomics.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
 {
 	int idx;
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
 
 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	atomic_long_inc(&sdp->srcu_lock_count[idx]);
 	smp_mb__after_atomic(); /* B */ /* Avoid leaking the critical section. */
+	if (chknmisafe)
+		srcu_check_nmi_safety(ssp, true);
 	return idx;
 }
 EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -676,12 +700,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
  * element of the srcu_struct. Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  */
-void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
 {
 	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
 
 	smp_mb__before_atomic(); /* C */ /* Avoid leaking the critical section. */
 	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
+	if (chknmisafe)
+		srcu_check_nmi_safety(ssp, true);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
 
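
[Annotation] The new chknmisafe parameter lets callers opt out of the flavor
check. Genuine readers should pass true; a plausible sketch of a public
wrapper follows (hypothetical and simplified, the real wrapper in
include/linux/srcu.h also deals with lockdep and with architectures that
lack NMI-safe per-CPU operations):

	/* Simplified, assumed wrapper: real readers check flavor consistency. */
	static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp)
	{
		return __srcu_read_lock_nmisafe(ssp, true);
	}
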
@@ -1121,7 +1147,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	int ss_state;
 
 	check_init_srcu_struct(ssp);
-	idx = __srcu_read_lock_nmisafe(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp, false);
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_CALL)
 		sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1154,7 +1180,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
 		srcu_funnel_exp_start(ssp, sdp_mynode, s);
-	__srcu_read_unlock_nmisafe(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx, false);
 	return s;
 }
 
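
[Annotation] srcu_gp_start_if_needed() passes false because it uses the
NMI-safe primitives unconditionally for its own bookkeeping, whatever flavor
the srcu_struct's real readers use. Letting this internal use feed the check
would mark every srcu_struct as NMI-safe and defeat the diagnostic; the same
reasoning applies to srcu_barrier() in the next hunk.
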
@@ -1458,13 +1484,13 @@ void srcu_barrier(struct srcu_struct *ssp)
 	/* Initial count prevents reaching zero until all CBs are posted. */
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
-	idx = __srcu_read_lock_nmisafe(ssp);
+	idx = __srcu_read_lock_nmisafe(ssp, false);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
 		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
-	__srcu_read_unlock_nmisafe(ssp, idx);
+	__srcu_read_unlock_nmisafe(ssp, idx, false);
 
 	/* Remove the initial count, at which point reaching zero can happen. */
 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))