@@ -726,7 +726,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
 	int state;

 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
-		sdp = per_cpu_ptr(ssp->sda, 0);
+		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
 	else
 		sdp = this_cpu_ptr(ssp->sda);
 	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
@@ -837,7 +837,8 @@ static void srcu_gp_end(struct srcu_struct *ssp)
 	/* Initiate callback invocation as needed. */
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
-		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
+		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
+				      cbdelay);
 	} else {
 		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
 		srcu_for_each_node_breadth_first(ssp, snp) {
@@ -1161,7 +1162,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
 	idx = __srcu_read_lock_nmisafe(ssp);
 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
 	if (ss_state < SRCU_SIZE_WAIT_CALL)
-		sdp = per_cpu_ptr(ssp->sda, 0);
+		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
 	else
 		sdp = raw_cpu_ptr(ssp->sda);
 	spin_lock_irqsave_sdp_contention(sdp, &flags);
@@ -1497,7 +1498,7 @@ void srcu_barrier(struct srcu_struct *ssp)

 	idx = __srcu_read_lock_nmisafe(ssp);
 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
-		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
+		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
 	else
 		for_each_possible_cpu(cpu)
 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
0 commit comments