@@ -604,37 +604,33 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
 	}
 }
 
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
+static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
 {
 	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
-	int ret;
 
-	rcu_nocb_lock_irqsave(rdp, flags);
-	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
-	    !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+	/*
+	 * Locking orders future de-offloaded callbacks enqueue against previous
+	 * handling of this rdp. Ie: Make sure rcuog is done with this rdp before
+	 * deoffloaded callbacks can be enqueued.
+	 */
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
 		/*
 		 * Offloading. Set our flag and notify the offload worker.
 		 * We will handle this rdp until it ever gets de-offloaded.
 		 */
-		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		ret = 1;
-	} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
-		   rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+		list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
+		rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
+	} else {
 		/*
 		 * De-offloading. Clear our flag and notify the de-offload worker.
 		 * We will ignore this rdp until it ever gets re-offloaded.
 		 */
-		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		ret = 0;
-	} else {
-		WARN_ON_ONCE(1);
-		ret = -1;
+		list_del(&rdp->nocb_entry_rdp);
+		rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
 	}
-
-	rcu_nocb_unlock_irqrestore(rdp, flags);
-
-	return ret;
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 }
 
 static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
@@ -841,14 +837,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
 	}
 
 	if (rdp_toggling) {
-		int ret;
-
-		ret = nocb_gp_toggle_rdp(rdp_toggling);
-		if (ret == 1)
-			list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
-		else if (ret == 0)
-			list_del(&rdp_toggling->nocb_entry_rdp);
-
+		nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
 		swake_up_one(&rdp_toggling->nocb_state_wq);
 	}
 
@@ -1018,16 +1007,11 @@ void rcu_nocb_flush_deferred_wakeup(void)
 }
 EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
 
-static int rdp_offload_toggle(struct rcu_data *rdp,
-			      bool offload, unsigned long flags)
-	__releases(rdp->nocb_lock)
+static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 	bool wake_gp = false;
-
-	rcu_segcblist_offload(cblist, offload);
-	rcu_nocb_unlock_irqrestore(rdp, flags);
+	unsigned long flags;
 
 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
 	// Queue this rdp for add/del to/from the list to iterate on rcuog
@@ -1041,9 +1025,25 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
 	return wake_gp;
 }
 
+static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
+{
+	unsigned long flags;
+	bool ret;
+
+	/*
+	 * Locking makes sure rcuog is done handling this rdp before deoffloaded
+	 * enqueue can happen. Also it keeps the SEGCBLIST_OFFLOADED flag stable
+	 * while the ->nocb_lock is held.
+	 */
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+	return ret;
+}
+
 static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
 	int wake_gp;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
@@ -1056,51 +1056,42 @@ static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 	/* Flush all callbacks from segcblist and bypass */
 	rcu_barrier();
 
+	/*
+	 * Make sure the rcuoc kthread isn't in the middle of a nocb locked
+	 * sequence while offloading is deactivated, along with nocb locking.
+	 */
+	if (rdp->nocb_cb_kthread)
+		kthread_park(rdp->nocb_cb_kthread);
+
 	rcu_nocb_lock_irqsave(rdp, flags);
 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
+	rcu_nocb_unlock_irqrestore(rdp, flags);
 
-	wake_gp = rdp_offload_toggle(rdp, false, flags);
+	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
 
 	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
+
 	if (rdp_gp->nocb_gp_kthread) {
 		if (wake_gp)
 			wake_up_process(rdp_gp->nocb_gp_kthread);
 
 		swait_event_exclusive(rdp->nocb_state_wq,
-				      !rcu_segcblist_test_flags(cblist,
-								SEGCBLIST_KTHREAD_GP));
-		if (rdp->nocb_cb_kthread)
-			kthread_park(rdp->nocb_cb_kthread);
+				      rcu_nocb_rdp_deoffload_wait_cond(rdp));
 	} else {
 		/*
 		 * No kthread to clear the flags for us or remove the rdp from the nocb list
 		 * to iterate. Do it here instead. Locking doesn't look stricly necessary
 		 * but we stick to paranoia in this rare path.
 		 */
-		rcu_nocb_lock_irqsave(rdp, flags);
-		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 
 		list_del(&rdp->nocb_entry_rdp);
 	}
-	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
-	/*
-	 * Lock one last time to acquire latest callback updates from kthreads
-	 * so we can later handle callbacks locally without locking.
-	 */
-	rcu_nocb_lock_irqsave(rdp, flags);
-	/*
-	 * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
-	 * lock is released but how about being paranoid for once?
-	 */
-	rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
-	/*
-	 * Without SEGCBLIST_LOCKING, we can't use
-	 * rcu_nocb_unlock_irqrestore() anymore.
-	 */
-	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
 	return 0;
 }
@@ -1129,10 +1120,20 @@ int rcu_nocb_cpu_deoffload(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
 
-static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
+	bool ret;
+
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+	return ret;
+}
+
+static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+{
 	int wake_gp;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 
@@ -1152,20 +1153,14 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
 
-	/*
-	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
-	 * is set.
-	 */
-	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-
-	wake_gp = rdp_offload_toggle(rdp, true, flags);
+	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
 	if (wake_gp)
 		wake_up_process(rdp_gp->nocb_gp_kthread);
 
-	kthread_unpark(rdp->nocb_cb_kthread);
-
 	swait_event_exclusive(rdp->nocb_state_wq,
-			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+			      rcu_nocb_rdp_offload_wait_cond(rdp));
+
+	kthread_unpark(rdp->nocb_cb_kthread);
 
 	return 0;
 }
@@ -1340,8 +1335,7 @@ void __init rcu_init_nohz(void)
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_segcblist_empty(&rdp->cblist))
 			rcu_segcblist_init(&rdp->cblist);
-		rcu_segcblist_offload(&rdp->cblist, true);
-		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
+		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
 	}
 	rcu_organize_nocb_kthreads();
 }
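
The pattern this patch converges on: rcuog flips the single SEGCBLIST_OFFLOADED flag under ->nocb_lock and wakes ->nocb_state_wq, while the (de-)offload caller sleeps until the flag reaches the wanted state, re-reading it under that same lock so the test is stable. Below is a minimal user-space sketch of that handshake using POSIX threads; fake_rdp, toggle_rdp and wait_for_state are invented names for illustration, not kernel APIs.

/* Hedged sketch, not kernel code: models the toggle/wait handshake. */
#include <pthread.h>
#include <stdbool.h>

struct fake_rdp {                      /* stands in for struct rcu_data */
	pthread_mutex_t nocb_lock;     /* models rdp->nocb_lock */
	pthread_cond_t nocb_state_wq;  /* models rdp->nocb_state_wq */
	bool offloaded;                /* models SEGCBLIST_OFFLOADED */
};

/* rcuog side, cf. nocb_gp_toggle_rdp(): flip the flag under the lock,
 * then wake any waiter, cf. swake_up_one(). */
static void toggle_rdp(struct fake_rdp *rdp)
{
	pthread_mutex_lock(&rdp->nocb_lock);
	rdp->offloaded = !rdp->offloaded;
	pthread_mutex_unlock(&rdp->nocb_lock);
	pthread_cond_broadcast(&rdp->nocb_state_wq);
}

/* (De-)offload side, cf. rcu_nocb_rdp_offload_wait_cond() and
 * rcu_nocb_rdp_deoffload_wait_cond(): sample the flag under the same
 * lock so it cannot change while being tested; no wakeup can be lost
 * because the predicate is rechecked before every sleep. */
static void wait_for_state(struct fake_rdp *rdp, bool want_offloaded)
{
	pthread_mutex_lock(&rdp->nocb_lock);
	while (rdp->offloaded != want_offloaded)
		pthread_cond_wait(&rdp->nocb_state_wq, &rdp->nocb_lock);
	pthread_mutex_unlock(&rdp->nocb_lock);
}

static struct fake_rdp rdp = {
	.nocb_lock = PTHREAD_MUTEX_INITIALIZER,
	.nocb_state_wq = PTHREAD_COND_INITIALIZER,
	.offloaded = false,
};

static void *offload_requester(void *arg)
{
	wait_for_state(&rdp, true);    /* block until "rcuog" has toggled */
	return NULL;
}

int main(void)
{
	pthread_t waiter;

	pthread_create(&waiter, NULL, offload_requester, NULL);
	toggle_rdp(&rdp);              /* "rcuog" flips the flag, wakes waiter */
	pthread_join(waiter, NULL);
	return 0;
}

Note also the park/unpark ordering the rewrite establishes: on de-offload, the rcuoc kthread is parked before ->nocb_lock is taken, so it cannot be midway through a nocb locked sequence when offloading is deactivated; on offload, it is unparked only after SEGCBLIST_OFFLOADED is observed set.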