@@ -635,8 +635,7 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
 	}
 }
 
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
-			      bool *wake_state)
+static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
 {
 	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
@@ -650,8 +649,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
 		 * We will handle this rdp until it ever gets de-offloaded.
 		 */
 		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-			*wake_state = true;
 		ret = 1;
 	} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
 		   rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
@@ -660,8 +657,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
 		 * We will ignore this rdp until it ever gets re-offloaded.
 		 */
 		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-			*wake_state = true;
		ret = 0;
	} else {
		WARN_ON_ONCE(1);
@@ -877,16 +872,15 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
 	}
 
 	if (rdp_toggling) {
-		bool wake_state = false;
 		int ret;
 
-		ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
+		ret = nocb_gp_toggle_rdp(rdp_toggling);
 		if (ret == 1)
 			list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
 		else if (ret == 0)
 			list_del(&rdp_toggling->nocb_entry_rdp);
-		if (wake_state)
-			swake_up_one(&rdp_toggling->nocb_state_wq);
+
+		swake_up_one(&rdp_toggling->nocb_state_wq);
 	}
 
 	my_rdp->nocb_gp_seq = -1;
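
The rcuog side now issues the nocb_state_wq wakeup unconditionally instead of tracking whether a (de-)offload waiter exists: waking an swait queue with no sleepers is a cheap no-op, so the wake_state bookkeeping buys nothing. A minimal sketch of that pattern, not taken from the patch and using hypothetical demo_* names:

#include <linux/compiler.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_state_wq);
static bool demo_toggle_done;

/* Completion side: publish the state change, then wake unconditionally. */
static void demo_toggle_complete(void)
{
	WRITE_ONCE(demo_toggle_done, true);
	swake_up_one(&demo_state_wq);	/* returns immediately if nobody waits */
}

/* Waiter side, e.g. a (de-)offload worker blocked on the state change. */
static void demo_wait_for_toggle(void)
{
	swait_event_exclusive(demo_state_wq, READ_ONCE(demo_toggle_done));
}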
@@ -913,16 +907,9 @@ static int rcu_nocb_gp_kthread(void *arg)
 	return 0;
 }
 
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
-	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-
-	return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
 static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
 {
-	return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+	return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
 }
 
 /*
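
With kthread parking in charge of (de-)offload synchronization, the CB kthread's wait condition no longer inspects SEGCBLIST flags: the thread sleeps until it either has work or has been asked to park. A minimal sketch of that wait shape (hypothetical demo_* names, not the patch itself):

#include <linux/kthread.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_cb_wq);
static bool demo_cb_sleep = true;

/* Runs in kthread context; kthread_parkme() parks the current thread. */
static void demo_cb_wait(void)
{
	swait_event_interruptible_exclusive(demo_cb_wq,
			!READ_ONCE(demo_cb_sleep) || kthread_should_park());
	if (kthread_should_park())
		kthread_parkme();	/* blocks here until kthread_unpark() */
}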
@@ -934,21 +921,19 @@ static void nocb_cb_wait(struct rcu_data *rdp)
 	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long cur_gp_seq;
 	unsigned long flags;
-	bool needwake_state = false;
 	bool needwake_gp = false;
-	bool can_sleep = true;
 	struct rcu_node *rnp = rdp->mynode;
 
-	do {
-		swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
-						    nocb_cb_wait_cond(rdp));
-
-		if (READ_ONCE(rdp->nocb_cb_sleep)) {
-			WARN_ON(signal_pending(current));
-			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
-		}
-	} while (!nocb_cb_can_run(rdp));
+	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+					    nocb_cb_wait_cond(rdp));
+	if (kthread_should_park()) {
+		kthread_parkme();
+	} else if (READ_ONCE(rdp->nocb_cb_sleep)) {
+		WARN_ON(signal_pending(current));
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+	}
 
+	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
 
 	local_irq_save(flags);
 	rcu_momentary_dyntick_idle();
@@ -971,37 +956,16 @@ static void nocb_cb_wait(struct rcu_data *rdp)
 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}
 
-	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
-			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
-			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-				needwake_state = true;
-		}
-		if (rcu_segcblist_ready_cbs(cblist))
-			can_sleep = false;
+	if (!rcu_segcblist_ready_cbs(cblist)) {
+		WRITE_ONCE(rdp->nocb_cb_sleep, true);
+		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
 	} else {
-		/*
-		 * De-offloading. Clear our flag and notify the de-offload worker.
-		 * We won't touch the callbacks and keep sleeping until we ever
-		 * get re-offloaded.
-		 */
-		WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
-		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
-		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-			needwake_state = true;
+		WRITE_ONCE(rdp->nocb_cb_sleep, false);
 	}
 
-	WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
-	if (rdp->nocb_cb_sleep)
-		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
-
 	rcu_nocb_unlock_irqrestore(rdp, flags);
 	if (needwake_gp)
 		rcu_gp_kthread_wake();
-
-	if (needwake_state)
-		swake_up_one(&rdp->nocb_state_wq);
 }
 
 /*
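
The epilogue of nocb_cb_wait() likewise shrinks to a single question: are more callbacks ready? The answer is published with WRITE_ONCE() so the lockless READ_ONCE() in the wait condition can observe it. A hedged sketch of that handshake, again with hypothetical names:

#include <linux/compiler.h>

static bool demo_cb_sleep;
static int demo_nr_ready;	/* stand-in for rcu_segcblist_ready_cbs() */

static void demo_cb_epilogue(void)
{
	if (!READ_ONCE(demo_nr_ready))
		WRITE_ONCE(demo_cb_sleep, true);	/* nap until the next wakeup */
	else
		WRITE_ONCE(demo_cb_sleep, false);	/* loop straight back to work */
}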
@@ -1094,17 +1058,8 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
 	bool wake_gp = false;
 
 	rcu_segcblist_offload(cblist, offload);
-
-	if (rdp->nocb_cb_sleep)
-		rdp->nocb_cb_sleep = false;
 	rcu_nocb_unlock_irqrestore(rdp, flags);
 
-	/*
-	 * Ignore former value of nocb_cb_sleep and force wake up as it could
-	 * have been spuriously set to false already.
-	 */
-	swake_up_one(&rdp->nocb_cb_wq);
-
 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
 	// Queue this rdp for add/del to/from the list to iterate on rcuog
 	WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
@@ -1161,28 +1116,19 @@ static long rcu_nocb_rdp_deoffload(void *arg)
 		if (wake_gp)
 			wake_up_process(rdp_gp->nocb_gp_kthread);
 
-		/*
-		 * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
-		 * Just wait SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
-		 */
-		if (!rdp->nocb_cb_kthread) {
-			rcu_nocb_lock_irqsave(rdp, flags);
-			rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
-			rcu_nocb_unlock_irqrestore(rdp, flags);
-		}
-
 		swait_event_exclusive(rdp->nocb_state_wq,
-				      !rcu_segcblist_test_flags(cblist,
-								SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
+				      !rcu_segcblist_test_flags(cblist,
+								SEGCBLIST_KTHREAD_GP));
+		if (rdp->nocb_cb_kthread)
+			kthread_park(rdp->nocb_cb_kthread);
 	} else {
 		/*
 		 * No kthread to clear the flags for us or remove the rdp from the nocb list
 		 * to iterate. Do it here instead. Locking doesn't look stricly necessary
 		 * but we stick to paranoia in this rare path.
 		 */
 		rcu_nocb_lock_irqsave(rdp, flags);
-		rcu_segcblist_clear_flags(&rdp->cblist,
-					  SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
 		rcu_nocb_unlock_irqrestore(rdp, flags);
 
 		list_del(&rdp->nocb_entry_rdp);
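
On de-offload, kthread_park() replaces the old SEGCBLIST_KTHREAD_CB flag-and-wake protocol: it does not return until the target thread has reached kthread_parkme(), so the call itself serves as the "CB kthread has quiesced" barrier. A minimal lifecycle sketch under that assumption (demo_* names are hypothetical; the kthread_*() APIs are real):

#include <linux/kthread.h>

static void demo_deoffload(struct task_struct *cb_kthread)
{
	if (cb_kthread)
		kthread_park(cb_kthread);	/* returns only once the thread is parked */
}

static void demo_reoffload(struct task_struct *cb_kthread)
{
	kthread_unpark(cb_kthread);	/* lets the parked thread resume its loop */
}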
@@ -1282,8 +1228,10 @@ static long rcu_nocb_rdp_offload(void *arg)
 	wake_gp = rdp_offload_toggle(rdp, true, flags);
 	if (wake_gp)
 		wake_up_process(rdp_gp->nocb_gp_kthread);
+
+	kthread_unpark(rdp->nocb_cb_kthread);
+
 	swait_event_exclusive(rdp->nocb_state_wq,
-			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
 			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
 
 	/*
@@ -1468,7 +1416,7 @@ void __init rcu_init_nohz(void)
 		if (rcu_segcblist_empty(&rdp->cblist))
 			rcu_segcblist_init(&rdp->cblist);
 		rcu_segcblist_offload(&rdp->cblist, true);
-		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
 		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
 	}
 	rcu_organize_nocb_kthreads();
@@ -1526,11 +1474,16 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
 	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
 	/* Spawn the kthread for this CPU. */
-	t = kthread_run(rcu_nocb_cb_kthread, rdp,
-			"rcuo%c/%d", rcu_state.abbr, cpu);
+	t = kthread_create(rcu_nocb_cb_kthread, rdp,
+			   "rcuo%c/%d", rcu_state.abbr, cpu);
 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
 		goto end;
 
+	if (rcu_rdp_is_offloaded(rdp))
+		wake_up_process(t);
+	else
+		kthread_park(t);
+
 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
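
Spawning switches from kthread_run() to kthread_create(), which leaves the new thread asleep without ever entering its thread function; the caller then either starts it (offloaded CPU) or parks it for later. Parking a created-but-never-woken kthread makes it park inside the kthread core before the thread function runs. A minimal sketch of that policy (demo names are hypothetical, the kthread_*() APIs are real):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *demo_spawn(int (*fn)(void *), void *arg,
				      int cpu, bool offloaded)
{
	struct task_struct *t;

	t = kthread_create(fn, arg, "demo/%d", cpu);
	if (IS_ERR(t))
		return t;
	if (offloaded)
		wake_up_process(t);	/* begin running fn() now */
	else
		kthread_park(t);	/* thread parks before fn() ever runs */
	return t;
}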