@@ -635,8 +635,7 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
         }
 }
 
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
-                              bool *wake_state)
+static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
 {
         struct rcu_segcblist *cblist = &rdp->cblist;
         unsigned long flags;
@@ -650,8 +649,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
                  * We will handle this rdp until it ever gets de-offloaded.
                  */
                 rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-                if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-                        *wake_state = true;
                 ret = 1;
         } else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
                    rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
@@ -660,8 +657,6 @@ static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
                  * We will ignore this rdp until it ever gets re-offloaded.
                  */
                 rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-                if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
-                        *wake_state = true;
                 ret = 0;
         } else {
                 WARN_ON_ONCE(1);
@@ -877,16 +872,15 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
         }
 
         if (rdp_toggling) {
-                bool wake_state = false;
                 int ret;
 
-                ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
+                ret = nocb_gp_toggle_rdp(rdp_toggling);
                 if (ret == 1)
                         list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
                 else if (ret == 0)
                         list_del(&rdp_toggling->nocb_entry_rdp);
-                if (wake_state)
-                        swake_up_one(&rdp_toggling->nocb_state_wq);
+
+                swake_up_one(&rdp_toggling->nocb_state_wq);
         }
 
         my_rdp->nocb_gp_seq = -1;
@@ -913,16 +907,9 @@ static int rcu_nocb_gp_kthread(void *arg)
         return 0;
 }
 
-static inline bool nocb_cb_can_run(struct rcu_data *rdp)
-{
-        u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
-
-        return rcu_segcblist_test_flags(&rdp->cblist, flags);
-}
-
 static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
 {
-        return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
+        return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
 }
 
 /*
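For context, the new wait condition above relies on the kernel's generic kthread parking protocol rather than the SEGCBLIST_KTHREAD_CB handshake. A minimal, self-contained sketch of that protocol (illustrative only, not part of this patch; the worker name is made up): the worker polls kthread_should_park() and parks itself via kthread_parkme(), while a controller quiesces or resumes it with kthread_park()/kthread_unpark().

#include <linux/kthread.h>

/* Hypothetical parkable worker, for illustration only. */
static int example_parkable_worker(void *arg)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        /* Sleeps here until kthread_unpark() is called. */
                        kthread_parkme();
                        continue;
                }
                /* ... process pending work, then wait for more ... */
        }
        return 0;
}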
@@ -934,21 +921,19 @@ static void nocb_cb_wait(struct rcu_data *rdp)
         struct rcu_segcblist *cblist = &rdp->cblist;
         unsigned long cur_gp_seq;
         unsigned long flags;
-        bool needwake_state = false;
         bool needwake_gp = false;
-        bool can_sleep = true;
         struct rcu_node *rnp = rdp->mynode;
 
-        do {
-                swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
-                                                    nocb_cb_wait_cond(rdp));
-
-                if (READ_ONCE(rdp->nocb_cb_sleep)) {
-                        WARN_ON(signal_pending(current));
-                        trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
-                }
-        } while (!nocb_cb_can_run(rdp));
+        swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
+                                            nocb_cb_wait_cond(rdp));
+        if (kthread_should_park()) {
+                kthread_parkme();
+        } else if (READ_ONCE(rdp->nocb_cb_sleep)) {
+                WARN_ON(signal_pending(current));
+                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
+        }
 
+        WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
 
         local_irq_save(flags);
         rcu_momentary_dyntick_idle();
@@ -971,37 +956,16 @@ static void nocb_cb_wait(struct rcu_data *rdp)
                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
         }
 
-        if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
-                if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
-                        rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
-                        if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                                needwake_state = true;
-                }
-                if (rcu_segcblist_ready_cbs(cblist))
-                        can_sleep = false;
+        if (!rcu_segcblist_ready_cbs(cblist)) {
+                WRITE_ONCE(rdp->nocb_cb_sleep, true);
+                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
         } else {
-                /*
-                 * De-offloading. Clear our flag and notify the de-offload worker.
-                 * We won't touch the callbacks and keep sleeping until we ever
-                 * get re-offloaded.
-                 */
-                WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
-                rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
-                if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
-                        needwake_state = true;
+                WRITE_ONCE(rdp->nocb_cb_sleep, false);
         }
 
-        WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
-
-        if (rdp->nocb_cb_sleep)
-                trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
-
         rcu_nocb_unlock_irqrestore(rdp, flags);
         if (needwake_gp)
                 rcu_gp_kthread_wake();
-
-        if (needwake_state)
-                swake_up_one(&rdp->nocb_state_wq);
 }
 
 /*
@@ -1094,17 +1058,8 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
         bool wake_gp = false;
 
         rcu_segcblist_offload(cblist, offload);
-
-        if (rdp->nocb_cb_sleep)
-                rdp->nocb_cb_sleep = false;
         rcu_nocb_unlock_irqrestore(rdp, flags);
 
-        /*
-         * Ignore former value of nocb_cb_sleep and force wake up as it could
-         * have been spuriously set to false already.
-         */
-        swake_up_one(&rdp->nocb_cb_wq);
-
         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
         // Queue this rdp for add/del to/from the list to iterate on rcuog
         WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
@@ -1161,28 +1116,19 @@ static long rcu_nocb_rdp_deoffload(void *arg)
                 if (wake_gp)
                         wake_up_process(rdp_gp->nocb_gp_kthread);
 
-                /*
-                 * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
-                 * Just wait SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
-                 */
-                if (!rdp->nocb_cb_kthread) {
-                        rcu_nocb_lock_irqsave(rdp, flags);
-                        rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
-                        rcu_nocb_unlock_irqrestore(rdp, flags);
-                }
-
                 swait_event_exclusive(rdp->nocb_state_wq,
-                                      !rcu_segcblist_test_flags(cblist,
-                                                                SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
+                                      !rcu_segcblist_test_flags(cblist,
+                                                                SEGCBLIST_KTHREAD_GP));
+                if (rdp->nocb_cb_kthread)
+                        kthread_park(rdp->nocb_cb_kthread);
         } else {
                 /*
                  * No kthread to clear the flags for us or remove the rdp from the nocb list
                  * to iterate. Do it here instead. Locking doesn't look stricly necessary
                  * but we stick to paranoia in this rare path.
                  */
                 rcu_nocb_lock_irqsave(rdp, flags);
-                rcu_segcblist_clear_flags(&rdp->cblist,
-                                          SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+                rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
                 rcu_nocb_unlock_irqrestore(rdp, flags);
 
                 list_del(&rdp->nocb_entry_rdp);
@@ -1282,8 +1228,10 @@ static long rcu_nocb_rdp_offload(void *arg)
         wake_gp = rdp_offload_toggle(rdp, true, flags);
         if (wake_gp)
                 wake_up_process(rdp_gp->nocb_gp_kthread);
+
+        kthread_unpark(rdp->nocb_cb_kthread);
+
         swait_event_exclusive(rdp->nocb_state_wq,
-                              rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
                               rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
 
         /*
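Taken together with the de-offload hunk above, the control side of the CB kthread state machine now reduces to parking and unparking. A rough sketch with hypothetical helper names (assuming the task was created as in rcu_spawn_cpu_nocb_kthread() below):

#include <linux/kthread.h>

/* Illustration only: how de-offload/offload drive the CB kthread. */
static void example_quiesce_cb_kthread(struct task_struct *t)
{
        if (t)
                kthread_park(t);        /* returns once the kthread has parked */
}

static void example_resume_cb_kthread(struct task_struct *t)
{
        if (t)
                kthread_unpark(t);      /* lets the parked kthread run again */
}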
@@ -1468,7 +1416,7 @@ void __init rcu_init_nohz(void)
                 if (rcu_segcblist_empty(&rdp->cblist))
                         rcu_segcblist_init(&rdp->cblist);
                 rcu_segcblist_offload(&rdp->cblist, true);
-                rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
+                rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
                 rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
         }
         rcu_organize_nocb_kthreads();
@@ -1526,11 +1474,16 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
         mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
         /* Spawn the kthread for this CPU. */
-        t = kthread_run(rcu_nocb_cb_kthread, rdp,
-                        "rcuo%c/%d", rcu_state.abbr, cpu);
+        t = kthread_create(rcu_nocb_cb_kthread, rdp,
+                           "rcuo%c/%d", rcu_state.abbr, cpu);
         if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
                 goto end;
 
+        if (rcu_rdp_is_offloaded(rdp))
+                wake_up_process(t);
+        else
+                kthread_park(t);
+
         if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
                 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 
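The spawn-time pattern above, kthread_create() followed by either wake_up_process() or kthread_park(), generalizes to any worker that should stay dormant until explicitly enabled. A condensed sketch under that assumption (function name and format string are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *example_spawn_maybe_parked(int (*fn)(void *),
                                                       void *data, int cpu,
                                                       bool should_run)
{
        struct task_struct *t;

        /* Create the kthread without scheduling it yet. */
        t = kthread_create(fn, data, "example_worker/%d", cpu);
        if (IS_ERR(t))
                return t;

        if (should_run)
                wake_up_process(t);     /* start running fn() right away */
        else
                kthread_park(t);        /* stays parked until kthread_unpark() */

        return t;
}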