@@ -4894,6 +4894,22 @@ rcu_boot_init_percpu_data(int cpu)
         rcu_boot_init_nocb_percpu_data(rdp);
 }
 
+static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
+{
+        cpumask_var_t affinity;
+        int cpu;
+
+        if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+                return;
+
+        for_each_leaf_node_possible_cpu(rnp, cpu)
+                cpumask_set_cpu(cpu, affinity);
+
+        kthread_affine_preferred(t, affinity);
+
+        free_cpumask_var(affinity);
+}
+
 struct kthread_worker *rcu_exp_gp_kworker;
 
 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
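
The new rcu_thread_affine_rnp() builds a temporary cpumask covering the leaf rcu_node's possible CPUs, hands it to kthread_affine_preferred(), and frees it immediately, which only works if the kthread core copies the mask rather than holding on to it. Below is a minimal sketch of the same pattern on an ordinary kthread, under that assumption; the thread function and names are illustrative, not part of this patch.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>

/* Illustrative thread function: idle until asked to stop. */
static int demo_threadfn(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

/*
 * Sketch: create the thread stopped, express a preference for the CPUs
 * of @node, then let it run.  Assumes kthread_affine_preferred() copies
 * the mask, so the local cpumask can be freed right away, as the RCU
 * code above does.
 */
static struct task_struct *demo_spawn_on_node(int node)
{
        struct task_struct *t;
        cpumask_var_t mask;

        t = kthread_create(demo_threadfn, NULL, "demo/%d", node);
        if (IS_ERR(t))
                return t;

        if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
                cpumask_copy(mask, cpumask_of_node(node));
                kthread_affine_preferred(t, mask);
                free_cpumask_var(mask);
        }

        wake_up_process(t);
        return t;
}

The preference has to be recorded before the thread first runs, which is why the sketch uses kthread_create() rather than kthread_run().
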
@@ -4906,7 +4922,7 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
         if (rnp->exp_kworker)
                 return;
 
-        kworker = kthread_run_worker(0, name, rnp_index);
+        kworker = kthread_create_worker(0, name, rnp_index);
         if (IS_ERR_OR_NULL(kworker)) {
                 pr_err("Failed to create par gp kworker on %d/%d\n",
                         rnp->grplo, rnp->grphi);
@@ -4916,16 +4932,9 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
 
         if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
                 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
-}
 
-static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
-{
-        struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
-
-        if (!kworker)
-                return NULL;
-
-        return kworker->task;
+        rcu_thread_affine_rnp(kworker->task, rnp);
+        wake_up_process(kworker->task);
 }
 
 static void __init rcu_start_exp_gp_kworker(void)
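
The hunks above switch rcu_spawn_exp_par_gp_kworker() from kthread_run_worker() back to kthread_create_worker() so the worker thread is created without being woken, record the rcu_node affinity preference, and only then wake it; rcu_exp_par_gp_task() disappears because nothing else needs to look the task up for repinning anymore. A minimal sketch of that create/affine/wake sequence on a generic worker, assuming kthread_create_worker() no longer wakes the thread after the kthread_run_worker() split earlier in this series (names are illustrative):

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/*
 * Sketch: bring up a kthread_worker whose thread prefers the CPUs in
 * @pref.  The preference is applied before the worker thread first
 * schedules; wake_up_process() then starts it, mirroring the RCU
 * change above.
 */
static struct kthread_worker *demo_create_affine_worker(const struct cpumask *pref,
                                                        int id)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "demo_worker/%d", id);
        if (IS_ERR(worker))
                return worker;

        kthread_affine_preferred(worker->task, pref);
        wake_up_process(worker->task);

        return worker;
}
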
@@ -5010,79 +5019,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
         return 0;
 }
 
-static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
-{
-        cpumask_var_t affinity;
-        int cpu;
-
-        if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
-                return;
-
-        for_each_leaf_node_possible_cpu(rnp, cpu)
-                cpumask_set_cpu(cpu, affinity);
-
-        kthread_affine_preferred(t, affinity);
-
-        free_cpumask_var(affinity);
-}
-
-/*
- * Update kthreads affinity during CPU-hotplug changes.
- *
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question.  The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU.  If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
- */
-static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
-{
-        cpumask_var_t cm;
-        unsigned long mask;
-        struct rcu_data *rdp;
-        struct rcu_node *rnp;
-        struct task_struct *task_exp;
-
-        rdp = per_cpu_ptr(&rcu_data, cpu);
-        rnp = rdp->mynode;
-
-        task_exp = rcu_exp_par_gp_task(rnp);
-
-        /*
-         * If CPU is the boot one, this task is created later from early
-         * initcall since kthreadd must be created first.
-         */
-        if (!task_exp)
-                return;
-
-        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
-                return;
-
-        mutex_lock(&rnp->kthread_mutex);
-        mask = rcu_rnp_online_cpus(rnp);
-        for_each_leaf_node_possible_cpu(rnp, cpu)
-                if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
-                    cpu != outgoingcpu)
-                        cpumask_set_cpu(cpu, cm);
-        cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
-        if (cpumask_empty(cm)) {
-                cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
-                if (outgoingcpu >= 0)
-                        cpumask_clear_cpu(outgoingcpu, cm);
-        }
-
-        if (task_exp)
-                set_cpus_allowed_ptr(task_exp, cm);
-
-        mutex_unlock(&rnp->kthread_mutex);
-
-        free_cpumask_var(cm);
-}
-
 /*
  * Has the specified (known valid) CPU ever been fully online?
  */
@@ -5111,7 +5047,6 @@ int rcutree_online_cpu(unsigned int cpu)
5111
5047
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE )
5112
5048
return 0 ; /* Too early in boot for scheduler work. */
5113
5049
sync_sched_exp_online_cleanup (cpu );
5114
- rcutree_affinity_setting (cpu , -1 );
5115
5050
5116
5051
// Stop-machine done, so allow nohz_full to disable tick.
5117
5052
tick_dep_clear (TICK_DEP_BIT_RCU );
@@ -5328,8 +5263,6 @@ int rcutree_offline_cpu(unsigned int cpu)
5328
5263
rnp -> ffmask &= ~rdp -> grpmask ;
5329
5264
raw_spin_unlock_irqrestore_rcu_node (rnp , flags );
5330
5265
5331
- rcutree_affinity_setting (cpu , cpu );
5332
-
5333
5266
// nohz_full CPUs need the tick for stop-machine to work quickly
5334
5267
tick_dep_set (TICK_DEP_BIT_RCU );
5335
5268
return 0 ;
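
With the preference recorded once at worker creation, rcutree_online_cpu() and rcutree_offline_cpu() no longer repin the exp kworker by hand; the removal of rcutree_affinity_setting() implies the kthread core now follows CPU hotplug for threads that have a preferred affinity. Using such a worker is unchanged. A small usage sketch with the standard kthread_worker queueing API (the work function below is illustrative):

#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Illustrative work function: report where the worker ran. */
static void demo_work_fn(struct kthread_work *work)
{
        pr_info("demo work ran on CPU %d\n", raw_smp_processor_id());
}

/*
 * Sketch: queue and flush one work item on a worker created as in the
 * previous sketch.  The caller makes no per-hotplug
 * set_cpus_allowed_ptr() calls of its own.
 */
static void demo_use_worker(struct kthread_worker *worker)
{
        struct kthread_work work;

        kthread_init_work(&work, demo_work_fn);
        kthread_queue_work(worker, &work);
        kthread_flush_work(&work);
}
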