Commit 8044c58

Author: Frederic Weisbecker

rcu: Use kthread preferred affinity for RCU exp kworkers

Now that kthreads have an infrastructure to handle preferred affinity
against CPU hotplug and housekeeping cpumask, convert RCU exp workers
to use it instead of handling all the constraints by themselves.

Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>

1 parent b04e317 · commit 8044c58
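
For context, the kthread preferred-affinity infrastructure referenced above lets a kthread declare the set of CPUs it would like to run on; the kthread core then applies that preference whenever possible, re-evaluates it across CPU hotplug, and constrains it to the housekeeping cpumask. Below is a minimal sketch of the pattern, assuming the kthread_affine_preferred() API from this series; my_thread_fn, spawn_affine_kthread, and the CPU-range parameters are hypothetical, not part of the patch:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical worker loop: sleep until told to stop. */
static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *spawn_affine_kthread(int first_cpu, int nr_cpus)
{
	struct task_struct *t;
	cpumask_var_t mask;
	int cpu;

	/* Create without waking, so the preference can still be recorded. */
	t = kthread_create(my_thread_fn, NULL, "my_worker");
	if (IS_ERR(t))
		return t;

	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		for (cpu = first_cpu; cpu < first_cpu + nr_cpus; cpu++)
			cpumask_set_cpu(cpu, mask);
		/*
		 * Record the preference; the kthread core keeps it across
		 * CPU hotplug and intersects it with the housekeeping
		 * cpumask. It must be set before the task first runs.
		 */
		kthread_affine_preferred(t, mask);
		free_cpumask_var(mask);
	}

	wake_up_process(t);
	return t;
}

The diff below follows the same create -> affine -> wake ordering for the per-node exp kworkers.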

1 file changed, 19 insertions(+), 86 deletions(-)

kernel/rcu/tree.c

@@ -4894,6 +4894,22 @@ rcu_boot_init_percpu_data(int cpu)
 	rcu_boot_init_nocb_percpu_data(rdp);
 }
 
+static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
+{
+	cpumask_var_t affinity;
+	int cpu;
+
+	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+		return;
+
+	for_each_leaf_node_possible_cpu(rnp, cpu)
+		cpumask_set_cpu(cpu, affinity);
+
+	kthread_affine_preferred(t, affinity);
+
+	free_cpumask_var(affinity);
+}
+
 struct kthread_worker *rcu_exp_gp_kworker;
 
 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
@@ -4906,7 +4922,7 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
 	if (rnp->exp_kworker)
 		return;
 
-	kworker = kthread_run_worker(0, name, rnp_index);
+	kworker = kthread_create_worker(0, name, rnp_index);
 	if (IS_ERR_OR_NULL(kworker)) {
 		pr_err("Failed to create par gp kworker on %d/%d\n",
 		       rnp->grplo, rnp->grphi);
@@ -4916,16 +4932,9 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
 
 	if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
 		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
-}
 
-static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
-{
-	struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
-
-	if (!kworker)
-		return NULL;
-
-	return kworker->task;
+	rcu_thread_affine_rnp(kworker->task, rnp);
+	wake_up_process(kworker->task);
 }
 
 static void __init rcu_start_exp_gp_kworker(void)
@@ -5010,79 +5019,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	return 0;
 }
 
-static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
-{
-	cpumask_var_t affinity;
-	int cpu;
-
-	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
-		return;
-
-	for_each_leaf_node_possible_cpu(rnp, cpu)
-		cpumask_set_cpu(cpu, affinity);
-
-	kthread_affine_preferred(t, affinity);
-
-	free_cpumask_var(affinity);
-}
-
-/*
- * Update kthreads affinity during CPU-hotplug changes.
- *
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
- */
-static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
-{
-	cpumask_var_t cm;
-	unsigned long mask;
-	struct rcu_data *rdp;
-	struct rcu_node *rnp;
-	struct task_struct *task_exp;
-
-	rdp = per_cpu_ptr(&rcu_data, cpu);
-	rnp = rdp->mynode;
-
-	task_exp = rcu_exp_par_gp_task(rnp);
-
-	/*
-	 * If CPU is the boot one, this task is created later from early
-	 * initcall since kthreadd must be created first.
-	 */
-	if (!task_exp)
-		return;
-
-	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
-		return;
-
-	mutex_lock(&rnp->kthread_mutex);
-	mask = rcu_rnp_online_cpus(rnp);
-	for_each_leaf_node_possible_cpu(rnp, cpu)
-		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
-		    cpu != outgoingcpu)
-			cpumask_set_cpu(cpu, cm);
-	cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
-	if (cpumask_empty(cm)) {
-		cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
-		if (outgoingcpu >= 0)
-			cpumask_clear_cpu(outgoingcpu, cm);
-	}
-
-	if (task_exp)
-		set_cpus_allowed_ptr(task_exp, cm);
-
-	mutex_unlock(&rnp->kthread_mutex);
-
-	free_cpumask_var(cm);
-}
-
 /*
  * Has the specified (known valid) CPU ever been fully online?
  */
@@ -5111,7 +5047,6 @@ int rcutree_online_cpu(unsigned int cpu)
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return 0; /* Too early in boot for scheduler work. */
 	sync_sched_exp_online_cleanup(cpu);
-	rcutree_affinity_setting(cpu, -1);
 
 	// Stop-machine done, so allow nohz_full to disable tick.
 	tick_dep_clear(TICK_DEP_BIT_RCU);
@@ -5328,8 +5263,6 @@ int rcutree_offline_cpu(unsigned int cpu)
 	rnp->ffmask &= ~rdp->grpmask;
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
-	rcutree_affinity_setting(cpu, cpu);
-
 	// nohz_full CPUs need the tick for stop-machine to work quickly
 	tick_dep_set(TICK_DEP_BIT_RCU);
 	return 0;
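
Note the resulting ordering in rcu_spawn_exp_par_gp_kworker(): the worker is now created without being woken (kthread_create_worker() in place of kthread_run_worker()), its per-node preference is recorded through rcu_thread_affine_rnp(), and only then is it started with wake_up_process(). The open-coded rcutree_affinity_setting() hotplug handling, including its online-CPU and housekeeping_cpumask(HK_TYPE_RCU) filtering, can be deleted because the kthread core now provides the equivalent behavior generically.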
