Skip to content

Commit 23ca523

Browse files
Chen Ridong authored and Tejun Heo (htejun) committed
cgroup/cpuset: move legacy hotplug update to cpuset-v1.c
There are some differences in hotplug update handling between cpuset v1 and cpuset v2. Move the legacy code to cpuset-v1.c. 'update_tasks_cpumask' and 'update_tasks_nodemask' are used by both cpuset v1 and cpuset v2, so declare them in cpuset-internal.h. The change from the original code is that it uses the callback_lock helpers to take and release callback_lock. Signed-off-by: Chen Ridong <[email protected]> Acked-by: Waiman Long <[email protected]> Signed-off-by: Tejun Heo <[email protected]>
1 parent 530020f commit 23ca523

File tree

3 files changed

+98
-94
lines changed

3 files changed

+98
-94
lines changed

kernel/cgroup/cpuset-internal.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,8 @@ static inline int is_spread_slab(const struct cpuset *cs)
241241
void rebuild_sched_domains_locked(void);
242242
void callback_lock_irq(void);
243243
void callback_unlock_irq(void);
244+
void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
245+
void update_tasks_nodemask(struct cpuset *cs);
244246

245247
/*
246248
* cpuset-v1.c
@@ -253,5 +255,8 @@ s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft);
253255
void cpuset_update_task_spread_flags(struct cpuset *cs,
254256
struct task_struct *tsk);
255257
void update_tasks_flags(struct cpuset *cs);
258+
void hotplug_update_tasks_legacy(struct cpuset *cs,
259+
struct cpumask *new_cpus, nodemask_t *new_mems,
260+
bool cpus_updated, bool mems_updated);
256261

257262
#endif /* __CPUSET_INTERNAL_H */

kernel/cgroup/cpuset-v1.c

Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,14 @@
22

33
#include "cpuset-internal.h"
44

5+
/*
6+
* Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchrously
7+
*/
8+
struct cpuset_remove_tasks_struct {
9+
struct work_struct work;
10+
struct cpuset *cs;
11+
};
12+
513
/*
614
* Frequency meter - How fast is some event occurring?
715
*
@@ -236,3 +244,86 @@ void update_tasks_flags(struct cpuset *cs)
236244
cpuset_update_task_spread_flags(cs, task);
237245
css_task_iter_end(&it);
238246
}
247+
248+
/*
249+
* If CPU and/or memory hotplug handlers, below, unplug any CPUs
250+
* or memory nodes, we need to walk over the cpuset hierarchy,
251+
* removing that CPU or node from all cpusets. If this removes the
252+
* last CPU or node from a cpuset, then move the tasks in the empty
253+
* cpuset to its next-highest non-empty parent.
254+
*/
255+
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
256+
{
257+
struct cpuset *parent;
258+
259+
/*
260+
* Find its next-highest non-empty parent, (top cpuset
261+
* has online cpus, so can't be empty).
262+
*/
263+
parent = parent_cs(cs);
264+
while (cpumask_empty(parent->cpus_allowed) ||
265+
nodes_empty(parent->mems_allowed))
266+
parent = parent_cs(parent);
267+
268+
if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
269+
pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
270+
pr_cont_cgroup_name(cs->css.cgroup);
271+
pr_cont("\n");
272+
}
273+
}
274+
275+
static void cpuset_migrate_tasks_workfn(struct work_struct *work)
276+
{
277+
struct cpuset_remove_tasks_struct *s;
278+
279+
s = container_of(work, struct cpuset_remove_tasks_struct, work);
280+
remove_tasks_in_empty_cpuset(s->cs);
281+
css_put(&s->cs->css);
282+
kfree(s);
283+
}
284+
285+
void hotplug_update_tasks_legacy(struct cpuset *cs,
286+
struct cpumask *new_cpus, nodemask_t *new_mems,
287+
bool cpus_updated, bool mems_updated)
288+
{
289+
bool is_empty;
290+
291+
callback_lock_irq();
292+
cpumask_copy(cs->cpus_allowed, new_cpus);
293+
cpumask_copy(cs->effective_cpus, new_cpus);
294+
cs->mems_allowed = *new_mems;
295+
cs->effective_mems = *new_mems;
296+
callback_unlock_irq();
297+
298+
/*
299+
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
300+
* as the tasks will be migrated to an ancestor.
301+
*/
302+
if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
303+
update_tasks_cpumask(cs, new_cpus);
304+
if (mems_updated && !nodes_empty(cs->mems_allowed))
305+
update_tasks_nodemask(cs);
306+
307+
is_empty = cpumask_empty(cs->cpus_allowed) ||
308+
nodes_empty(cs->mems_allowed);
309+
310+
/*
311+
* Move tasks to the nearest ancestor with execution resources,
312+
* This is full cgroup operation which will also call back into
313+
* cpuset. Execute it asynchronously using workqueue.
314+
*/
315+
if (is_empty && cs->css.cgroup->nr_populated_csets &&
316+
css_tryget_online(&cs->css)) {
317+
struct cpuset_remove_tasks_struct *s;
318+
319+
s = kzalloc(sizeof(*s), GFP_KERNEL);
320+
if (WARN_ON_ONCE(!s)) {
321+
css_put(&cs->css);
322+
return;
323+
}
324+
325+
s->cs = cs;
326+
INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
327+
schedule_work(&s->work);
328+
}
329+
}

kernel/cgroup/cpuset.c

Lines changed: 2 additions & 94 deletions
Original file line numberDiff line numberDiff line change
@@ -65,14 +65,6 @@ static const char * const perr_strings[] = {
6565
[PERR_ACCESS] = "Enable partition not permitted",
6666
};
6767

68-
/*
69-
* Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchrously
70-
*/
71-
struct cpuset_remove_tasks_struct {
72-
struct work_struct work;
73-
struct cpuset *cs;
74-
};
75-
7668
/*
7769
* Exclusive CPUs distributed out to sub-partitions of top_cpuset
7870
*/
@@ -1144,7 +1136,7 @@ void rebuild_sched_domains(void)
11441136
* is used instead of effective_cpus to make sure all offline CPUs are also
11451137
* included as hotplug code won't update cpumasks for tasks in top_cpuset.
11461138
*/
1147-
static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1139+
void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
11481140
{
11491141
struct css_task_iter it;
11501142
struct task_struct *task;
@@ -2597,7 +2589,7 @@ static void *cpuset_being_rebound;
25972589
* effective cpuset's. As this function is called with cpuset_mutex held,
25982590
* cpuset membership stays stable.
25992591
*/
2600-
static void update_tasks_nodemask(struct cpuset *cs)
2592+
void update_tasks_nodemask(struct cpuset *cs)
26012593
{
26022594
static nodemask_t newmems; /* protected by cpuset_mutex */
26032595
struct css_task_iter it;
@@ -3936,90 +3928,6 @@ int __init cpuset_init(void)
39363928
return 0;
39373929
}
39383930

3939-
/*
3940-
* If CPU and/or memory hotplug handlers, below, unplug any CPUs
3941-
* or memory nodes, we need to walk over the cpuset hierarchy,
3942-
* removing that CPU or node from all cpusets. If this removes the
3943-
* last CPU or node from a cpuset, then move the tasks in the empty
3944-
* cpuset to its next-highest non-empty parent.
3945-
*/
3946-
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3947-
{
3948-
struct cpuset *parent;
3949-
3950-
/*
3951-
* Find its next-highest non-empty parent, (top cpuset
3952-
* has online cpus, so can't be empty).
3953-
*/
3954-
parent = parent_cs(cs);
3955-
while (cpumask_empty(parent->cpus_allowed) ||
3956-
nodes_empty(parent->mems_allowed))
3957-
parent = parent_cs(parent);
3958-
3959-
if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3960-
pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3961-
pr_cont_cgroup_name(cs->css.cgroup);
3962-
pr_cont("\n");
3963-
}
3964-
}
3965-
3966-
static void cpuset_migrate_tasks_workfn(struct work_struct *work)
3967-
{
3968-
struct cpuset_remove_tasks_struct *s;
3969-
3970-
s = container_of(work, struct cpuset_remove_tasks_struct, work);
3971-
remove_tasks_in_empty_cpuset(s->cs);
3972-
css_put(&s->cs->css);
3973-
kfree(s);
3974-
}
3975-
3976-
static void
3977-
hotplug_update_tasks_legacy(struct cpuset *cs,
3978-
struct cpumask *new_cpus, nodemask_t *new_mems,
3979-
bool cpus_updated, bool mems_updated)
3980-
{
3981-
bool is_empty;
3982-
3983-
spin_lock_irq(&callback_lock);
3984-
cpumask_copy(cs->cpus_allowed, new_cpus);
3985-
cpumask_copy(cs->effective_cpus, new_cpus);
3986-
cs->mems_allowed = *new_mems;
3987-
cs->effective_mems = *new_mems;
3988-
spin_unlock_irq(&callback_lock);
3989-
3990-
/*
3991-
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
3992-
* as the tasks will be migrated to an ancestor.
3993-
*/
3994-
if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3995-
update_tasks_cpumask(cs, new_cpus);
3996-
if (mems_updated && !nodes_empty(cs->mems_allowed))
3997-
update_tasks_nodemask(cs);
3998-
3999-
is_empty = cpumask_empty(cs->cpus_allowed) ||
4000-
nodes_empty(cs->mems_allowed);
4001-
4002-
/*
4003-
* Move tasks to the nearest ancestor with execution resources,
4004-
* This is full cgroup operation which will also call back into
4005-
* cpuset. Execute it asynchronously using workqueue.
4006-
*/
4007-
if (is_empty && cs->css.cgroup->nr_populated_csets &&
4008-
css_tryget_online(&cs->css)) {
4009-
struct cpuset_remove_tasks_struct *s;
4010-
4011-
s = kzalloc(sizeof(*s), GFP_KERNEL);
4012-
if (WARN_ON_ONCE(!s)) {
4013-
css_put(&cs->css);
4014-
return;
4015-
}
4016-
4017-
s->cs = cs;
4018-
INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
4019-
schedule_work(&s->work);
4020-
}
4021-
}
4022-
40233931
static void
40243932
hotplug_update_tasks(struct cpuset *cs,
40253933
struct cpumask *new_cpus, nodemask_t *new_mems,

0 commit comments

Comments
 (0)