Skip to content

Commit 50e7663

Browse files
Peter Zijlstra authored
and Ingo Molnar committed
sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
Cpusets vs. suspend-resume is _completely_ broken. And it got noticed because it now resulted in non-cpuset usage breaking too. On suspend cpuset_cpu_inactive() doesn't call into cpuset_update_active_cpus() because it doesn't want to move tasks about, there is no need, all tasks are frozen and won't run again until after we've resumed everything. But this means that when we finally do call into cpuset_update_active_cpus() after resuming the last frozen cpu in cpuset_cpu_active(), the top_cpuset will not have any difference with the cpu_active_mask and thus it will not in fact do _anything_. So the cpuset configuration will not be restored. This was largely hidden because we would unconditionally create identity domains and mobile users would not in fact use cpusets much. And servers that do use cpusets tend to not suspend-resume much. An additional problem is that we'd not in fact wait for the cpuset work to finish before resuming the tasks, allowing spurious migrations outside of the specified domains. Fix the rebuild by introducing cpuset_force_rebuild() and fix the ordering with cpuset_wait_for_hotplug(). Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Mike Galbraith <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rafael J. Wysocki <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: deb7aa3 ("cpuset: reorganize CPU / memory hotplug handling") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent a731ebe commit 50e7663

File tree

4 files changed

+28
-6
lines changed

4 files changed

+28
-6
lines changed

include/linux/cpuset.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)
5151

5252
extern int cpuset_init(void);
5353
extern void cpuset_init_smp(void);
54+
extern void cpuset_force_rebuild(void);
5455
extern void cpuset_update_active_cpus(void);
56+
extern void cpuset_wait_for_hotplug(void);
5557
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
5658
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
5759
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
164166
static inline int cpuset_init(void) { return 0; }
165167
static inline void cpuset_init_smp(void) {}
166168

169+
static inline void cpuset_force_rebuild(void) { }
170+
167171
static inline void cpuset_update_active_cpus(void)
168172
{
169173
partition_sched_domains(1, NULL, NULL);
170174
}
171175

176+
static inline void cpuset_wait_for_hotplug(void) { }
177+
172178
static inline void cpuset_cpus_allowed(struct task_struct *p,
173179
struct cpumask *mask)
174180
{

kernel/cgroup/cpuset.c

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2267,6 +2267,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
22672267
mutex_unlock(&cpuset_mutex);
22682268
}
22692269

2270+
static bool force_rebuild;
2271+
2272+
void cpuset_force_rebuild(void)
2273+
{
2274+
force_rebuild = true;
2275+
}
2276+
22702277
/**
22712278
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
22722279
*
@@ -2341,8 +2348,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
23412348
}
23422349

23432350
/* rebuild sched domains if cpus_allowed has changed */
2344-
if (cpus_updated)
2351+
if (cpus_updated || force_rebuild) {
2352+
force_rebuild = false;
23452353
rebuild_sched_domains();
2354+
}
23462355
}
23472356

23482357
void cpuset_update_active_cpus(void)
@@ -2355,6 +2364,11 @@ void cpuset_update_active_cpus(void)
23552364
schedule_work(&cpuset_hotplug_work);
23562365
}
23572366

2367+
void cpuset_wait_for_hotplug(void)
2368+
{
2369+
flush_work(&cpuset_hotplug_work);
2370+
}
2371+
23582372
/*
23592373
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
23602374
* Call this routine anytime after node_states[N_MEMORY] changes.

kernel/power/process.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,9 @@
2020
#include <linux/workqueue.h>
2121
#include <linux/kmod.h>
2222
#include <trace/events/power.h>
23+
#include <linux/cpuset.h>
2324

24-
/*
25+
/*
2526
* Timeout for stopping processes
2627
*/
2728
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
202203
__usermodehelper_set_disable_depth(UMH_FREEZING);
203204
thaw_workqueues();
204205

206+
cpuset_wait_for_hotplug();
207+
205208
read_lock(&tasklist_lock);
206209
for_each_process_thread(g, p) {
207210
/* No other threads should have PF_SUSPEND_TASK set */

kernel/sched/core.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
55565556
* operation in the resume sequence, just build a single sched
55575557
* domain, ignoring cpusets.
55585558
*/
5559-
num_cpus_frozen--;
5560-
if (likely(num_cpus_frozen)) {
5561-
partition_sched_domains(1, NULL, NULL);
5559+
partition_sched_domains(1, NULL, NULL);
5560+
if (--num_cpus_frozen)
55625561
return;
5563-
}
55645562
/*
55655563
* This is the last CPU online operation. So fall through and
55665564
* restore the original sched domains by considering the
55675565
* cpuset configurations.
55685566
*/
5567+
cpuset_force_rebuild();
55695568
}
55705569
cpuset_update_active_cpus();
55715570
}

0 commit comments

Comments
 (0)