
Commit f75f4d6

Mike Galbraith authored and jnettlet committed
cpuset: Convert callback_lock to raw_spinlock_t
The two commits below add up to a cpuset might_sleep() splat for RT:

  8447a0f cpuset: convert callback_mutex to a spinlock
  344736f cpuset: simplify cpuset_node_allowed API

BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:995
in_atomic(): 0, irqs_disabled(): 1, pid: 11718, name: cset
CPU: 135 PID: 11718 Comm: cset Tainted: G            E   4.10.0-rt1-rt #4
Hardware name: Intel Corporation BRICKLAND/BRICKLAND, BIOS BRHSXSD1.86B.0056.R01.1409242327 09/24/2014
Call Trace:
 ? dump_stack+0x5c/0x81
 ? ___might_sleep+0xf4/0x170
 ? rt_spin_lock+0x1c/0x50
 ? __cpuset_node_allowed+0x66/0xc0
 ? ___slab_alloc+0x390/0x570 <disables IRQs>
 ? anon_vma_fork+0x8f/0x140
 ? copy_page_range+0x6cf/0xb00
 ? anon_vma_fork+0x8f/0x140
 ? __slab_alloc.isra.74+0x5a/0x81
 ? anon_vma_fork+0x8f/0x140
 ? kmem_cache_alloc+0x1b5/0x1f0
 ? anon_vma_fork+0x8f/0x140
 ? copy_process.part.35+0x1670/0x1ee0
 ? _do_fork+0xdd/0x3f0
 ? _do_fork+0xdd/0x3f0
 ? do_syscall_64+0x61/0x170
 ? entry_SYSCALL64_slow_path+0x25/0x25

The latter ensured that a NUMA box WILL take callback_lock in atomic
context by removing the allocator and reclaim path __GFP_HARDWALL
usage, which previously prevented such contexts from taking callback_mutex.

One option would be to reinstate __GFP_HARDWALL protections for RT;
however, as the 8447a0f changelog states:

  The callback_mutex is only used to synchronize reads/updates of cpusets'
  flags and cpu/node masks. These operations should always proceed fast so
  there's no reason why we can't use a spinlock instead of the mutex.

Cc: [email protected]
Signed-off-by: Mike Galbraith <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
1 parent 8472a33 commit f75f4d6
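
For background on the conversion: with PREEMPT_RT, a spinlock_t is backed by a sleeping rtmutex, so taking one in a context that cannot sleep (here, the allocator's IRQs-off slab path calling __cpuset_node_allowed()) produces the might_sleep() splat above. A raw_spinlock_t busy-waits on every configuration and is therefore safe in atomic context. Below is a minimal sketch of the pattern this patch applies throughout kernel/cpuset.c; the names example_lock, example_data, example_read and example_write are hypothetical illustrations, not part of the patch.

/*
 * Sketch of the spinlock_t -> raw_spinlock_t conversion pattern.
 * All identifiers here are hypothetical, for illustration only.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* was: DEFINE_SPINLOCK(example_lock) */
static int example_data;

/* Reader that may run in atomic context (e.g. with IRQs already disabled). */
static int example_read(void)
{
	unsigned long flags;
	int val;

	raw_spin_lock_irqsave(&example_lock, flags);	/* never sleeps, even on RT */
	val = example_data;
	raw_spin_unlock_irqrestore(&example_lock, flags);

	return val;
}

/* Writer in process context; the critical section stays short and bounded. */
static void example_write(int val)
{
	raw_spin_lock_irq(&example_lock);
	example_data = val;
	raw_spin_unlock_irq(&example_lock);
}

The cost is that raw spinlock sections are non-preemptible even on RT, which is acceptable here because, as the quoted 8447a0f changelog notes, callback_lock only guards quick reads and updates of cpuset flags and cpu/node masks.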

1 file changed, 33 insertions(+), 33 deletions(-)


kernel/cpuset.c

@@ -285,7 +285,7 @@ static struct cpuset top_cpuset = {
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
@@ -908,9 +908,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 			continue;
 		rcu_read_unlock();
 
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		cpumask_copy(cp->effective_cpus, new_cpus);
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -975,9 +975,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		return retval;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->cpus_allowed as a temp variable */
 	update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1177,9 +1177,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 			continue;
 		rcu_read_unlock();
 
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		cp->effective_mems = *new_mems;
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1247,9 +1247,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		goto done;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->mems_allowed = trialcs->mems_allowed;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1340,9 +1340,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->flags = trialcs->flags;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		rebuild_sched_domains_locked();
@@ -1757,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 	cpuset_filetype_t type = seq_cft(sf)->private;
 	int ret = 0;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 
 	switch (type) {
 	case FILE_CPULIST:
@@ -1776,7 +1776,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 		ret = -EINVAL;
 	}
 
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 	return ret;
 }
 
@@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
 	cpuset_inc();
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
 	}
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
@@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	}
 	rcu_read_unlock();
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
 	return 0;
@@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
 	mutex_lock(&cpuset_mutex);
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 
 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
 	}
 
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 	mutex_unlock(&cpuset_mutex);
 }
 
@@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
 {
 	bool is_empty;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, new_cpus);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->mems_allowed = *new_mems;
 	cs->effective_mems = *new_mems;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/*
 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs,
 	if (nodes_empty(*new_mems))
 		*new_mems = parent_cs(cs)->effective_mems;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->effective_mems = *new_mems;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
 		update_tasks_cpumask(cs);
@@ -2317,21 +2317,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
 
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			top_cpuset.mems_allowed = new_mems;
 		top_cpuset.effective_mems = new_mems;
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 		update_tasks_nodemask(&top_cpuset);
 	}
 
@@ -2436,11 +2436,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_cpus(task_cs(tsk), pmask);
 	rcu_read_unlock();
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 }
 
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
@@ -2488,11 +2488,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	nodemask_t mask;
 	unsigned long flags;
 
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_mems(task_cs(tsk), &mask);
 	rcu_read_unlock();
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 
 	return mask;
 }
@@ -2584,14 +2584,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
 		return true;
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 
 	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
 	rcu_read_unlock();
 
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 	return allowed;
 }