Commit c172e0a

compudj (Mathieu Desnoyers) authored and Ingo Molnar committed
sched/membarrier: Return -ENOMEM to userspace on memory allocation failure
Remove the IPI fallback code from membarrier to deal with very infrequent cpumask memory allocation failure. Use GFP_KERNEL rather than GFP_NOWAIT, and relax the blocking guarantees for the expedited membarrier system call commands, allowing them to block if waiting for memory to be made available. In addition, -ENOMEM can now be returned to user-space if the cpumask memory allocation fails.

Signed-off-by: Mathieu Desnoyers <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Kirill Tkhai <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Russell King - ARM Linux admin <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
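The user-visible change is the new error code: an expedited membarrier command can now fail with -ENOMEM instead of silently degrading to the per-CPU smp_call_function_single() fallback. A minimal userspace sketch of handling it (illustrative only, not part of the commit; assumes a kernel with this patch and the <linux/membarrier.h> UAPI header):

#include <errno.h>
#include <stdio.h>
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

/* glibc provides no wrapper for membarrier(2); invoke it directly. */
static int membarrier(int cmd, unsigned int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Global expedited commands require prior registration. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0) < 0) {
		perror("membarrier register");
		return 1;
	}
	if (membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0) < 0) {
		if (errno == ENOMEM)
			/* New with this commit: cpumask allocation failed. */
			fprintf(stderr, "membarrier: out of memory\n");
		else
			perror("membarrier");
		return 1;
	}
	return 0;
}

Previously the GFP_NOWAIT allocation failure was absorbed by falling back to one smp_call_function_single() IPI per CPU, so userspace never observed it; callers that treat the expedited commands as infallible may want the extra errno check.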
1 parent: c6d68c1

1 file changed, 20 insertions(+), 43 deletions(-)


kernel/sched/membarrier.c

--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -66,7 +66,6 @@ void membarrier_exec_mmap(struct mm_struct *mm)
 static int membarrier_global_expedited(void)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 
 	if (num_online_cpus() == 1)
@@ -78,15 +77,8 @@ static int membarrier_global_expedited(void)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -117,18 +109,15 @@ static int membarrier_global_expedited(void)
 		if (p->flags & PF_KTHREAD)
 			continue;
 
-		if (!fallback)
-			__cpumask_set_cpu(cpu, tmpmask);
-		else
-			smp_call_function_single(cpu, ipi_mb, NULL, 1);
+		__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -143,7 +132,6 @@ static int membarrier_global_expedited(void)
 static int membarrier_private_expedited(int flags)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 	struct mm_struct *mm = current->mm;
 
@@ -168,15 +156,8 @@ static int membarrier_private_expedited(int flags)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -195,20 +176,16 @@ static int membarrier_private_expedited(int flags)
 			continue;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p && p->mm == mm) {
-			if (!fallback)
-				__cpumask_set_cpu(cpu, tmpmask);
-			else
-				smp_call_function_single(cpu, ipi_mb, NULL, 1);
-		}
+		if (p && p->mm == mm)
+			__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -264,7 +241,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct rq *rq = cpu_rq(cpu);
 		struct task_struct *p;
 
-		p = rcu_dereference(&rq->curr);
+		p = rcu_dereference(rq->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
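For orientation, here is a condensed sketch of the post-patch control flow of membarrier_global_expedited(), assembled from the hunks above; the membarrier-state and current-CPU filtering inside the loop is elided, so this is not a verbatim copy of kernel/sched/membarrier.c:

/*
 * Condensed sketch (not verbatim kernel source): allocation may now
 * sleep, and failure is reported instead of triggering the removed
 * per-CPU smp_call_function_single() fallback.
 */
static int membarrier_global_expedited(void)
{
	int cpu;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	smp_mb();	/* system call entry is not a mb. */

	/* GFP_KERNEL may block; expedited commands no longer promise not to. */
	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;	/* new: propagated to userspace */

	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p = rcu_dereference(cpu_rq(cpu)->curr);

		/* (current-CPU and membarrier-state checks elided here) */
		if (p && !(p->flags & PF_KTHREAD))
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	/* One batched IPI pass is now the only IPI path. */
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	smp_mb();	/* exit is not a mb either. */
	return 0;
}

The design trade-off is explicit: by relaxing the never-blocks guarantee, the rarely exercised (and hard to test) OOM fallback disappears, and the batched smp_call_function_many() call remains the single code path.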
