@@ -66,7 +66,6 @@ void membarrier_exec_mmap(struct mm_struct *mm)
 static int membarrier_global_expedited(void)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 
 	if (num_online_cpus() == 1)
@@ -78,15 +77,8 @@ static int membarrier_global_expedited(void)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -117,18 +109,15 @@ static int membarrier_global_expedited(void)
 		if (p->flags & PF_KTHREAD)
 			continue;
 
-		if (!fallback)
-			__cpumask_set_cpu(cpu, tmpmask);
-		else
-			smp_call_function_single(cpu, ipi_mb, NULL, 1);
+		__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -143,7 +132,6 @@ static int membarrier_global_expedited(void)
 static int membarrier_private_expedited(int flags)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 	struct mm_struct *mm = current->mm;
 
@@ -168,15 +156,8 @@ static int membarrier_private_expedited(int flags)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -195,20 +176,16 @@ static int membarrier_private_expedited(int flags)
 			continue;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p && p->mm == mm) {
-			if (!fallback)
-				__cpumask_set_cpu(cpu, tmpmask);
-			else
-				smp_call_function_single(cpu, ipi_mb, NULL, 1);
-		}
+		if (p && p->mm == mm)
+			__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -264,7 +241,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct rq *rq = cpu_rq(cpu);
 		struct task_struct *p;
 
-		p = rcu_dereference(&rq->curr);
+		p = rcu_dereference(rq->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
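The diff above replaces the GFP_NOWAIT cpumask allocation and its per-CPU smp_call_function_single() fallback with a plain GFP_KERNEL allocation that fails the system call with -ENOMEM. One user-visible consequence is that the expedited membarrier commands can now return ENOMEM under memory pressure instead of silently degrading to one IPI per CPU. What follows is a minimal userspace sketch, not part of this commit, showing how a caller might handle that; it assumes the standard <linux/membarrier.h> command constants and a raw syscall(2) invocation, since glibc provides no membarrier() wrapper.

	/*
	 * Sketch: register for, then issue, a private expedited membarrier,
	 * treating ENOMEM as a transient failure the caller may retry.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/membarrier.h>

	static int membarrier(int cmd, int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		/* Registration is required before private expedited is usable. */
		if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) < 0) {
			perror("membarrier register");
			return 1;
		}

		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) < 0) {
			if (errno == ENOMEM) {
				/*
				 * New with this change: the command fails outright
				 * when the kernel cannot allocate its cpumask,
				 * rather than falling back to per-CPU IPIs.
				 */
				fprintf(stderr, "membarrier: out of memory, retry later\n");
				return 1;
			}
			perror("membarrier");
			return 1;
		}
		return 0;
	}

Note the ordering in the patched kernel code: because GFP_KERNEL may sleep, the allocation is performed after the smp_mb() but before cpus_read_lock() and the RCU read-side critical section, and the removed comment promising that expedited commands never block no longer applies to the allocation path.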