@@ -219,30 +219,99 @@ static inline void ipi_flush_tlb_all(void *ignored)
 	local_flush_tlb_all();
 }
 
+static inline void ipi_flush_tlb_mm(void *info)
+{
+	struct mm_struct *mm = (struct mm_struct *)info;
+
+	local_flush_tlb_mm(mm);
+}
+
+static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
+{
+	unsigned int cpuid;
+
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		/* local cpu is the only cpu present in cpumask */
+		local_flush_tlb_mm(mm);
+	} else {
+		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
+	}
+	put_cpu();
+}
+
+struct flush_tlb_data {
+	unsigned long addr1;
+	unsigned long addr2;
+};
+
+static inline void ipi_flush_tlb_page(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_page(NULL, fd->addr1);
+}
+
+static inline void ipi_flush_tlb_range(void *info)
+{
+	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
+}
+
+static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
+				unsigned long end)
+{
+	unsigned int cpuid;
+
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		/* local cpu is the only cpu present in cpumask */
+		if ((end - start) <= PAGE_SIZE)
+			local_flush_tlb_page(NULL, start);
+		else
+			local_flush_tlb_range(NULL, start, end);
+	} else {
+		struct flush_tlb_data fd;
+
+		fd.addr1 = start;
+		fd.addr2 = end;
+
+		if ((end - start) <= PAGE_SIZE)
+			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
+		else
+			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
+	}
+	put_cpu();
+}
+
 void flush_tlb_all(void)
 {
 	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
-/*
- * FIXME: implement proper functionality instead of flush_tlb_all.
- * *But*, as things currently stands, the local_tlb_flush_* functions will
- * all boil down to local_tlb_flush_all anyway.
- */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	smp_flush_tlb_mm(mm_cpumask(mm), mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
 		     unsigned long start, unsigned long end)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
 }
 
 /* Instruction cache invalidate - performed on each cpu */
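
The dispatch in smp_flush_tlb_mm() and smp_flush_tlb_range() hinges on cpumask_any_but(): when it returns a value >= nr_cpu_ids, the calling CPU is the only CPU present in the mask, so the flush can be done locally without raising an IPI. Below is a minimal userspace sketch of that decision only, assuming simplified stand-ins (cpumask_t as a plain bitmask, any_but(), NR_CPUS) rather than the kernel's real cpumask API:

/*
 * Minimal userspace sketch (not the kernel API) of the "local CPU
 * only?" test used in the patch above. cpumask_t, any_but() and
 * NR_CPUS are simplified stand-ins assumed for illustration.
 */
#include <stdio.h>

#define NR_CPUS 4

typedef unsigned long cpumask_t;	/* one bit per CPU */

/* Return any CPU set in mask other than cpu, or NR_CPUS if none. */
static unsigned int any_but(cpumask_t mask, unsigned int cpu)
{
	for (unsigned int i = 0; i < NR_CPUS; i++)
		if (i != cpu && (mask & (1UL << i)))
			return i;
	return NR_CPUS;
}

static void flush(cpumask_t mask, unsigned int this_cpu)
{
	if (!mask)
		return;		/* empty mask: nothing to flush */
	if (any_but(mask, this_cpu) >= NR_CPUS)
		printf("cpu%u: local flush only\n", this_cpu);
	else
		printf("cpu%u: IPI broadcast to mask %#lx\n", this_cpu, mask);
}

int main(void)
{
	flush(0x1, 0);	/* only cpu0 in mask -> local fast path */
	flush(0x6, 0);	/* cpu1 and cpu2 in mask -> IPI broadcast */
	return 0;
}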
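Two design points in the patch are worth noting. smp_flush_tlb_range() passes an on-stack struct flush_tlb_data to on_each_cpu_mask() with wait = 1; the synchronous wait is what keeps fd alive until every remote handler has finished reading it. And the get_cpu()/put_cpu() pair disables preemption across the mask test, so the "local CPU only" decision cannot be invalidated by the task migrating to another CPU mid-flush.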