@@ -230,7 +230,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
230
230
231
231
static inline void __flush_tlb_range (struct vm_area_struct * vma ,
232
232
unsigned long start , unsigned long end ,
233
- unsigned long stride , bool last_level )
233
+ unsigned long stride , bool last_level ,
234
+ int tlb_level )
234
235
{
235
236
unsigned long asid = ASID (vma -> vm_mm );
236
237
unsigned long addr ;
@@ -252,11 +253,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
252
253
dsb (ishst );
253
254
for (addr = start ; addr < end ; addr += stride ) {
254
255
if (last_level ) {
255
- __tlbi_level (vale1is , addr , 0 );
256
- __tlbi_user_level (vale1is , addr , 0 );
256
+ __tlbi_level (vale1is , addr , tlb_level );
257
+ __tlbi_user_level (vale1is , addr , tlb_level );
257
258
} else {
258
- __tlbi_level (vae1is , addr , 0 );
259
- __tlbi_user_level (vae1is , addr , 0 );
259
+ __tlbi_level (vae1is , addr , tlb_level );
260
+ __tlbi_user_level (vae1is , addr , tlb_level );
260
261
}
261
262
}
262
263
dsb (ish );
@@ -268,8 +269,9 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
268
269
/*
269
270
* We cannot use leaf-only invalidation here, since we may be invalidating
270
271
* table entries as part of collapsing hugepages or moving page tables.
272
+ * Set the tlb_level to 0 because we cannot get enough information here.
271
273
*/
272
- __flush_tlb_range (vma , start , end , PAGE_SIZE , false);
274
+ __flush_tlb_range (vma , start , end , PAGE_SIZE , false, 0 );
273
275
}
274
276
275
277
static inline void flush_tlb_kernel_range (unsigned long start , unsigned long end )
0 commit comments