 	__tlbi(op, arg);						\
 } while(0)
 
+#define __tlbi_user_level(op, arg, level) do {				\
+	if (arm64_kernel_unmapped_at_el0())				\
+		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
+} while (0)
+
 /*
  * TLB Invalidation
  * ================
@@ -205,8 +210,9 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
 	dsb(ishst);
-	__tlbi(vale1is, addr);
-	__tlbi_user(vale1is, addr);
+	/* This function is only called on a small page */
+	__tlbi_level(vale1is, addr, 3);
+	__tlbi_user_level(vale1is, addr, 3);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -246,11 +252,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
-			__tlbi(vale1is, addr);
-			__tlbi_user(vale1is, addr);
+			__tlbi_level(vale1is, addr, 0);
+			__tlbi_user_level(vale1is, addr, 0);
 		} else {
-			__tlbi(vae1is, addr);
-			__tlbi_user(vae1is, addr);
+			__tlbi_level(vae1is, addr, 0);
+			__tlbi_user_level(vae1is, addr, 0);
 		}
 	}
 	dsb(ish);
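For context, the new __tlbi_user_level macro is a thin wrapper around the __tlbi_level helper whose tail is visible at the top of the first hunk. A simplified sketch of that helper is below: it shows how the ARMv8.4-TTL hint (a 2-bit translation-granule code plus a 2-bit level, packed into bits [47:44] of the TLBI address argument) gets folded in before the plain __tlbi is issued. It assumes kernel-internal helpers (GENMASK_ULL, FIELD_PREP, cpus_have_const_cap, get_trans_granule) and capability names that follow the kernel's conventions; treat it as an illustration, not the exact mainline definition.

/*
 * Sketch of the level-hinted TLBI helper that __tlbi_user_level wraps.
 * Assumes the ARMv8.4-TTL encoding: bits [47:44] of the TLBI argument
 * carry a 2-bit translation-granule code (bits [3:2] of TTL) and a
 * 2-bit level (bits [1:0]). Simplified from the kernel's version.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	/* Encode a hint only if the CPU has TTL and a level is known */ \
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && level) {	\
		u64 ttl = level & 3;					\
									\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)

This also explains the level arguments in the diff: a level of 0 means "level unknown", so no hint is encoded and the invalidation stays as conservative as before. That is why __flush_tlb_range passes 0 (the walk level is not known there), while flush_tlb_page_nosync, which only ever operates on last-level (level 3) small-page entries, can pass 3.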