@@ -3,6 +3,7 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>

@@ -147,7 +148,33 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+	unsigned long stride_size;
+
+	if (!is_vm_hugetlb_page(vma)) {
+		stride_size = PAGE_SIZE;
+	} else {
+		stride_size = huge_page_size(hstate_vma(vma));
+
+		/*
+		 * As stated in the privileged specification, every PTE in a
+		 * NAPOT region must be invalidated, so reset the stride in
+		 * that case.
+		 */
+		if (has_svnapot()) {
+			if (stride_size >= PGDIR_SIZE)
+				stride_size = PGDIR_SIZE;
+			else if (stride_size >= P4D_SIZE)
+				stride_size = P4D_SIZE;
+			else if (stride_size >= PUD_SIZE)
+				stride_size = PUD_SIZE;
+			else if (stride_size >= PMD_SIZE)
+				stride_size = PMD_SIZE;
+			else
+				stride_size = PAGE_SIZE;
+		}
+	}
+
+	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
 }
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
|