Commit d9be2b9

Author: Alexandre Ghiti
riscv: Call secondary mmu notifier when flushing the tlb
This is required to allow the IOMMU driver to correctly flush its own TLB.

Reviewed-by: Clément Léger <[email protected]>
Reviewed-by: Samuel Holland <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexandre Ghiti <[email protected]>
1 parent: 4458b8f


arch/riscv/mm/tlbflush.c

Lines changed: 22 additions & 15 deletions
@@ -4,6 +4,7 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
@@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info)
         local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+        return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
+}
+
+static void __flush_tlb_range(struct mm_struct *mm,
+                              const struct cpumask *cmask,
                               unsigned long start, unsigned long size,
                               unsigned long stride)
 {
+        unsigned long asid = get_mm_asid(mm);
         unsigned int cpu;
 
         if (cpumask_empty(cmask))
@@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
         }
 
         put_cpu();
-}
 
-static inline unsigned long get_mm_asid(struct mm_struct *mm)
-{
-        return cntx2asid(atomic_long_read(&mm->context.id));
+        if (mm)
+                mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
-                          0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+        __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
                         unsigned long start, unsigned long end,
                         unsigned int page_size)
 {
-        __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
-                          start, end - start, page_size);
+        __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-        __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+        __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                           addr, PAGE_SIZE, PAGE_SIZE);
 }
 
@@ -161,21 +165,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                 }
         }
 
-        __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+        __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                           start, end - start, stride_size);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-        __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+        __flush_tlb_range(NULL, cpu_online_mask,
                           start, end - start, PAGE_SIZE);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
 {
-        __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+        __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
                           start, end - start, PMD_SIZE);
 }
 #endif
@@ -189,7 +193,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
                                struct mm_struct *mm,
                                unsigned long uaddr)
 {
+        unsigned long start = uaddr & PAGE_MASK;
+
         cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+        mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE);
 }
 
 void arch_flush_tlb_batched_pending(struct mm_struct *mm)
@@ -199,7 +206,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
 
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-        __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
-                          FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+        __flush_tlb_range(NULL, &batch->cpumask,
+                          0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
         cpumask_clear(&batch->cpumask);
 }
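
For context, the consumer of these new notifications lives on the secondary-MMU side: an IOMMU driver that shares a process page table registers an mmu_notifier whose arch_invalidate_secondary_tlbs callback is invoked by mmu_notifier_arch_invalidate_secondary_tlbs() with the same range that was just flushed from the CPU TLB. Below is a minimal sketch of such a consumer; it is not part of this commit, it assumes the arch_invalidate_secondary_tlbs callback signature from include/linux/mmu_notifier.h, and the my_sva_* names and my_iommu_flush_iotlb() helper are hypothetical placeholders.

#include <linux/container_of.h>
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>

/* Hypothetical per-bind state for a device sharing a process page table. */
struct my_sva_bond {
        struct mmu_notifier notifier;
        /* device-specific handle used to issue IOTLB invalidations */
};

/* Hypothetical device-specific IOTLB flush. */
static void my_iommu_flush_iotlb(struct my_sva_bond *bond,
                                 unsigned long start, unsigned long end)
{
        /* Issue the invalidation command to the device here. */
}

/*
 * Invoked via mmu_notifier_arch_invalidate_secondary_tlbs(), which this
 * patch now calls from the riscv TLB-flush paths, so the device IOTLB is
 * invalidated for the same [start, end) range as the CPU TLB.
 */
static void my_sva_invalidate_secondary_tlbs(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end)
{
        struct my_sva_bond *bond = container_of(mn, struct my_sva_bond, notifier);

        my_iommu_flush_iotlb(bond, start, end);
}

static const struct mmu_notifier_ops my_sva_mmu_notifier_ops = {
        .arch_invalidate_secondary_tlbs = my_sva_invalidate_secondary_tlbs,
};

/* Bind the notifier to the mm whose translations the device will use. */
static int my_sva_bind_mm(struct my_sva_bond *bond, struct mm_struct *mm)
{
        bond->notifier.ops = &my_sva_mmu_notifier_ops;
        return mmu_notifier_register(&bond->notifier, mm);
}

With such a bind in place, every user-space TLB flush that goes through __flush_tlb_range() on that mm also reaches the device, which is the behaviour the commit message asks for; kernel-range flushes pass a NULL mm and skip the notifier.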
