Skip to content

Commit 62b78fd

Browse files
Alexandre Ghiti authored and Palmer Dabbelt committed
riscv: Improve flush_tlb_kernel_range()
This function used to simply flush the whole tlb of all harts, be more subtile and try to only flush the range. The problem is that we can only use PAGE_SIZE as stride since we don't know the size of the underlying mapping and then this function will be improved only if the size of the region to flush is < threshold * PAGE_SIZE. Signed-off-by: Alexandre Ghiti <[email protected]> Reviewed-by: Andrew Jones <[email protected]> Tested-by: Lad Prabhakar <[email protected]> # On RZ/Five SMARC Reviewed-by: Samuel Holland <[email protected]> Tested-by: Samuel Holland <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent ba6f359 commit 62b78fd

File tree

2 files changed

+30
-15
lines changed

2 files changed

+30
-15
lines changed

arch/riscv/include/asm/tlbflush.h

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
4040
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
4141
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
4242
unsigned long end);
43+
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
4344
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4445
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
4546
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -56,15 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
5657
local_flush_tlb_all();
5758
}
5859

59-
#define flush_tlb_mm(mm) flush_tlb_all()
60-
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
61-
#endif /* !CONFIG_SMP || !CONFIG_MMU */
62-
6360
/* Flush a range of kernel pages */
6461
static inline void flush_tlb_kernel_range(unsigned long start,
6562
unsigned long end)
6663
{
67-
flush_tlb_all();
64+
local_flush_tlb_all();
6865
}
6966

67+
#define flush_tlb_mm(mm) flush_tlb_all()
68+
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
69+
#endif /* !CONFIG_SMP || !CONFIG_MMU */
70+
7071
#endif /* _ASM_RISCV_TLBFLUSH_H */

arch/riscv/mm/tlbflush.c

Lines changed: 24 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -97,20 +97,27 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
9797
unsigned long size, unsigned long stride)
9898
{
9999
struct flush_tlb_range_data ftd;
100-
struct cpumask *cmask = mm_cpumask(mm);
100+
const struct cpumask *cmask;
101101
unsigned long asid = FLUSH_TLB_NO_ASID;
102-
unsigned int cpuid;
103102
bool broadcast;
104103

105-
if (cpumask_empty(cmask))
106-
return;
104+
if (mm) {
105+
unsigned int cpuid;
106+
107+
cmask = mm_cpumask(mm);
108+
if (cpumask_empty(cmask))
109+
return;
107110

108-
cpuid = get_cpu();
109-
/* check if the tlbflush needs to be sent to other CPUs */
110-
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
111+
cpuid = get_cpu();
112+
/* check if the tlbflush needs to be sent to other CPUs */
113+
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
111114

112-
if (static_branch_unlikely(&use_asid_allocator))
113-
asid = atomic_long_read(&mm->context.id) & asid_mask;
115+
if (static_branch_unlikely(&use_asid_allocator))
116+
asid = atomic_long_read(&mm->context.id) & asid_mask;
117+
} else {
118+
cmask = cpu_online_mask;
119+
broadcast = true;
120+
}
114121

115122
if (broadcast) {
116123
if (riscv_use_ipi_for_rfence()) {
@@ -128,7 +135,8 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
128135
local_flush_tlb_range_asid(start, size, stride, asid);
129136
}
130137

131-
put_cpu();
138+
if (mm)
139+
put_cpu();
132140
}
133141

134142
void flush_tlb_mm(struct mm_struct *mm)
@@ -179,6 +187,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
179187

180188
__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
181189
}
190+
191+
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
192+
{
193+
__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
194+
}
195+
182196
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
183197
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
184198
unsigned long end)

0 commit comments

Comments (0)