Skip to content

Commit a569726

Browse files
mdchitale authored and
palmer-dabbelt committed
riscv: mm: Add support for Svinval extension
The Svinval extension splits SFENCE.VMA instruction into finer-grained invalidation and ordering operations and is mandatory for RVA23S64 profile. When Svinval is enabled the local_flush_tlb_range_threshold_asid function should use the following sequence to optimize the tlb flushes instead of a simple sfence.vma: sfence.w.inval svinval.vma . . svinval.vma sfence.inval.ir The maximum number of consecutive svinval.vma instructions that can be executed in local_flush_tlb_range_threshold_asid function is limited to 64. This is required to avoid soft lockups and the approach is similar to that used in arm64. Signed-off-by: Mayuresh Chitale <[email protected]> Reviewed-by: Andrew Jones <[email protected]> Reviewed-by: Alexandre Ghiti <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexandre Ghiti <[email protected]> Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 82f2b0b commit a569726

File tree

1 file changed

+31
-0
lines changed

1 file changed

+31
-0
lines changed

arch/riscv/mm/tlbflush.c

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,27 @@
77
#include <linux/mmu_notifier.h>
88
#include <asm/sbi.h>
99
#include <asm/mmu_context.h>
10+
#include <asm/cpufeature.h>
11+
12+
#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
13+
14+
/*
 * Emit SFENCE.INVAL.IR (Svinval extension): orders preceding invalidation
 * instructions (svinval.vma) before any subsequent implicit memory accesses,
 * completing the sfence.w.inval / svinval.vma / sfence.inval.ir sequence.
 * The "memory" clobber keeps the compiler from reordering accesses across it.
 */
static inline void local_sfence_inval_ir(void)
{
	asm volatile(SFENCE_INVAL_IR() ::: "memory");
}
18+
19+
/*
 * Emit SFENCE.W.INVAL (Svinval extension): orders preceding stores before
 * any subsequent invalidation instructions (svinval.vma). Must be issued
 * before a batch of svinval.vma operations.
 * The "memory" clobber keeps the compiler from reordering accesses across it.
 */
static inline void local_sfence_w_inval(void)
{
	asm volatile(SFENCE_W_INVAL() ::: "memory");
}
23+
24+
/*
 * Invalidate a single TLB entry for @vma using SINVAL.VMA (Svinval
 * extension). When @asid is a real ASID it is passed in the second operand
 * to restrict the invalidation to that address space; FLUSH_TLB_NO_ASID
 * selects the zero-register form, which invalidates the entry for all ASIDs.
 * Must be bracketed by local_sfence_w_inval()/local_sfence_inval_ir().
 */
static inline void local_sinval_vma(unsigned long vma, unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		asm volatile(SINVAL_VMA(%0, %1) : : "r" (vma), "r" (asid) : "memory");
	else
		asm volatile(SINVAL_VMA(%0, zero) : : "r" (vma) : "memory");
}
1031

1132
/*
1233
* Flush entire TLB if number of entries to be flushed is greater
@@ -27,6 +48,16 @@ static void local_flush_tlb_range_threshold_asid(unsigned long start,
2748
return;
2849
}
2950

51+
if (has_svinval()) {
52+
local_sfence_w_inval();
53+
for (i = 0; i < nr_ptes_in_range; ++i) {
54+
local_sinval_vma(start, asid);
55+
start += stride;
56+
}
57+
local_sfence_inval_ir();
58+
return;
59+
}
60+
3061
for (i = 0; i < nr_ptes_in_range; ++i) {
3162
local_flush_tlb_page_asid(start, asid);
3263
start += stride;

0 commit comments

Comments
 (0)