Skip to content

Commit 3f1e782

Browse files
guoren83 authored and palmer-dabbelt committed
riscv: add ASID-based tlbflushing methods
Implement optimized versions of the tlb flushing routines for systems using ASIDs. These are behind the use_asid_allocator static branch so as not to affect existing systems that do not use ASIDs.

Signed-off-by: Guo Ren <[email protected]>
[hch: rebased on top of previous cleanups, use the same algorithm as the non-ASID based code for local vs global flushes, keep functions as local as possible]
Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: Guo Ren <[email protected]>
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 70c7605 commit 3f1e782

File tree

3 files changed

+43
-8
lines changed

3 files changed

+43
-8
lines changed

arch/riscv/include/asm/mmu_context.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,8 @@ static inline int init_new_context(struct task_struct *tsk,
3333
return 0;
3434
}
3535

36+
DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
37+
3638
#include <asm-generic/mmu_context.h>
3739

3840
#endif /* _ASM_RISCV_MMU_CONTEXT_H */

arch/riscv/mm/context.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
#ifdef CONFIG_MMU
2020

21-
static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
21+
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
2222

2323
static unsigned long asid_bits;
2424
static unsigned long num_asids;

arch/riscv/mm/tlbflush.c

Lines changed: 40 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,24 @@
44
#include <linux/smp.h>
55
#include <linux/sched.h>
66
#include <asm/sbi.h>
7+
#include <asm/mmu_context.h>
8+
9+
static inline void local_flush_tlb_all_asid(unsigned long asid)
10+
{
11+
__asm__ __volatile__ ("sfence.vma x0, %0"
12+
:
13+
: "r" (asid)
14+
: "memory");
15+
}
16+
17+
static inline void local_flush_tlb_page_asid(unsigned long addr,
18+
unsigned long asid)
19+
{
20+
__asm__ __volatile__ ("sfence.vma %0, %1"
21+
:
22+
: "r" (addr), "r" (asid)
23+
: "memory");
24+
}
725

826
void flush_tlb_all(void)
927
{
@@ -16,21 +34,36 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
1634
struct cpumask *cmask = mm_cpumask(mm);
1735
struct cpumask hmask;
1836
unsigned int cpuid;
37+
bool broadcast;
1938

2039
if (cpumask_empty(cmask))
2140
return;
2241

2342
cpuid = get_cpu();
43+
/* check if the tlbflush needs to be sent to other CPUs */
44+
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
45+
if (static_branch_unlikely(&use_asid_allocator)) {
46+
unsigned long asid = atomic_long_read(&mm->context.id);
2447

25-
if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
26-
/* local cpu is the only cpu present in cpumask */
27-
if (size <= stride)
48+
if (broadcast) {
49+
riscv_cpuid_to_hartid_mask(cmask, &hmask);
50+
sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
51+
start, size, asid);
52+
} else if (size <= stride) {
53+
local_flush_tlb_page_asid(start, asid);
54+
} else {
55+
local_flush_tlb_all_asid(asid);
56+
}
57+
} else {
58+
if (broadcast) {
59+
riscv_cpuid_to_hartid_mask(cmask, &hmask);
60+
sbi_remote_sfence_vma(cpumask_bits(&hmask),
61+
start, size);
62+
} else if (size <= stride) {
2863
local_flush_tlb_page(start);
29-
else
64+
} else {
3065
local_flush_tlb_all();
31-
} else {
32-
riscv_cpuid_to_hartid_mask(cmask, &hmask);
33-
sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
66+
}
3467
}
3568

3669
put_cpu();

0 commit comments

Comments (0)