Commit c6026d3

SiFiveHolland authored and palmer-dabbelt committed
riscv: mm: Combine the SMP and UP TLB flush code
In SMP configurations, all TLB flushing narrower than flush_tlb_all() goes through __flush_tlb_range(). Do the same in UP configurations.

This allows UP configurations to take advantage of recent improvements to the code in tlbflush.c, such as support for huge pages and flushing multiple-page ranges.

Reviewed-by: Alexandre Ghiti <[email protected]>
Signed-off-by: Samuel Holland <[email protected]>
Reviewed-by: Yunhui Cui <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 9546f00 commit c6026d3
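
As a rough before/after illustration of what this means for uniprocessor kernels: the "before" function below is copied from the UP fallback that this commit removes, while the "after" behaviour is only summarized in comments based on the commit message (the actual range-flush logic lives in arch/riscv/mm/tlbflush.c and is not shown here).

/*
 * Before: UP (!CONFIG_SMP) builds used header-only fallbacks like this
 * one, which discard the requested range and flush the entire local TLB.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	local_flush_tlb_all();	/* whole TLB, regardless of start/end */
}

/*
 * After: UP builds also compile arch/riscv/mm/tlbflush.c, so they use the
 * same flush_tlb_range() as SMP builds. Per the commit message, it funnels
 * into __flush_tlb_range(), which understands huge pages and multi-page
 * ranges instead of always invalidating everything.
 */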

File tree: 3 files changed, +5 / -33 lines

arch/riscv/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ config RISCV
 	select ARCH_USE_MEMTEST
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USES_CFI_TRAPS if CFI_CLANG
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT

arch/riscv/include/asm/tlbflush.h

Lines changed: 3 additions & 28 deletions
@@ -27,12 +27,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-#else /* CONFIG_MMU */
-#define local_flush_tlb_all() do { } while (0)
-#define local_flush_tlb_page(addr) do { } while (0)
-#endif /* CONFIG_MMU */
 
-#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -54,28 +49,8 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 			       unsigned long uaddr);
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
-
-#else /* CONFIG_SMP && CONFIG_MMU */
-
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-#define flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
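
For orientation, a condensed sketch (not verbatim kernel source; surrounding declarations are elided) of how arch/riscv/include/asm/tlbflush.h is laid out after this hunk: the flush_tlb_*() prototypes are shared by SMP and UP builds, and only the !CONFIG_MMU branch keeps a stub.

#ifdef CONFIG_MMU
/* ... inline local_flush_tlb_all()/local_flush_tlb_page() sfence.vma wrappers ... */

/* Shared by SMP and UP builds; implemented in arch/riscv/mm/tlbflush.c. */
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
/*
 * ... plus flush_tlb_mm_range(), flush_tlb_page(), flush_tlb_range(),
 * flush_tlb_kernel_range() and the arch_tlbbatch_*() batching helpers ...
 */
#else /* CONFIG_MMU */
/* With no MMU there is nothing to flush, so only a stub remains. */
#define local_flush_tlb_all() do { } while (0)
#endif /* CONFIG_MMU */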

arch/riscv/mm/Makefile

Lines changed: 1 addition & 4 deletions
@@ -13,14 +13,11 @@ endif
 KCOV_INSTRUMENT_init.o := n
 
 obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o tlbflush.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += pmem.o
 
-ifeq ($(CONFIG_MMU),y)
-obj-$(CONFIG_SMP) += tlbflush.o
-endif
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
 obj-$(CONFIG_KASAN) += kasan_init.o
