
Commit cb80b96

Merge tag 'riscv-for-linus-6.3-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

 - fixes to the ASID allocator to avoid leaking stale mappings between
   tasks

 - fix the vmalloc fault handler to tolerate huge pages

* tag 'riscv-for-linus-6.3-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  RISC-V: mm: Support huge page in vmalloc_fault()
  riscv: asid: Fixup stale TLB entry cause application crash
  Revert "riscv: mm: notify remote harts about mmu cache updates"
2 parents e50a803 + 47dd902 commit cb80b96


5 files changed, 42 insertions(+), 51 deletions(-)


arch/riscv/include/asm/mmu.h

Lines changed: 0 additions & 2 deletions
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
         /* A local icache flush is needed before user execution can resume. */
         cpumask_t icache_stale_mask;
-        /* A local tlb flush is needed before user execution can resume. */
-        cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;

arch/riscv/include/asm/tlbflush.h

Lines changed: 0 additions & 18 deletions
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
         ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-        __asm__ __volatile__ ("sfence.vma x0, %0"
-                        :
-                        : "r" (asid)
-                        : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-                unsigned long asid)
-{
-        __asm__ __volatile__ ("sfence.vma %0, %1"
-                        :
-                        : "r" (addr), "r" (asid)
-                        : "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()                   do { } while (0)
 #define local_flush_tlb_page(addr)              do { } while (0)

arch/riscv/mm/context.c

Lines changed: 20 additions & 20 deletions
@@ -196,16 +196,6 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
         if (need_flush_tlb)
                 local_flush_tlb_all();
-#ifdef CONFIG_SMP
-        else {
-                cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-                if (cpumask_test_cpu(cpu, mask)) {
-                        cpumask_clear_cpu(cpu, mask);
-                        local_flush_tlb_all_asid(cntx & asid_mask);
-                }
-        }
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
         local_flush_tlb_all();
 }
 
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+                          struct mm_struct *next, unsigned int cpu)
 {
-        if (static_branch_unlikely(&use_asid_allocator))
-                set_mm_asid(mm, cpu);
-        else
-                set_mm_noasid(mm);
+        /*
+         * The mm_cpumask indicates which harts' TLBs contain the virtual
+         * address mapping of the mm. Compared to noasid, using asid
+         * can't guarantee that stale TLB entries are invalidated because
+         * the asid mechanism wouldn't flush TLB for every switch_mm for
+         * performance. So when using asid, keep all CPUs footmarks in
+         * cpumask() until mm reset.
+         */
+        cpumask_set_cpu(cpu, mm_cpumask(next));
+        if (static_branch_unlikely(&use_asid_allocator)) {
+                set_mm_asid(next, cpu);
+        } else {
+                cpumask_clear_cpu(cpu, mm_cpumask(prev));
+                set_mm_noasid(next);
+        }
 }
 
 static int __init asids_init(void)
@@ -274,7 +276,8 @@ static int __init asids_init(void)
 }
 early_initcall(asids_init);
 #else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+                          struct mm_struct *next, unsigned int cpu)
 {
         /* Nothing to do here when there is no MMU */
 }
@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
          */
         cpu = smp_processor_id();
 
-        cpumask_clear_cpu(cpu, mm_cpumask(prev));
-        cpumask_set_cpu(cpu, mm_cpumask(next));
-
-        set_mm(next, cpu);
+        set_mm(prev, next, cpu);
 
         flush_icache_deferred(next, cpu);
 }
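
A note on the context.c hunks: the mm_cpumask() bookkeeping moves from switch_mm() into set_mm(). With the ASID allocator, a hart that switches away from an mm can still hold TLB entries tagged with that mm's ASID, so its bit has to stay set in mm_cpumask() or a later range flush (which only targets harts in that mask) will miss it; only the noasid path, which already does a full local flush on every switch, may clear the outgoing hart's bit. The following is a minimal userspace model of that rule, not kernel code; struct mm_model, switch_to_mm() and remote_flush() are invented names:

#include <stdbool.h>
#include <stdio.h>

struct mm_model {
        unsigned long hart_mask;        /* stands in for mm_cpumask(mm) */
        const char *name;
};

static bool use_asid = true;            /* stands in for use_asid_allocator */

/*
 * Mirrors the rule in the new set_mm(): always mark the incoming mm,
 * and only unmark the outgoing one when every switch already does a
 * full local TLB flush (the noasid path).
 */
static void switch_to_mm(struct mm_model *prev, struct mm_model *next, int hart)
{
        next->hart_mask |= 1UL << hart;
        if (!use_asid)
                prev->hart_mask &= ~(1UL << hart);
}

/* A remote shootdown is only delivered to harts whose bit is set. */
static void remote_flush(const struct mm_model *mm)
{
        printf("flush %s on harts 0x%lx\n", mm->name, mm->hart_mask);
}

int main(void)
{
        struct mm_model a = { .name = "A" }, b = { .name = "B" };

        switch_to_mm(&b, &a, 0);   /* hart 0 runs A; its TLB may cache A's ASID */
        switch_to_mm(&a, &b, 0);   /* hart 0 moves on to B; A's entries may survive */
        remote_flush(&a);          /* must still include hart 0 */
        return 0;
}

Compiled and run, the last call prints harts 0x1: hart 0 remains a target of the shootdown for mm A even though it has since switched to B, which is exactly what the added comment in set_mm() asks for.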

arch/riscv/mm/fault.c

Lines changed: 5 additions & 0 deletions
@@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
                 no_context(regs, addr);
                 return;
         }
+        if (pud_leaf(*pud_k))
+                goto flush_tlb;
 
         /*
          * Since the vmalloc area is global, it is unnecessary
@@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
                 no_context(regs, addr);
                 return;
         }
+        if (pmd_leaf(*pmd_k))
+                goto flush_tlb;
 
         /*
          * Make sure the actual PTE exists as well to
@@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
          * ordering constraint, not a cache flush; it is
          * necessary even after writing invalid entries.
          */
+flush_tlb:
         local_flush_tlb_page(addr);
 }
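
A note on the fault.c hunks: vmalloc memory can now be mapped with huge pages, so the kernel page-table walk in vmalloc_fault() may end at a leaf PUD or PMD. In that case there is no lower level left to check, and the handler jumps straight to the local sfence.vma through the new flush_tlb label. Below is a toy model of the same early-exit idea; toy_entry and walk_and_sync() are made up for illustration and only mimic what pud_leaf()/pmd_leaf() decide in the real handler:

#include <stdbool.h>
#include <stdio.h>

struct toy_entry {
        bool present;
        bool leaf;                      /* a huge mapping: no lower level */
        const struct toy_entry *next;   /* next (lower) level when not a leaf */
};

/* Walk up to 'levels' levels; stop early at a leaf, as the patch does. */
static bool walk_and_sync(const struct toy_entry *e, int levels)
{
        for (; levels > 0; levels--, e = e->next) {
                if (!e || !e->present)
                        return false;   /* genuinely bad access */
                if (e->leaf)
                        break;          /* huge mapping: nothing below it */
        }
        printf("mapping found, flush the local TLB\n");
        return true;
}

int main(void)
{
        struct toy_entry pte       = { .present = true, .leaf = true };
        struct toy_entry pmd       = { .present = true, .next = &pte };
        struct toy_entry pud_small = { .present = true, .next = &pmd };
        struct toy_entry pud_huge  = { .present = true, .leaf = true };

        walk_and_sync(&pud_small, 3);   /* ordinary mapping: walks down to the PTE */
        walk_and_sync(&pud_huge, 3);    /* huge leaf at the top level: early exit */
        return 0;
}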

arch/riscv/mm/tlbflush.c

Lines changed: 17 additions & 11 deletions
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+        __asm__ __volatile__ ("sfence.vma x0, %0"
+                        :
+                        : "r" (asid)
+                        : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+                unsigned long asid)
+{
+        __asm__ __volatile__ ("sfence.vma %0, %1"
+                        :
+                        : "r" (addr), "r" (asid)
+                        : "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                   unsigned long size, unsigned long stride)
 {
-        struct cpumask *pmask = &mm->context.tlb_stale_mask;
         struct cpumask *cmask = mm_cpumask(mm);
         unsigned int cpuid;
         bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
         if (static_branch_unlikely(&use_asid_allocator)) {
                 unsigned long asid = atomic_long_read(&mm->context.id);
 
-                /*
-                 * TLB will be immediately flushed on harts concurrently
-                 * executing this MM context. TLB flush on other harts
-                 * is deferred until this MM context migrates there.
-                 */
-                cpumask_setall(pmask);
-                cpumask_clear_cpu(cpuid, pmask);
-                cpumask_andnot(pmask, pmask, cmask);
-
                 if (broadcast) {
                         sbi_remote_sfence_vma_asid(cmask, start, size, asid);
                 } else if (size <= stride) {