Skip to content

Commit 648321f

Browse files
xhackerust authored and palmer-dabbelt committed
riscv: mm: try VMA lock-based page fault handling first
Attempt VMA lock-based page fault handling first, and fall back to the existing mmap_lock-based handling if that fails. A simple running the ebizzy benchmark on Lichee Pi 4A shows that PER_VMA_LOCK can improve the ebizzy benchmark by about 32.68%. In theory, the more CPUs, the bigger improvement, but I don't have any HW platform which has more than 4 CPUs. This is the riscv variant of "x86/mm: try VMA lock-based page fault handling first". Signed-off-by: Jisheng Zhang <[email protected]> Reviewed-by: Guo Ren <[email protected]> Reviewed-by: Suren Baghdasaryan <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 7d3332b commit 648321f

File tree

2 files changed

+34
-0
lines changed

2 files changed

+34
-0
lines changed

arch/riscv/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ config RISCV
4444
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
4545
select ARCH_SUPPORTS_HUGETLBFS if MMU
4646
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
47+
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
4748
select ARCH_USE_MEMTEST
4849
select ARCH_USE_QUEUED_RWLOCKS
4950
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU

arch/riscv/mm/fault.c

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -274,6 +274,36 @@ void handle_page_fault(struct pt_regs *regs)
274274
flags |= FAULT_FLAG_WRITE;
275275
else if (cause == EXC_INST_PAGE_FAULT)
276276
flags |= FAULT_FLAG_INSTRUCTION;
277+
#ifdef CONFIG_PER_VMA_LOCK
278+
if (!(flags & FAULT_FLAG_USER))
279+
goto lock_mmap;
280+
281+
vma = lock_vma_under_rcu(mm, addr);
282+
if (!vma)
283+
goto lock_mmap;
284+
285+
if (unlikely(access_error(cause, vma))) {
286+
vma_end_read(vma);
287+
goto lock_mmap;
288+
}
289+
290+
fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
291+
vma_end_read(vma);
292+
293+
if (!(fault & VM_FAULT_RETRY)) {
294+
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
295+
goto done;
296+
}
297+
count_vm_vma_lock_event(VMA_LOCK_RETRY);
298+
299+
if (fault_signal_pending(fault, regs)) {
300+
if (!user_mode(regs))
301+
no_context(regs, addr);
302+
return;
303+
}
304+
lock_mmap:
305+
#endif /* CONFIG_PER_VMA_LOCK */
306+
277307
retry:
278308
mmap_read_lock(mm);
279309
vma = find_vma(mm, addr);
@@ -343,6 +373,9 @@ void handle_page_fault(struct pt_regs *regs)
343373

344374
mmap_read_unlock(mm);
345375

376+
#ifdef CONFIG_PER_VMA_LOCK
377+
done:
378+
#endif
346379
if (unlikely(fault & VM_FAULT_ERROR)) {
347380
tsk->thread.bad_cause = cause;
348381
mm_fault_error(regs, addr, fault);

0 commit comments

Comments (0)