Commit b92cd80

torvalds authored and gregkh committed
arm64/mm: Convert to using lock_mm_and_find_vma()
commit ae870a6 upstream.

This converts arm64 to use the new page fault helper. It was very
straightforward, but still needed a fix for the "obvious" conversion I
initially did. Thanks to Suren for the fix and testing.

Fixed-and-tested-by: Suren Baghdasaryan <[email protected]>
Unnecessary-code-removal-by: Liam R. Howlett <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
[6.1: Ignore CONFIG_PER_VMA_LOCK context]
Signed-off-by: Samuel Mendoza-Jonas <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
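For context, the snippet below is a rough sketch of what the caller now expects the generic lock_mm_and_find_vma() helper to do, reconstructed from the open-coded logic this commit removes from fault.c further down. The real helper lives in mm/memory.c behind CONFIG_LOCK_MM_AND_FIND_VMA; the _sketch suffix marks this as illustrative only, not the upstream implementation.

/*
 * Rough sketch only: what the caller now expects lock_mm_and_find_vma()
 * to do, reconstructed from the open-coded logic removed from fault.c
 * below. Not the real mm/memory.c implementation.
 */
static struct vm_area_struct *
lock_mm_and_find_vma_sketch(struct mm_struct *mm, unsigned long addr,
			    struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/*
	 * Take the mmap read lock. If it is contended and the fault came
	 * from kernel code with no exception-table fixup, bail out rather
	 * than risk deadlocking on our own mmap lock.
	 */
	if (!mmap_read_trylock(mm)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			return NULL;
		mmap_read_lock(mm);
	}

	/* Common case: a VMA directly covers the faulting address. */
	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;

	/* Address below the nearest VMA: try growing a stack VMA down. */
	if (vma && (vma->vm_flags & VM_GROWSDOWN) && !expand_stack(vma, addr))
		return vma;

	/* No usable VMA: drop the lock so the caller can just bail out. */
	mmap_read_unlock(mm);
	return NULL;
}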
1 parent 755aa1b · commit b92cd80

2 files changed, 10 insertions(+), 37 deletions(-)

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -211,6 +211,7 @@ config ARM64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH

arch/arm64/mm/fault.c

Lines changed: 9 additions & 37 deletions
@@ -483,27 +483,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 #define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
 #define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+				  struct vm_area_struct *vma, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
 				  struct pt_regs *regs)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
-
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
 	/*
 	 * Ok, we have a good vm_area for this memory access, so we can handle
 	 * it.
-	 */
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
 	 * Check that the permissions on the VMA allow for the fault which
 	 * occurred.
 	 */
@@ -535,6 +522,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned long vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
+	struct vm_area_struct *vma;
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -585,31 +573,14 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here. However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above mmap_read_trylock() might have succeeded in which
-		 * case, we'll have missed the might_sleep() from down_read().
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-			mmap_read_unlock(mm);
-			goto no_context;
-		}
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto done;
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -628,6 +599,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 	mmap_read_unlock(mm);
 
+done:
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
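Pieced together from the hunks above (with unrelated code elided as comments), the locking portion of do_page_fault() after this change reads roughly as follows. This is a reconstruction for readability, not an additional change in the commit.

	/* ... vm_flags/mm_flags set up and perf event emitted above ... */

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		/* Helper returns with the mmap lock already dropped. */
		fault = VM_FAULT_BADMAP;
		goto done;
	}

	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);

	/* ... signal and VM_FAULT_RETRY handling unchanged; a retry jumps
	 * back to the retry: label with the mmap lock already released ... */
	mmap_read_unlock(mm);

done:
	/* Handle the "normal" (no error) case first. */
	/* ... */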
