Skip to content

Commit ac764de

Browse files
bwhacks authored and gregkh committed
riscv/mm: Convert to using lock_mm_and_find_vma()
commit 7267ef7 upstream.

Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
[6.1: Kconfig context]
Signed-off-by: Samuel Mendoza-Jonas <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 7227d70 commit ac764de

File tree

2 files changed

+14
-18
lines changed

2 files changed

+14
-18
lines changed

arch/riscv/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -114,6 +114,7 @@ config RISCV
114114
select HAVE_RSEQ
115115
select IRQ_DOMAIN
116116
select IRQ_FORCED_THREADING
117+
select LOCK_MM_AND_FIND_VMA
117118
select MODULES_USE_ELF_RELA if MODULES
118119
select MODULE_SECTIONS if MODULES
119120
select OF

arch/riscv/mm/fault.c

Lines changed: 13 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -83,13 +83,13 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
8383
BUG();
8484
}
8585

86-
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
86+
static inline void
87+
bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
8788
{
8889
/*
8990
* Something tried to access memory that isn't in our memory map.
9091
* Fix it, but check if it's kernel or user first.
9192
*/
92-
mmap_read_unlock(mm);
9393
/* User mode accesses just cause a SIGSEGV */
9494
if (user_mode(regs)) {
9595
do_trap(regs, SIGSEGV, code, addr);
@@ -99,6 +99,15 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code
9999
no_context(regs, addr);
100100
}
101101

102+
static inline void
103+
bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
104+
unsigned long addr)
105+
{
106+
mmap_read_unlock(mm);
107+
108+
bad_area_nosemaphore(regs, code, addr);
109+
}
110+
102111
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
103112
{
104113
pgd_t *pgd, *pgd_k;
@@ -281,31 +290,17 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
281290
else if (cause == EXC_INST_PAGE_FAULT)
282291
flags |= FAULT_FLAG_INSTRUCTION;
283292
retry:
284-
mmap_read_lock(mm);
285-
vma = find_vma(mm, addr);
293+
vma = lock_mm_and_find_vma(mm, addr, regs);
286294
if (unlikely(!vma)) {
287295
tsk->thread.bad_cause = cause;
288-
bad_area(regs, mm, code, addr);
289-
return;
290-
}
291-
if (likely(vma->vm_start <= addr))
292-
goto good_area;
293-
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
294-
tsk->thread.bad_cause = cause;
295-
bad_area(regs, mm, code, addr);
296-
return;
297-
}
298-
if (unlikely(expand_stack(vma, addr))) {
299-
tsk->thread.bad_cause = cause;
300-
bad_area(regs, mm, code, addr);
296+
bad_area_nosemaphore(regs, code, addr);
301297
return;
302298
}
303299

304300
/*
305301
* Ok, we have a good vm_area for this memory access, so
306302
* we can handle it.
307303
*/
308-
good_area:
309304
code = SEGV_ACCERR;
310305

311306
if (unlikely(access_error(cause, vma))) {

0 commit comments

Comments (0)