Skip to content

Commit 1f4197f

Browse files
bwhacks authored and gregkh committed
arm/mm: Convert to using lock_mm_and_find_vma()
commit 8b35ca3 upstream.

arm has an additional check for address < FIRST_USER_ADDRESS before expanding the stack. Since FIRST_USER_ADDRESS is defined everywhere (generally as 0), move that check to the generic expand_downwards().

Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Samuel Mendoza-Jonas <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent ac764de commit 1f4197f

File tree

3 files changed

+16
-50
lines changed

3 files changed

+16
-50
lines changed

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,7 @@ config ARM
122122
select HAVE_UID16
123123
select HAVE_VIRT_CPU_ACCOUNTING_GEN
124124
select IRQ_FORCED_THREADING
125+
select LOCK_MM_AND_FIND_VMA
125126
select MODULES_USE_ELF_REL
126127
select NEED_DMA_MAP_STATE
127128
select OF_EARLY_FLATTREE if OF

arch/arm/mm/fault.c

Lines changed: 14 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -231,37 +231,11 @@ static inline bool is_permission_fault(unsigned int fsr)
231231
return false;
232232
}
233233

234-
static vm_fault_t __kprobes
235-
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
236-
unsigned long vma_flags, struct pt_regs *regs)
237-
{
238-
struct vm_area_struct *vma = find_vma(mm, addr);
239-
if (unlikely(!vma))
240-
return VM_FAULT_BADMAP;
241-
242-
if (unlikely(vma->vm_start > addr)) {
243-
if (!(vma->vm_flags & VM_GROWSDOWN))
244-
return VM_FAULT_BADMAP;
245-
if (addr < FIRST_USER_ADDRESS)
246-
return VM_FAULT_BADMAP;
247-
if (expand_stack(vma, addr))
248-
return VM_FAULT_BADMAP;
249-
}
250-
251-
/*
252-
* ok, we have a good vm_area for this memory access, check the
253-
* permissions on the VMA allow for the fault which occurred.
254-
*/
255-
if (!(vma->vm_flags & vma_flags))
256-
return VM_FAULT_BADACCESS;
257-
258-
return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
259-
}
260-
261234
static int __kprobes
262235
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
263236
{
264237
struct mm_struct *mm = current->mm;
238+
struct vm_area_struct *vma;
265239
int sig, code;
266240
vm_fault_t fault;
267241
unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -300,31 +274,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
300274

301275
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
302276

303-
/*
304-
* As per x86, we may deadlock here. However, since the kernel only
305-
* validly references user space from well defined areas of the code,
306-
* we can bug out early if this is from code which shouldn't.
307-
*/
308-
if (!mmap_read_trylock(mm)) {
309-
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
310-
goto no_context;
311277
retry:
312-
mmap_read_lock(mm);
313-
} else {
314-
/*
315-
* The above down_read_trylock() might have succeeded in
316-
* which case, we'll have missed the might_sleep() from
317-
* down_read()
318-
*/
319-
might_sleep();
320-
#ifdef CONFIG_DEBUG_VM
321-
if (!user_mode(regs) &&
322-
!search_exception_tables(regs->ARM_pc))
323-
goto no_context;
324-
#endif
278+
vma = lock_mm_and_find_vma(mm, addr, regs);
279+
if (unlikely(!vma)) {
280+
fault = VM_FAULT_BADMAP;
281+
goto bad_area;
325282
}
326283

327-
fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
284+
/*
285+
* ok, we have a good vm_area for this memory access, check the
286+
* permissions on the VMA allow for the fault which occurred.
287+
*/
288+
if (!(vma->vm_flags & vm_flags))
289+
fault = VM_FAULT_BADACCESS;
290+
else
291+
fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
328292

329293
/* If we need to retry but a fatal signal is pending, handle the
330294
* signal first. We do not need to release the mmap_lock because
@@ -355,6 +319,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
355319
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
356320
return 0;
357321

322+
bad_area:
358323
/*
359324
* If we are in kernel mode at this point, we
360325
* have no context to handle this fault with.

mm/mmap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2045,7 +2045,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
20452045
int error = 0;
20462046

20472047
address &= PAGE_MASK;
2048-
if (address < mmap_min_addr)
2048+
if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
20492049
return -EPERM;
20502050

20512051
/* Enforce stack_guard_gap */

0 commit comments

Comments (0)