@@ -231,37 +231,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -300,31 +274,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here.  However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first.  We do not need to release the mmap_lock because
@@ -355,6 +319,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
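
For reference, the helper this diff converts to is the generic lock_mm_and_find_vma() added to mm/memory.c earlier in this series. The sketch below is a simplified reconstruction of its contract, based on the open-coded logic removed above; it is not the verbatim upstream implementation (the real one also handles upgrading to the mmap write lock for stack expansion). The point that matters to the caller: on failure it returns NULL with the mmap lock already released, which is why do_page_fault() can jump straight to bad_area without an unlock.

/*
 * Simplified sketch of lock_mm_and_find_vma(), abbreviated here to show
 * the contract the new ARM code relies on; the real implementation
 * lives in mm/memory.c.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
		unsigned long addr, struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	if (!mmap_read_trylock(mm)) {
		/*
		 * Same rule the removed ARM code open-coded: a kernel-mode
		 * fault with no exception-table fixup must not sleep here.
		 */
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			return NULL;
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;

	/* Stack expansion now happens here, under the mmap lock. */
	if (vma && (vma->vm_flags & VM_GROWSDOWN) && !expand_stack(vma, addr))
		return vma;

	mmap_read_unlock(mm);
	return NULL;
}

Centralizing this logic removes subtle per-architecture differences (the CONFIG_DEBUG_VM-only recheck deleted above being one example) and puts the VM_GROWSDOWN stack expansion on a single, properly locked path shared by all converted architectures.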