Skip to content

Commit 6a1bb02

Browse files
xzpeter authored and torvalds committed
mm/arm64: use general page fault accounting
Use the general page fault accounting by passing regs into handle_mm_fault(). It naturally solves the issue of multiple page fault accounting when a page fault retry happens. To do this, we pass the pt_regs pointer into __do_page_fault(). Signed-off-by: Peter Xu <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Acked-by: Will Deacon <[email protected]> Cc: Catalin Marinas <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent 79fea6c commit 6a1bb02

File tree

1 file changed

+6
-23
lines changed

1 file changed

+6
-23
lines changed

arch/arm64/mm/fault.c

Lines changed: 6 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -404,7 +404,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
404404
#define VM_FAULT_BADACCESS 0x020000
405405

406406
static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
407-
unsigned int mm_flags, unsigned long vm_flags)
407+
unsigned int mm_flags, unsigned long vm_flags,
408+
struct pt_regs *regs)
408409
{
409410
struct vm_area_struct *vma = find_vma(mm, addr);
410411

@@ -428,7 +429,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
428429
*/
429430
if (!(vma->vm_flags & vm_flags))
430431
return VM_FAULT_BADACCESS;
431-
return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, NULL);
432+
return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
432433
}
433434

434435
static bool is_el0_instruction_abort(unsigned int esr)
@@ -450,7 +451,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
450451
{
451452
const struct fault_info *inf;
452453
struct mm_struct *mm = current->mm;
453-
vm_fault_t fault, major = 0;
454+
vm_fault_t fault;
454455
unsigned long vm_flags = VM_ACCESS_FLAGS;
455456
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
456457

@@ -516,8 +517,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
516517
#endif
517518
}
518519

519-
fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
520-
major |= fault & VM_FAULT_MAJOR;
520+
fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
521521

522522
/* Quick path to respond to signals */
523523
if (fault_signal_pending(fault, regs)) {
@@ -538,25 +538,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
538538
* Handle the "normal" (no error) case first.
539539
*/
540540
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
541-
VM_FAULT_BADACCESS)))) {
542-
/*
543-
* Major/minor page fault accounting is only done
544-
* once. If we go through a retry, it is extremely
545-
* likely that the page will be found in page cache at
546-
* that point.
547-
*/
548-
if (major) {
549-
current->maj_flt++;
550-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
551-
addr);
552-
} else {
553-
current->min_flt++;
554-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
555-
addr);
556-
}
557-
541+
VM_FAULT_BADACCESS))))
558542
return 0;
559-
}
560543

561544
/*
562545
* If we are in kernel mode at this point, we have no context to

0 commit comments

Comments (0)