
Commit 79fea6c

xzpeter authored and torvalds committed
mm/arm: use general page fault accounting
Use the general page fault accounting by passing regs into handle_mm_fault(). It naturally solves the issue of multiple page fault accounting when a page fault retry happens. To do this, we need to pass the pt_regs pointer into __do_page_fault(). Fix the PERF_COUNT_SW_PAGE_FAULTS perf event manually for page fault retries by moving it before taking mmap_sem.

Signed-off-by: Peter Xu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Cc: Russell King <[email protected]>
Cc: Will Deacon <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 52e3f8d commit 79fea6c
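For context, the "general page fault accounting" this patch switches to does not appear in the arm diff below; it was added by a companion patch earlier in the same series, inside handle_mm_fault() in mm/memory.c. The following is only a rough sketch of that shared logic, paraphrased from memory and assuming the helper is the mm_account_fault() introduced there; it is not code from this commit, and details such as comments and exact ordering are approximate.

/*
 * Hedged sketch of the accounting done once handle_mm_fault() receives
 * regs (paraphrase of mm_account_fault() in mm/memory.c from the same
 * series; not part of this commit).
 */
static inline void mm_account_fault(struct pt_regs *regs,
				    unsigned long address, unsigned int flags,
				    vm_fault_t ret)
{
	bool major;

	/* Only account completed faults: retries and errors are skipped,
	 * so each fault is counted exactly once, on the attempt that
	 * finally resolves it. */
	if (ret & (VM_FAULT_RETRY | VM_FAULT_ERROR))
		return;

	/* A fault that needed a retry is treated as major. */
	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	/* Callers without user registers (e.g. get_user_pages()) still get
	 * the per-task counters but no perf software events. */
	if (!regs)
		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}

Because the core now bumps maj_flt/min_flt and emits the MAJ/MIN perf events once per completed fault, the arm handler only keeps the overall PERF_COUNT_SW_PAGE_FAULTS event, and moving that event above the mmap_sem acquisition (as the diff below does) makes it fire once per fault rather than once per retry.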

arch/arm/mm/fault.c

Lines changed: 6 additions & 19 deletions
@@ -202,7 +202,8 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		unsigned int flags, struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk,
+		struct pt_regs *regs)
 {
 	struct vm_area_struct *vma;
 	vm_fault_t fault;
@@ -224,7 +225,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		goto out;
 	}
 
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, NULL);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
@@ -266,6 +267,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
 		flags |= FAULT_FLAG_WRITE;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
 	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
@@ -290,7 +293,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+	fault = __do_page_fault(mm, addr, fsr, flags, tsk, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -302,23 +305,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		return 0;
 	}
 
-	/*
-	 * Major/minor page fault accounting is only done on the
-	 * initial attempt. If we go through a retry, it is extremely
-	 * likely that the page will be found in page cache at that point.
-	 */
-
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-					regs, addr);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-					regs, addr);
-		}
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;
 			goto retry;
