@@ -18,6 +18,7 @@
 #include <linux/signal.h>
 #include <linux/extable.h>
 #include <linux/hardirq.h>
+#include <linux/perf_event.h>
 
 /*
  * Decode of hardware exception sends us to one of several
@@ -53,6 +54,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
@@ -88,18 +91,14 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 		break;
 	}
 
-	fault = handle_mm_fault(vma, address, flags, NULL);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	if (fault_signal_pending(fault, regs))
 		return;
 
 	/* The most common case -- we are done. */
 	if (likely(!(fault & VM_FAULT_ERROR))) {
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
-			if (fault & VM_FAULT_MAJOR)
-				current->maj_flt++;
-			else
-				current->min_flt++;
 			if (fault & VM_FAULT_RETRY) {
 				flags |= FAULT_FLAG_TRIED;
 				goto retry;
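
After this change, passing a non-NULL regs to handle_mm_fault() lets the generic mm code do the per-task maj_flt/min_flt bookkeeping and emit the major/minor perf events, which is why the open-coded counter updates above are deleted; the arch handler keeps only the single PERF_COUNT_SW_PAGE_FAULTS event it now raises before taking mmap_lock. The sketch below is a simplified approximation of that generic accounting, modelled on the mm_account_fault() helper in mm/memory.c; the name account_fault_sketch and the exact guard conditions are illustrative, not the verbatim kernel source.

/*
 * Simplified sketch of the accounting handle_mm_fault() now performs
 * when the caller supplies register state. Illustrative only.
 */
static void account_fault_sketch(struct pt_regs *regs, unsigned long address,
				 unsigned int flags, vm_fault_t ret)
{
	bool major;

	/* Faults that will be retried or that failed are not counted here. */
	if (ret & (VM_FAULT_RETRY | VM_FAULT_ERROR))
		return;

	/* "Major" means I/O was involved, or this is the post-retry pass. */
	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	/* Without register state there is nothing to attribute a perf sample to. */
	if (!regs)
		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}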