|
81 | 81 |
|
82 | 82 | int entry_mark_page_accessed(struct pt_regs *ctx) {
|
83 | 83 | u64 ts, delta;
|
84 |
| - struct page *arg0 = (struct page *) PT_REGS_PARM1(ctx); |
| 84 | + struct page *arg0 = GET_ARG1_PAGE; |
85 | 85 | u32 zero = 0; // static key for accessing pages[0]
|
86 | 86 | u64 *bts = birth.lookup(&arg0);
|
87 | 87 | if (bts != NULL) {
|
|
94 | 94 | }
|
95 | 95 | """
|
96 | 96 |
|
97 |
| -bpf_text_kfunc = """ |
| 97 | +bpf_text_kfunc_cache_readahead = """ |
98 | 98 | KFUNC_PROBE(RA_FUNC)
|
99 | 99 | {
|
100 | 100 | u32 pid = bpf_get_current_pid_tgid();
|
|
112 | 112 | flag.update(&pid, &zero);
|
113 | 113 | return 0;
|
114 | 114 | }
|
| 115 | +""" |
115 | 116 |
|
116 |
# Template for the "page marked accessed" kfunc probe. Three placeholders are
# substituted at load time depending on the kernel's available symbol:
#   MA_FUNC_NAME           - kfunc to attach to (folio_mark_accessed or
#                            mark_page_accessed)
#   MA_ARG_TYPE            - C type of the probe's first argument
#   GET_PAGE_PTR_FROM_ARG0 - expression yielding the struct page * used as
#                            the birth-map key
bpf_text_kfunc_mark_accessed_template = """
KFUNC_PROBE(MA_FUNC_NAME, MA_ARG_TYPE arg0)
{
    u64 delta;
    u32 zero = 0; // static key for accessing pages[0]

    struct page *page = GET_PAGE_PTR_FROM_ARG0;
    u64 *bts = birth.lookup(&page);

    if (bts != NULL) {
        delta = bpf_ktime_get_ns() - *bts;
        dist.atomic_increment(bpf_log2l(delta/1000000));
        pages.atomic_increment(zero, -1);
        birth.delete(&page); // remove the entry from hashmap
    }
    return 0;
}
"""
|
|
181 | 184 | elif BPF.get_kprobe_functions(b"page_cache_ra_order"):
|
182 | 185 | ra_func = "page_cache_ra_order"
|
183 | 186 | else:
|
184 |
| - print("Not found any kfunc.") |
| 187 | + print("Not found any kfunc for page cache readahead.") |
185 | 188 | exit()
|
186 |
| - bpf_text += bpf_text_kfunc.replace("RA_FUNC", ra_func) |
| 189 | + bpf_text += bpf_text_kfunc_cache_readahead.replace("RA_FUNC", ra_func) |
187 | 190 | if BPF.get_kprobe_functions(b"__page_cache_alloc"):
|
188 | 191 | bpf_text += bpf_text_kfunc_cache_alloc_ret_page
|
189 | 192 | else:
|
|
195 | 198 | print("ERROR: No cache alloc function found. Exiting.")
|
196 | 199 | exit()
|
197 | 200 | bpf_text += bpf_text_kfunc_cache_alloc_ret_folio_func_body
|
| 201 | + if BPF.get_kprobe_functions(b"folio_mark_accessed"): |
| 202 | + ma_func_name = "folio_mark_accessed" |
| 203 | + ma_arg_type = "struct folio *" |
| 204 | + get_page_ptr_code = "folio_page(arg0, 0)" |
| 205 | + bpf_text_kfunc_mark_accessed = bpf_text_kfunc_mark_accessed_template \ |
| 206 | + .replace("MA_FUNC_NAME", ma_func_name) \ |
| 207 | + .replace("MA_ARG_TYPE", ma_arg_type) \ |
| 208 | + .replace("GET_PAGE_PTR_FROM_ARG0", get_page_ptr_code) |
| 209 | + elif BPF.get_kprobe_functions(b"mark_page_accessed"): |
| 210 | + ma_func_name = "mark_page_accessed" |
| 211 | + ma_arg_type = "struct page *" |
| 212 | + get_page_ptr_code = "arg0" |
| 213 | + bpf_text_kfunc_mark_accessed = bpf_text_kfunc_mark_accessed_template \ |
| 214 | + .replace("MA_FUNC_NAME", ma_func_name) \ |
| 215 | + .replace("MA_ARG_TYPE", ma_arg_type) \ |
| 216 | + .replace("GET_PAGE_PTR_FROM_ARG0", get_page_ptr_code) |
| 217 | + else: |
| 218 | + print("Not found any kfunc for page cache mark accessed.") |
| 219 | + exit() |
| 220 | + bpf_text += bpf_text_kfunc_mark_accessed |
198 | 221 | b = BPF(text=bpf_text)
|
199 | 222 | else:
|
200 | 223 | bpf_text += bpf_text_kprobe
|
|
205 | 228 | elif BPF.get_kprobe_functions(b"page_cache_ra_order"):
|
206 | 229 | ra_event = "page_cache_ra_order"
|
207 | 230 | else:
|
208 |
| - print("Not found any kprobe.") |
| 231 | + print("Not found any kprobe for page cache readahead.") |
209 | 232 | exit()
|
210 | 233 | if BPF.get_kprobe_functions(b"__page_cache_alloc"):
|
211 | 234 | cache_func = "__page_cache_alloc"
|
|
219 | 242 | print("ERROR: No cache alloc function found. Exiting.")
|
220 | 243 | exit()
|
221 | 244 | bpf_text = bpf_text.replace('GET_RETVAL_PAGE', 'folio_page((struct folio *)PT_REGS_RC(ctx), 0)')
|
| 245 | + if BPF.get_kprobe_functions(b"folio_mark_accessed"): |
| 246 | + ma_event = "folio_mark_accessed" |
| 247 | + bpf_text = bpf_text.replace('GET_ARG1_PAGE', 'folio_page((struct folio *)PT_REGS_PARM1(ctx), 0)') |
| 248 | + elif BPF.get_kprobe_functions(b"mark_page_accessed"): |
| 249 | + ma_event = "mark_page_accessed" |
| 250 | + bpf_text = bpf_text.replace('GET_ARG1_PAGE', '(struct page *)PT_REGS_PARM1(ctx)') |
| 251 | + else: |
| 252 | + print("Not found any kprobe for page cache mark accessed.") |
| 253 | + exit() |

    b = BPF(text=bpf_text)
    # Attach entry/exit probes around the readahead path, the page-cache
    # allocation return probe, and the access probe selected above
    # (ma_event), which all feed the shared birth/pages/dist maps.
    b.attach_kprobe(event=ra_event, fn_name="entry__do_page_cache_readahead")
    b.attach_kretprobe(event=ra_event, fn_name="exit__do_page_cache_readahead")
    b.attach_kretprobe(event=cache_func, fn_name="exit__page_cache_alloc")
    b.attach_kprobe(event=ma_event, fn_name="entry_mark_page_accessed")

# header
print("Tracing... Hit Ctrl-C to end.")
|
|
0 commit comments