15 | 15 | #include <asm/sigcontext.h>
16 | 16 | #include <asm/ucontext.h>
17 | 17 | #include <asm/vdso.h>
18 |    | -#ifdef CONFIG_COMPAT
19 |    | -#include "../kernel/ppc32.h"
20 |    | -#endif
21 | 18 | #include <asm/pte-walk.h>
22 | 19 |
   | 20 | +#include "callchain.h"
23 | 21 |
24 | 22 | /*
25 | 23 |  * Is sp valid as the address of the next kernel stack frame after prev_sp?

@@ -102,358 +100,6 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re

102 | 100 | 	}
103 | 101 | }
104 | 102 |
105 |     | -static inline bool invalid_user_sp(unsigned long sp)
106 |     | -{
107 |     | -	unsigned long mask = is_32bit_task() ? 3 : 7;
108 |     | -	unsigned long top = STACK_TOP - (is_32bit_task() ? 16 : 32);
109 |     | -
110 |     | -	return (!sp || (sp & mask) || (sp > top));
111 |     | -}
112 |     | -
113 |     | -#ifdef CONFIG_PPC64
114 |     | -/*
115 |     | - * On 64-bit we don't want to invoke hash_page on user addresses from
116 |     | - * interrupt context, so if the access faults, we read the page tables
117 |     | - * to find which page (if any) is mapped and access it directly.
118 |     | - */
119 |     | -static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
120 |     | -{
121 |     | -	int ret = -EFAULT;
122 |     | -	pgd_t *pgdir;
123 |     | -	pte_t *ptep, pte;
124 |     | -	unsigned shift;
125 |     | -	unsigned long addr = (unsigned long) ptr;
126 |     | -	unsigned long offset;
127 |     | -	unsigned long pfn, flags;
128 |     | -	void *kaddr;
129 |     | -
130 |     | -	pgdir = current->mm->pgd;
131 |     | -	if (!pgdir)
132 |     | -		return -EFAULT;
133 |     | -
134 |     | -	local_irq_save(flags);
135 |     | -	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
136 |     | -	if (!ptep)
137 |     | -		goto err_out;
138 |     | -	if (!shift)
139 |     | -		shift = PAGE_SHIFT;
140 |     | -
141 |     | -	/* align address to page boundary */
142 |     | -	offset = addr & ((1UL << shift) - 1);
143 |     | -
144 |     | -	pte = READ_ONCE(*ptep);
145 |     | -	if (!pte_present(pte) || !pte_user(pte))
146 |     | -		goto err_out;
147 |     | -	pfn = pte_pfn(pte);
148 |     | -	if (!page_is_ram(pfn))
149 |     | -		goto err_out;
150 |     | -
151 |     | -	/* no highmem to worry about here */
152 |     | -	kaddr = pfn_to_kaddr(pfn);
153 |     | -	memcpy(buf, kaddr + offset, nb);
154 |     | -	ret = 0;
155 |     | -err_out:
156 |     | -	local_irq_restore(flags);
157 |     | -	return ret;
158 |     | -}
159 |     | -
160 |     | -static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
161 |     | -{
162 |     | -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
163 |     | -	    ((unsigned long)ptr & 7))
164 |     | -		return -EFAULT;
165 |     | -
166 |     | -	if (!probe_user_read(ret, ptr, sizeof(*ret)))
167 |     | -		return 0;
168 |     | -
169 |     | -	return read_user_stack_slow(ptr, ret, 8);
170 |     | -}
171 |     | -
172 |     | -/*
173 |     | - * 64-bit user processes use the same stack frame for RT and non-RT signals.
174 |     | - */
175 |     | -struct signal_frame_64 {
176 |     | -	char dummy[__SIGNAL_FRAMESIZE];
177 |     | -	struct ucontext uc;
178 |     | -	unsigned long unused[2];
179 |     | -	unsigned int tramp[6];
180 |     | -	struct siginfo *pinfo;
181 |     | -	void *puc;
182 |     | -	struct siginfo info;
183 |     | -	char abigap[288];
184 |     | -};
185 |     | -
186 |     | -static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
187 |     | -{
188 |     | -	if (nip == fp + offsetof(struct signal_frame_64, tramp))
189 |     | -		return 1;
190 |     | -	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
191 |     | -	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
192 |     | -		return 1;
193 |     | -	return 0;
194 |     | -}
195 |     | -
196 |     | -/*
197 |     | - * Do some sanity checking on the signal frame pointed to by sp.
198 |     | - * We check the pinfo and puc pointers in the frame.
199 |     | - */
200 |     | -static int sane_signal_64_frame(unsigned long sp)
201 |     | -{
202 |     | -	struct signal_frame_64 __user *sf;
203 |     | -	unsigned long pinfo, puc;
204 |     | -
205 |     | -	sf = (struct signal_frame_64 __user *) sp;
206 |     | -	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
207 |     | -	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
208 |     | -		return 0;
209 |     | -	return pinfo == (unsigned long) &sf->info &&
210 |     | -		puc == (unsigned long) &sf->uc;
211 |     | -}
212 |     | -
213 |     | -static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
214 |     | -				   struct pt_regs *regs)
215 |     | -{
216 |     | -	unsigned long sp, next_sp;
217 |     | -	unsigned long next_ip;
218 |     | -	unsigned long lr;
219 |     | -	long level = 0;
220 |     | -	struct signal_frame_64 __user *sigframe;
221 |     | -	unsigned long __user *fp, *uregs;
222 |     | -
223 |     | -	next_ip = perf_instruction_pointer(regs);
224 |     | -	lr = regs->link;
225 |     | -	sp = regs->gpr[1];
226 |     | -	perf_callchain_store(entry, next_ip);
227 |     | -
228 |     | -	while (entry->nr < entry->max_stack) {
229 |     | -		fp = (unsigned long __user *) sp;
230 |     | -		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
231 |     | -			return;
232 |     | -		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
233 |     | -			return;
234 |     | -
235 |     | -		/*
236 |     | -		 * Note: the next_sp - sp >= signal frame size check
237 |     | -		 * is true when next_sp < sp, which can happen when
238 |     | -		 * transitioning from an alternate signal stack to the
239 |     | -		 * normal stack.
240 |     | -		 */
241 |     | -		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
242 |     | -		    (is_sigreturn_64_address(next_ip, sp) ||
243 |     | -		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
244 |     | -		    sane_signal_64_frame(sp)) {
245 |     | -			/*
246 |     | -			 * This looks like an signal frame
247 |     | -			 */
248 |     | -			sigframe = (struct signal_frame_64 __user *) sp;
249 |     | -			uregs = sigframe->uc.uc_mcontext.gp_regs;
250 |     | -			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
251 |     | -			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
252 |     | -			    read_user_stack_64(&uregs[PT_R1], &sp))
253 |     | -				return;
254 |     | -			level = 0;
255 |     | -			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
256 |     | -			perf_callchain_store(entry, next_ip);
257 |     | -			continue;
258 |     | -		}
259 |     | -
260 |     | -		if (level == 0)
261 |     | -			next_ip = lr;
262 |     | -		perf_callchain_store(entry, next_ip);
263 |     | -		++level;
264 |     | -		sp = next_sp;
265 |     | -	}
266 |     | -}
267 |     | -
268 |     | -#else  /* CONFIG_PPC64 */
269 |     | -static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
270 |     | -{
271 |     | -	return 0;
272 |     | -}
273 |     | -
274 |     | -static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
275 |     | -					  struct pt_regs *regs)
276 |     | -{
277 |     | -}
278 |     | -
279 |     | -#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
280 |     | -#define sigcontext32 sigcontext
281 |     | -#define mcontext32 mcontext
282 |     | -#define ucontext32 ucontext
283 |     | -#define compat_siginfo_t struct siginfo
284 |     | -
285 |     | -#endif /* CONFIG_PPC64 */
286 |     | -
287 |     | -#if defined(CONFIG_PPC32) || defined(CONFIG_COMPAT)
288 |     | -/*
289 |     | - * On 32-bit we just access the address and let hash_page create a
290 |     | - * HPTE if necessary, so there is no need to fall back to reading
291 |     | - * the page tables.  Since this is called at interrupt level,
292 |     | - * do_page_fault() won't treat a DSI as a page fault.
293 |     | - */
294 |     | -static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
295 |     | -{
296 |     | -	int rc;
297 |     | -
298 |     | -	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
299 |     | -	    ((unsigned long)ptr & 3))
300 |     | -		return -EFAULT;
301 |     | -
302 |     | -	rc = probe_user_read(ret, ptr, sizeof(*ret));
303 |     | -
304 |     | -	if (IS_ENABLED(CONFIG_PPC64) && rc)
305 |     | -		return read_user_stack_slow(ptr, ret, 4);
306 |     | -
307 |     | -	return rc;
308 |     | -}
309 |     | -
310 |     | -/*
311 |     | - * Layout for non-RT signal frames
312 |     | - */
313 |     | -struct signal_frame_32 {
314 |     | -	char dummy[__SIGNAL_FRAMESIZE32];
315 |     | -	struct sigcontext32 sctx;
316 |     | -	struct mcontext32 mctx;
317 |     | -	int abigap[56];
318 |     | -};
319 |     | -
320 |     | -/*
321 |     | - * Layout for RT signal frames
322 |     | - */
323 |     | -struct rt_signal_frame_32 {
324 |     | -	char dummy[__SIGNAL_FRAMESIZE32 + 16];
325 |     | -	compat_siginfo_t info;
326 |     | -	struct ucontext32 uc;
327 |     | -	int abigap[56];
328 |     | -};
329 |     | -
330 |     | -static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
331 |     | -{
332 |     | -	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
333 |     | -		return 1;
334 |     | -	if (vdso32_sigtramp && current->mm->context.vdso_base &&
335 |     | -	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
336 |     | -		return 1;
337 |     | -	return 0;
338 |     | -}
339 |     | -
340 |     | -static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
341 |     | -{
342 |     | -	if (nip == fp + offsetof(struct rt_signal_frame_32,
343 |     | -				 uc.uc_mcontext.mc_pad))
344 |     | -		return 1;
345 |     | -	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
346 |     | -	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
347 |     | -		return 1;
348 |     | -	return 0;
349 |     | -}
350 |     | -
351 |     | -static int sane_signal_32_frame(unsigned int sp)
352 |     | -{
353 |     | -	struct signal_frame_32 __user *sf;
354 |     | -	unsigned int regs;
355 |     | -
356 |     | -	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
357 |     | -	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
358 |     | -		return 0;
359 |     | -	return regs == (unsigned long) &sf->mctx;
360 |     | -}
361 |     | -
362 |     | -static int sane_rt_signal_32_frame(unsigned int sp)
363 |     | -{
364 |     | -	struct rt_signal_frame_32 __user *sf;
365 |     | -	unsigned int regs;
366 |     | -
367 |     | -	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
368 |     | -	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
369 |     | -		return 0;
370 |     | -	return regs == (unsigned long) &sf->uc.uc_mcontext;
371 |     | -}
372 |     | -
373 |     | -static unsigned int __user *signal_frame_32_regs(unsigned int sp,
374 |     | -				unsigned int next_sp, unsigned int next_ip)
375 |     | -{
376 |     | -	struct mcontext32 __user *mctx = NULL;
377 |     | -	struct signal_frame_32 __user *sf;
378 |     | -	struct rt_signal_frame_32 __user *rt_sf;
379 |     | -
380 |     | -	/*
381 |     | -	 * Note: the next_sp - sp >= signal frame size check
382 |     | -	 * is true when next_sp < sp, for example, when
383 |     | -	 * transitioning from an alternate signal stack to the
384 |     | -	 * normal stack.
385 |     | -	 */
386 |     | -	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
387 |     | -	    is_sigreturn_32_address(next_ip, sp) &&
388 |     | -	    sane_signal_32_frame(sp)) {
389 |     | -		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
390 |     | -		mctx = &sf->mctx;
391 |     | -	}
392 |     | -
393 |     | -	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
394 |     | -	    is_rt_sigreturn_32_address(next_ip, sp) &&
395 |     | -	    sane_rt_signal_32_frame(sp)) {
396 |     | -		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
397 |     | -		mctx = &rt_sf->uc.uc_mcontext;
398 |     | -	}
399 |     | -
400 |     | -	if (!mctx)
401 |     | -		return NULL;
402 |     | -	return mctx->mc_gregs;
403 |     | -}
404 |     | -
405 |     | -static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
406 |     | -				   struct pt_regs *regs)
407 |     | -{
408 |     | -	unsigned int sp, next_sp;
409 |     | -	unsigned int next_ip;
410 |     | -	unsigned int lr;
411 |     | -	long level = 0;
412 |     | -	unsigned int __user *fp, *uregs;
413 |     | -
414 |     | -	next_ip = perf_instruction_pointer(regs);
415 |     | -	lr = regs->link;
416 |     | -	sp = regs->gpr[1];
417 |     | -	perf_callchain_store(entry, next_ip);
418 |     | -
419 |     | -	while (entry->nr < entry->max_stack) {
420 |     | -		fp = (unsigned int __user *) (unsigned long) sp;
421 |     | -		if (invalid_user_sp(sp) || read_user_stack_32(fp, &next_sp))
422 |     | -			return;
423 |     | -		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
424 |     | -			return;
425 |     | -
426 |     | -		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
427 |     | -		if (!uregs && level <= 1)
428 |     | -			uregs = signal_frame_32_regs(sp, next_sp, lr);
429 |     | -		if (uregs) {
430 |     | -			/*
431 |     | -			 * This looks like an signal frame, so restart
432 |     | -			 * the stack trace with the values in it.
433 |     | -			 */
434 |     | -			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
435 |     | -			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
436 |     | -			    read_user_stack_32(&uregs[PT_R1], &sp))
437 |     | -				return;
438 |     | -			level = 0;
439 |     | -			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
440 |     | -			perf_callchain_store(entry, next_ip);
441 |     | -			continue;
442 |     | -		}
443 |     | -
444 |     | -		if (level == 0)
445 |     | -			next_ip = lr;
446 |     | -		perf_callchain_store(entry, next_ip);
447 |     | -		++level;
448 |     | -		sp = next_sp;
449 |     | -	}
450 |     | -}
451 |     | -#else /* 32bit */
452 |     | -static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
453 |     | -				   struct pt_regs *regs)
454 |     | -{}
455 |     | -#endif /* 32bit */
456 |     | -
457 | 103 | void
458 | 104 | perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
459 | 105 | {
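
The hunk above removes the user-space callchain walkers from callchain.c and replaces them with a new shared header, pulled in by the +#include "callchain.h" line. That header is not part of this hunk, so its contents are not shown; the sketch below is only a plausible shape for it, inferred from the symbols the removed 32-bit and 64-bit paths have in common (read_user_stack_slow, the two walkers, and invalid_user_sp), and is not taken from the commit itself.

/*
 * Hypothetical sketch of a shared callchain.h for the 32-/64-bit split.
 * Inferred from the code removed in the hunk above; NOT the commit's
 * actual header.
 */
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H

/* 64-bit slow path: walk the page tables instead of faulting in IRQ context. */
int read_user_stack_slow(void __user *ptr, void *buf, int nb);

/* Per-bitness user stack walkers, built from callchain_64.c / callchain_32.c. */
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);

/* Shared check: reject a NULL, misaligned or out-of-range user stack pointer. */
static inline bool invalid_user_sp(unsigned long sp)
{
	unsigned long mask = is_32bit_task() ? 3 : 7;
	unsigned long top = STACK_TOP - (is_32bit_task() ? 16 : 32);

	return (!sp || (sp & mask) || (sp > top));
}

#endif /* _POWERPC_PERF_CALLCHAIN_H */

A split along these lines would let a 64-bit kernel built with CONFIG_COMPAT keep the 32-bit walker for compat tasks, while 32-bit-only builds drop the 64-bit path entirely; that matches the CONFIG_COMPAT and CONFIG_PPC32 guards visible in the removed code.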