|
15 | 15 | #include <linux/export.h>
|
16 | 16 | #include <linux/seq_file.h>
|
17 | 17 | #include <linux/spinlock.h>
|
| 18 | +#include <linux/uaccess.h> |
| 19 | +#include <linux/compat.h> |
18 | 20 | #include <linux/sysfs.h>
|
| 21 | +#include <asm/stacktrace.h> |
19 | 22 | #include <asm/irq.h>
|
20 | 23 | #include <asm/cpu_mf.h>
|
21 | 24 | #include <asm/lowcore.h>
|
@@ -212,6 +215,44 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
|
212 | 215 | }
|
213 | 216 | }
|
214 | 217 |
|
/*
 * Capture a user-space callchain for a perf sample by walking the
 * user stack via the back-chain field of each stack frame.
 *
 * Records the interrupted instruction address first, then follows
 * sf->back_chain from frame to frame, storing the return address
 * saved in each frame (sf->gprs[8] — presumably the slot for r14,
 * as suggested by the first-frame fallback to regs->gprs[14]) until
 * the chain ends, a user access fails, or entry->max_stack entries
 * have been stored.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	/*
	 * Only 64-bit tasks are handled; compat (31-bit) tasks are
	 * skipped — NOTE(review): presumably because their stack frame
	 * layout differs from struct stack_frame_user.
	 */
	if (is_compat_task())
		return;
	/* The sampled instruction address is always the first entry. */
	perf_callchain_store(entry, instruction_pointer(regs));
	sf = (void __user *)user_stack_pointer(regs);
	/*
	 * Disable page faults so the __get_user() accesses below fail
	 * immediately instead of faulting in user pages — NOTE(review):
	 * consistent with this running from sampling/interrupt context.
	 */
	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		/* back_chain points at the caller's stack frame (0 ends the chain). */
		if (__get_user(sp, &sf->back_chain))
			break;
		/* Saved return address stored in this frame. */
		if (__get_user(ip, &sf->gprs[8]))
			break;
		/* An odd address cannot be a valid instruction address. */
		if (ip & 0x1) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (first && !(regs->gprs[14] & 0x1))
				ip = regs->gprs[14];
			else
				break;
		}
		perf_callchain_store(entry, ip);
		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
		if (!sp || sp & 0x7)
			break;
		sf = (void __user *)sp;
		first = false;
	}
	pagefault_enable();
}
| 255 | + |
215 | 256 | /* Perf definitions for PMU event attributes in sysfs */
|
216 | 257 | ssize_t cpumf_events_sysfs_show(struct device *dev,
|
217 | 258 | struct device_attribute *attr, char *page)
|
|
0 commit comments