 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
 
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>
+#include <linux/ptrace.h>
 
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_FRAME_POINTER
+
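+/*
+ * With frame pointers enabled, each stack frame record holds the caller's
+ * frame pointer at fp[0] and the return address at fp[1]; the walker below
+ * reads records in exactly that layout.
+ */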
+struct stackframe {
+	unsigned long fp;
+	unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+			     bool (*fn)(unsigned long, void *), void *arg)
 {
-	save_stack_trace_tsk(current, trace);
+	unsigned long fp, sp, pc;
+
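+	/*
+	 * Pick the starting frame: from the saved exception registers when
+	 * regs is given, from the live registers when unwinding the current
+	 * task, or from the context saved at the last __switch_to() for a
+	 * sleeping task.
+	 */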
+	if (regs) {
+		fp = frame_pointer(regs);
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		const register unsigned long current_fp __asm__ ("r8");
+		fp = current_fp;
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
+	} else {
+		/* task blocked in __switch_to */
+		fp = thread_saved_fp(task);
+		sp = thread_saved_sp(task);
+		pc = thread_saved_lr(task);
+	}
+
+	for (;;) {
+		unsigned long low, high;
+		struct stackframe *frame;
+
+		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+			break;
+
+		/* Validate frame pointer */
+		low = sp;
+		high = ALIGN(sp, THREAD_SIZE);
+		if (unlikely(fp < low || fp > high || fp & 0x3))
+			break;
+		/* Unwind stack frame */
+		frame = (struct stackframe *)fp;
+		sp = fp;
+		fp = frame->fp;
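+		/*
+		 * If the function graph tracer replaced the saved return
+		 * address with return_to_handler, recover the real one.
+		 */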
+		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+					   (unsigned long *)(fp - 8));
+	}
 }
-EXPORT_SYMBOL_GPL(save_stack_trace);
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+#else /* !CONFIG_FRAME_POINTER */
+
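+/*
+ * Without frame pointers there is no reliable call chain, so fall back to
+ * scanning the kernel stack for values that point into kernel text.
+ */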
+static void notrace walk_stackframe(struct task_struct *task,
+	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
 {
-	unsigned long *fp, *stack_start, *stack_end;
-	unsigned long addr;
-	int skip = trace->skip;
-	int savesched;
-	int graph_idx = 0;
+	unsigned long sp, pc;
+	unsigned long *ksp;
 
-	if (tsk == current) {
-		asm volatile("mov %0, r8\n":"=r"(fp));
-		savesched = 1;
+	if (regs) {
+		sp = user_stack_pointer(regs);
+		pc = instruction_pointer(regs);
+	} else if (task == NULL || task == current) {
+		const register unsigned long current_sp __asm__ ("sp");
+		sp = current_sp;
+		pc = (unsigned long)walk_stackframe;
 	} else {
-		fp = (unsigned long *)thread_saved_fp(tsk);
-		savesched = 0;
+		/* task blocked in __switch_to */
+		sp = thread_saved_sp(task);
+		pc = thread_saved_lr(task);
 	}
 
-	addr = (unsigned long) fp & THREAD_MASK;
-	stack_start = (unsigned long *) addr;
-	stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-	while (fp > stack_start && fp < stack_end) {
-		unsigned long lpp, fpp;
+	if (unlikely(sp & 0x3))
+		return;
 
-		fpp = fp[0];
-		lpp = fp[1];
-		if (!__kernel_text_address(lpp))
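+	/*
+	 * Scan every word on the kernel stack and report each value that
+	 * points into kernel text as a possible return address.
+	 */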
+	ksp = (unsigned long *)sp;
+	while (!kstack_end(ksp)) {
+		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
 			break;
-		else
-			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
-
-		if (savesched || !in_sched_functions(lpp)) {
-			if (skip) {
-				skip--;
-			} else {
-				trace->entries[trace->nr_entries++] = lpp;
-				if (trace->nr_entries >= trace->max_entries)
-					break;
-			}
-		}
-		fp = (unsigned long *)fpp;
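+		/*
+		 * Treat each slot as a possible return address and step back
+		 * four bytes so the reported PC lands near the call site
+		 * rather than the instruction after it.
+		 */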
+		pc = (*ksp++) - 0x4;
 	}
 }
+#endif /* CONFIG_FRAME_POINTER */
+
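+/* show_stack() callback: print every address and never stop the walk early. */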
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+	print_ip_sym(pc);
+	return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	pr_cont("Call Trace:\n");
+	walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
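+/*
+ * get_wchan() callback: stop at the first address outside the scheduler,
+ * which is where the task is actually waiting.
+ */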
+static bool save_wchan(unsigned long pc, void *arg)
+{
+	if (!in_sched_functions(pc)) {
+		unsigned long *p = arg;
+		*p = pc;
+		return true;
+	}
+	return false;
+}
+
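+/* Report where a sleeping task is blocked; 0 if it is running or is current. */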
+unsigned long get_wchan(struct task_struct *task)
+{
+	unsigned long pc = 0;
+
+	if (likely(task && task != current && task->state != TASK_RUNNING))
+		walk_stackframe(task, NULL, save_wchan, &pc);
+	return pc;
+}
+
+#ifdef CONFIG_STACKTRACE
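+/*
+ * walk_stackframe() callback: record one address into the stack_trace
+ * buffer, honouring trace->skip; returning true ends the walk once the
+ * buffer is full.
+ */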
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+	struct stack_trace *trace = arg;
+
+	if (unlikely(nosched && in_sched_functions(pc)))
+		return false;
+	if (unlikely(trace->skip > 0)) {
+		trace->skip--;
+		return false;
+	}
+
+	trace->entries[trace->nr_entries++] = pc;
+	return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+	return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	walk_stackframe(tsk, NULL, save_trace, trace);
+}
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */