/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Common arm64 stack unwinder code.
 *
 * To implement a new arm64 stack unwinder:
 * 1) Include this header
 *
 * 2) Call into unwind_next_common() from your top level unwind
 *    function, passing it the validation and translation callbacks
 *    (though the latter can be NULL if no translation is required);
 *    a minimal usage sketch appears near the end of this header.
 *
 * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_COMMON_H
#define __ASM_STACKTRACE_COMMON_H

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kprobes.h>
#include <linux/types.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	STACK_TYPE_HYP,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 *
 * @task:        The task being unwound.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	struct task_struct *task;
};

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

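/*
 * A minimal sketch of how an unwinder-specific stack predicate might be
 * built on top of on_stack(). The example_irq_stack_base/_size symbols are
 * hypothetical stand-ins for wherever a given unwinder keeps its stack
 * bounds; they are not real kernel symbols.
 */
static inline bool on_example_irq_stack(unsigned long sp, unsigned long size,
					struct stack_info *info)
{
	/* Hypothetical bounds; a real unwinder derives these per stack/CPU. */
	extern unsigned long example_irq_stack_base, example_irq_stack_size;
	unsigned long low = example_irq_stack_base;
	unsigned long high = low + example_irq_stack_size;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}
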
static inline void unwind_init_common(struct unwind_state *state,
				      struct task_struct *task)
{
	state->task = task;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_next() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	state->prev_fp = 0;
	state->prev_type = STACK_TYPE_UNKNOWN;
}

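/*
 * A minimal sketch (an assumption, not taken verbatim from the reference
 * implementation) of how a caller might prime the state for a first unwind
 * of the current task: reset the accounting via unwind_init_common(), then
 * seed fp/pc from the caller's frame. For the level-1 frame address to be
 * meaningful, the function this is inlined into must itself be noinline.
 */
static __always_inline void example_unwind_init_from_here(struct unwind_state *state,
							   struct task_struct *task)
{
	unwind_init_common(state, task);

	/* Start the walk at the frame record of this function's caller. */
	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}
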
/*
 * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
 * a kernel address.
 *
 * @fp:   the frame pointer to be updated to its kernel address.
 * @type: the stack type associated with frame pointer @fp
 *
 * Returns true on success and @fp is updated to the corresponding
 * kernel virtual address; otherwise returns false.
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
					    enum stack_type type);

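/*
 * A minimal sketch of a stack_trace_translate_fp_fn. It assumes the foreign
 * (e.g. hypervisor) stack is mapped at a fixed linear offset from its kernel
 * alias; example_hyp_va_offset is a hypothetical symbol standing in for that
 * offset. A real callback performs whatever translation its address space
 * actually needs, and can simply be NULL when none is required.
 */
static inline bool example_translate_fp(unsigned long *fp, enum stack_type type)
{
	extern unsigned long example_hyp_va_offset;

	/* Frame pointers on non-HYP stacks are already kernel addresses. */
	if (type != STACK_TYPE_HYP)
		return true;

	*fp += example_hyp_va_offset;
	return true;
}
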
/*
 * on_accessible_stack_fn() - Check whether a stack range is on any
 * of the possible stacks.
 *
 * @tsk:  task whose stack is being unwound
 * @sp:   stack address being checked
 * @size: size of the stack range being checked
 * @info: stack unwinding context
 */
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info);

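/* Hypothetical helpers returning the bounds of @tsk's task stack. */
extern unsigned long example_task_stack_low(const struct task_struct *tsk);
extern unsigned long example_task_stack_high(const struct task_struct *tsk);

/*
 * A minimal sketch of an on_accessible_stack_fn that only knows about two
 * stacks: the task stack, via the hypothetical helpers above, and the IRQ
 * stack, via on_example_irq_stack(). A real callback must cover every stack
 * type its unwinder can encounter.
 */
static inline bool example_on_accessible_stack(const struct task_struct *tsk,
					       unsigned long sp, unsigned long size,
					       struct stack_info *info)
{
	if (on_stack(sp, size, example_task_stack_low(tsk),
		     example_task_stack_high(tsk), STACK_TYPE_TASK, info))
		return true;

	return on_example_irq_stack(sp, size, info);
}
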
static inline int unwind_next_common(struct unwind_state *state,
				     struct stack_info *info,
				     on_accessible_stack_fn accessible,
				     stack_trace_translate_fp_fn translate_fp)
{
	unsigned long fp = state->fp, kern_fp = fp;
	struct task_struct *tsk = state->task;

	if (fp & 0x7)
		return -EINVAL;

	if (!accessible(tsk, fp, 16, info))
		return -EINVAL;

	if (test_bit(info->type, state->stacks_done))
		return -EINVAL;

	/*
	 * If fp is not from the current address space, perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp, info->type))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 * HYP -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info->type == state->prev_type) {
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		__set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next() invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
	state->prev_fp = fp;
	state->prev_type = info->type;

	return 0;
}

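/*
 * A minimal sketch of a top level unwind loop built on unwind_next_common(),
 * wired up with the example callbacks above. The consume_entry callback and
 * cookie mirror the shape used by the reference implementation in
 * arch/arm64/kernel/stacktrace.c; everything prefixed example_ here is a
 * hypothetical stand-in rather than a real kernel symbol.
 */
static inline void example_unwind(struct unwind_state *state,
				  bool (*consume_entry)(void *cookie, unsigned long pc),
				  void *cookie)
{
	struct stack_info info;

	while (1) {
		/* Report the current PC; stop if the consumer has had enough. */
		if (!state->pc || !consume_entry(cookie, state->pc))
			break;
		/* Step to the previous frame record; stop on any error. */
		if (unwind_next_common(state, &info, example_on_accessible_stack,
				       example_translate_fp))
			break;
	}
}
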
#endif	/* __ASM_STACKTRACE_COMMON_H */