Skip to content

Commit 4e00532

Browse files
author
Marc Zyngier
committed
KVM: arm64: Make unwind()/on_accessible_stack() per-unwinder functions
Having multiple versions of on_accessible_stack() (one per unwinder) makes it very hard to reason about what is used where due to the complexity of the various includes, the forward declarations, and the reliance on everything being 'inline'.

Instead, move the code back where it should be. Each unwinder implements:

- on_accessible_stack() as well as the helpers it depends on,

- unwind()/unwind_next(), as they pass on_accessible_stack as a parameter to unwind_next_common() (which is the only common code here)

This hardly results in any duplication, and makes it much easier to reason about the code.

Signed-off-by: Marc Zyngier <[email protected]>
Reviewed-by: Kalesh Singh <[email protected]>
Tested-by: Kalesh Singh <[email protected]>
Reviewed-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 9f5fee0 commit 4e00532

File tree

6 files changed

+213
-197
lines changed

6 files changed

+213
-197
lines changed

arch/arm64/include/asm/stacktrace.h

Lines changed: 0 additions & 74 deletions
Original file line numberDiff line numberDiff line change
@@ -57,78 +57,4 @@ static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
5757
struct stack_info *info) { return false; }
5858
#endif
5959

60-
61-
/*
62-
* We can only safely access per-cpu stacks from current in a non-preemptible
63-
* context.
64-
*/
65-
static inline bool on_accessible_stack(const struct task_struct *tsk,
66-
unsigned long sp, unsigned long size,
67-
struct stack_info *info)
68-
{
69-
if (on_accessible_stack_common(tsk, sp, size, info))
70-
return true;
71-
72-
if (on_task_stack(tsk, sp, size, info))
73-
return true;
74-
if (tsk != current || preemptible())
75-
return false;
76-
if (on_irq_stack(sp, size, info))
77-
return true;
78-
if (on_sdei_stack(sp, size, info))
79-
return true;
80-
81-
return false;
82-
}
83-
84-
/*
85-
* Unwind from one frame record (A) to the next frame record (B).
86-
*
87-
* We terminate early if the location of B indicates a malformed chain of frame
88-
* records (e.g. a cycle), determined based on the location and fp value of A
89-
* and the location (but not the fp value) of B.
90-
*/
91-
static inline int notrace unwind_next(struct unwind_state *state)
92-
{
93-
struct task_struct *tsk = state->task;
94-
unsigned long fp = state->fp;
95-
struct stack_info info;
96-
int err;
97-
98-
/* Final frame; nothing to unwind */
99-
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
100-
return -ENOENT;
101-
102-
err = unwind_next_common(state, &info, NULL);
103-
if (err)
104-
return err;
105-
106-
state->pc = ptrauth_strip_insn_pac(state->pc);
107-
108-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
109-
if (tsk->ret_stack &&
110-
(state->pc == (unsigned long)return_to_handler)) {
111-
unsigned long orig_pc;
112-
/*
113-
* This is a case where function graph tracer has
114-
* modified a return address (LR) in a stack frame
115-
* to hook a function return.
116-
* So replace it to an original value.
117-
*/
118-
orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
119-
(void *)state->fp);
120-
if (WARN_ON_ONCE(state->pc == orig_pc))
121-
return -EINVAL;
122-
state->pc = orig_pc;
123-
}
124-
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
125-
#ifdef CONFIG_KRETPROBES
126-
if (is_kretprobe_trampoline(state->pc))
127-
state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
128-
#endif
129-
130-
return 0;
131-
}
132-
NOKPROBE_SYMBOL(unwind_next);
133-
13460
#endif /* __ASM_STACKTRACE_H */

arch/arm64/include/asm/stacktrace/common.h

Lines changed: 15 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -79,15 +79,6 @@ struct unwind_state {
7979
struct task_struct *task;
8080
};
8181

82-
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
83-
struct stack_info *info);
84-
85-
static inline bool on_accessible_stack(const struct task_struct *tsk,
86-
unsigned long sp, unsigned long size,
87-
struct stack_info *info);
88-
89-
static inline int unwind_next(struct unwind_state *state);
90-
9182
static inline bool on_stack(unsigned long sp, unsigned long size,
9283
unsigned long low, unsigned long high,
9384
enum stack_type type, struct stack_info *info)
@@ -106,21 +97,6 @@ static inline bool on_stack(unsigned long sp, unsigned long size,
10697
return true;
10798
}
10899

109-
static inline bool on_accessible_stack_common(const struct task_struct *tsk,
110-
unsigned long sp,
111-
unsigned long size,
112-
struct stack_info *info)
113-
{
114-
if (info)
115-
info->type = STACK_TYPE_UNKNOWN;
116-
117-
/*
118-
* Both the kernel and nvhe hypervisor make use of
119-
* an overflow_stack
120-
*/
121-
return on_overflow_stack(sp, size, info);
122-
}
123-
124100
static inline void unwind_init_common(struct unwind_state *state,
125101
struct task_struct *task)
126102
{
@@ -156,8 +132,22 @@ static inline void unwind_init_common(struct unwind_state *state,
156132
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
157133
enum stack_type type);
158134

135+
/*
136+
* on_accessible_stack_fn() - Check whether a stack range is on any
137+
* of the possible stacks.
138+
*
139+
* @tsk: task whose stack is being unwound
140+
* @sp: stack address being checked
141+
* @size: size of the stack range being checked
142+
* @info: stack unwinding context
143+
*/
144+
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
145+
unsigned long sp, unsigned long size,
146+
struct stack_info *info);
147+
159148
static inline int unwind_next_common(struct unwind_state *state,
160149
struct stack_info *info,
150+
on_accessible_stack_fn accessible,
161151
stack_trace_translate_fp_fn translate_fp)
162152
{
163153
unsigned long fp = state->fp, kern_fp = fp;
@@ -166,7 +156,7 @@ static inline int unwind_next_common(struct unwind_state *state,
166156
if (fp & 0x7)
167157
return -EINVAL;
168158

169-
if (!on_accessible_stack(tsk, fp, 16, info))
159+
if (!accessible(tsk, fp, 16, info))
170160
return -EINVAL;
171161

172162
if (test_bit(info->type, state->stacks_done))
@@ -212,19 +202,4 @@ static inline int unwind_next_common(struct unwind_state *state,
212202
return 0;
213203
}
214204

215-
static inline void notrace unwind(struct unwind_state *state,
216-
stack_trace_consume_fn consume_entry,
217-
void *cookie)
218-
{
219-
while (1) {
220-
int ret;
221-
222-
if (!consume_entry(cookie, state->pc))
223-
break;
224-
ret = unwind_next(state);
225-
if (ret < 0)
226-
break;
227-
}
228-
}
229-
NOKPROBE_SYMBOL(unwind);
230205
#endif /* __ASM_STACKTRACE_COMMON_H */

arch/arm64/include/asm/stacktrace/nvhe.h

Lines changed: 1 addition & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -37,59 +37,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
3737
state->pc = pc;
3838
}
3939

40-
static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
41-
struct stack_info *info);
42-
43-
static inline bool on_accessible_stack(const struct task_struct *tsk,
44-
unsigned long sp, unsigned long size,
45-
struct stack_info *info)
46-
{
47-
if (on_accessible_stack_common(tsk, sp, size, info))
48-
return true;
49-
50-
if (on_hyp_stack(sp, size, info))
51-
return true;
52-
53-
return false;
54-
}
55-
56-
#ifdef __KVM_NVHE_HYPERVISOR__
57-
/*
58-
* Protected nVHE HYP stack unwinder
59-
*
60-
* In protected mode, the unwinding is done by the hypervisor in EL2.
61-
*/
62-
63-
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
64-
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
65-
struct stack_info *info)
66-
{
67-
unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
68-
unsigned long high = low + OVERFLOW_STACK_SIZE;
69-
70-
return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
71-
}
72-
73-
static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
74-
struct stack_info *info)
75-
{
76-
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
77-
unsigned long high = params->stack_hyp_va;
78-
unsigned long low = high - PAGE_SIZE;
79-
80-
return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
81-
}
82-
83-
static inline int notrace unwind_next(struct unwind_state *state)
84-
{
85-
struct stack_info info;
86-
87-
return unwind_next_common(state, &info, NULL);
88-
}
89-
NOKPROBE_SYMBOL(unwind_next);
90-
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
91-
92-
#else /* !__KVM_NVHE_HYPERVISOR__ */
40+
#ifndef __KVM_NVHE_HYPERVISOR__
9341
/*
9442
* Conventional (non-protected) nVHE HYP stack unwinder
9543
*
@@ -142,36 +90,6 @@ static inline bool kvm_nvhe_stack_kern_va(unsigned long *addr,
14290
return true;
14391
}
14492

145-
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
146-
struct stack_info *info)
147-
{
148-
struct kvm_nvhe_stacktrace_info *stacktrace_info
149-
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
150-
unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
151-
unsigned long high = low + OVERFLOW_STACK_SIZE;
152-
153-
return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
154-
}
155-
156-
static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
157-
struct stack_info *info)
158-
{
159-
struct kvm_nvhe_stacktrace_info *stacktrace_info
160-
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
161-
unsigned long low = (unsigned long)stacktrace_info->stack_base;
162-
unsigned long high = low + PAGE_SIZE;
163-
164-
return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
165-
}
166-
167-
static inline int notrace unwind_next(struct unwind_state *state)
168-
{
169-
struct stack_info info;
170-
171-
return unwind_next_common(state, &info, kvm_nvhe_stack_kern_va);
172-
}
173-
NOKPROBE_SYMBOL(unwind_next);
174-
17593
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
17694

17795
#endif /* __KVM_NVHE_HYPERVISOR__ */

arch/arm64/kernel/stacktrace.c

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,96 @@ static inline void unwind_init_from_task(struct unwind_state *state,
6767
state->pc = thread_saved_pc(task);
6868
}
6969

70+
/*
71+
* We can only safely access per-cpu stacks from current in a non-preemptible
72+
* context.
73+
*/
74+
static bool on_accessible_stack(const struct task_struct *tsk,
75+
unsigned long sp, unsigned long size,
76+
struct stack_info *info)
77+
{
78+
if (info)
79+
info->type = STACK_TYPE_UNKNOWN;
80+
81+
if (on_task_stack(tsk, sp, size, info))
82+
return true;
83+
if (tsk != current || preemptible())
84+
return false;
85+
if (on_irq_stack(sp, size, info))
86+
return true;
87+
if (on_overflow_stack(sp, size, info))
88+
return true;
89+
if (on_sdei_stack(sp, size, info))
90+
return true;
91+
92+
return false;
93+
}
94+
95+
/*
96+
* Unwind from one frame record (A) to the next frame record (B).
97+
*
98+
* We terminate early if the location of B indicates a malformed chain of frame
99+
* records (e.g. a cycle), determined based on the location and fp value of A
100+
* and the location (but not the fp value) of B.
101+
*/
102+
static int notrace unwind_next(struct unwind_state *state)
103+
{
104+
struct task_struct *tsk = state->task;
105+
unsigned long fp = state->fp;
106+
struct stack_info info;
107+
int err;
108+
109+
/* Final frame; nothing to unwind */
110+
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
111+
return -ENOENT;
112+
113+
err = unwind_next_common(state, &info, on_accessible_stack, NULL);
114+
if (err)
115+
return err;
116+
117+
state->pc = ptrauth_strip_insn_pac(state->pc);
118+
119+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
120+
if (tsk->ret_stack &&
121+
(state->pc == (unsigned long)return_to_handler)) {
122+
unsigned long orig_pc;
123+
/*
124+
* This is a case where function graph tracer has
125+
* modified a return address (LR) in a stack frame
126+
* to hook a function return.
127+
* So replace it to an original value.
128+
*/
129+
orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
130+
(void *)state->fp);
131+
if (WARN_ON_ONCE(state->pc == orig_pc))
132+
return -EINVAL;
133+
state->pc = orig_pc;
134+
}
135+
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
136+
#ifdef CONFIG_KRETPROBES
137+
if (is_kretprobe_trampoline(state->pc))
138+
state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
139+
#endif
140+
141+
return 0;
142+
}
143+
NOKPROBE_SYMBOL(unwind_next);
144+
145+
static void notrace unwind(struct unwind_state *state,
146+
stack_trace_consume_fn consume_entry, void *cookie)
147+
{
148+
while (1) {
149+
int ret;
150+
151+
if (!consume_entry(cookie, state->pc))
152+
break;
153+
ret = unwind_next(state);
154+
if (ret < 0)
155+
break;
156+
}
157+
}
158+
NOKPROBE_SYMBOL(unwind);
159+
70160
static bool dump_backtrace_entry(void *arg, unsigned long where)
71161
{
72162
char *loglvl = arg;

0 commit comments

Comments
 (0)