Commit 9f5fee0

Author: Marc Zyngier (committed)
KVM: arm64: Move nVHE stacktrace unwinding into its own compilation unit
The unwinding code doesn't really belong to the exit handling code. Instead,
move it to a file (conveniently named stacktrace.c to confuse the reviewer),
and move all the stacktrace-related stuff there.

It will be joined by more code very soon.

Signed-off-by: Marc Zyngier <[email protected]>
Reviewed-by: Kalesh Singh <[email protected]>
Tested-by: Kalesh Singh <[email protected]>
Reviewed-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 03fe9cd commit 9f5fee0

4 files changed (+123, -99 lines)

arch/arm64/include/asm/stacktrace/nvhe.h

Lines changed: 2 additions & 0 deletions
@@ -172,5 +172,7 @@ static inline int notrace unwind_next(struct unwind_state *state)
 }
 NOKPROBE_SYMBOL(unwind_next);
 
+void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
+
 #endif /* __KVM_NVHE_HYPERVISOR__ */
 #endif /* __ASM_STACKTRACE_NVHE_H */
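With kvm_nvhe_dump_backtrace() declared here and no longer static, host code in
other compilation units (notably the hyp panic path left behind in
handle_exit.c, see below) can reach the unwinder through a single entry point.
A minimal sketch of a caller, assuming EL1 context and an already-computed
EL2-to-kernel offset; the function name report_nvhe_panic() is illustrative,
not from this commit:

    #include <asm/stacktrace/nvhe.h>

    /*
     * Hypothetical EL1-side caller; in-tree, the user is the hyp panic
     * handler, which already has hyp_offset in hand when the hypervisor
     * dies.
     */
    static void report_nvhe_panic(unsigned long hyp_offset)
    {
            /* Dispatches to the pKVM or non-protected unwinder. */
            kvm_nvhe_dump_backtrace(hyp_offset);
    }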

arch/arm64/kvm/Makefile

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ obj-$(CONFIG_KVM) += hyp/
 
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         inject_fault.o va_layout.o handle_exit.o \
-        guest.o debug.o reset.o sys_regs.o \
+        guest.o debug.o reset.o sys_regs.o stacktrace.o \
         vgic-sys-reg-v3.o fpsimd.o pkvm.o \
         arch_timer.o trng.o vmid.o \
         vgic/vgic.o vgic/vgic-init.o \
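Because stacktrace.o is added to kvm-y, the new unit is built whenever
CONFIG_KVM is enabled, with no extra Kconfig plumbing. As a quick sanity check
that kbuild picks it up, the standard single-target syntax compiles just this
object (the cross-compile prefix is an assumption for a non-arm64 build host):

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- arch/arm64/kvm/stacktrace.o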

arch/arm64/kvm/handle_exit.c

Lines changed: 0 additions & 98 deletions
@@ -319,104 +319,6 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
 
-/*
- * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
- *
- * @arg    : the hypervisor offset, used for address translation
- * @where  : the program counter corresponding to the stack frame
- */
-static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
-{
-        unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
-        unsigned long hyp_offset = (unsigned long)arg;
-
-        /* Mask tags and convert to kern addr */
-        where = (where & va_mask) + hyp_offset;
-        kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
-
-        return true;
-}
-
-static inline void kvm_nvhe_dump_backtrace_start(void)
-{
-        kvm_err("nVHE call trace:\n");
-}
-
-static inline void kvm_nvhe_dump_backtrace_end(void)
-{
-        kvm_err("---[ end nVHE call trace ]---\n");
-}
-
-/*
- * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
- *
- * @hyp_offset: hypervisor offset, used for address translation.
- *
- * The host can directly access HYP stack pages in non-protected
- * mode, so the unwinding is done directly from EL1. This removes
- * the need for shared buffers between host and hypervisor for
- * the stacktrace.
- */
-static void hyp_dump_backtrace(unsigned long hyp_offset)
-{
-        struct kvm_nvhe_stacktrace_info *stacktrace_info;
-        struct unwind_state state;
-
-        stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
-
-        kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
-
-        kvm_nvhe_dump_backtrace_start();
-        unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
-        kvm_nvhe_dump_backtrace_end();
-}
-
-#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
-DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
-                         pkvm_stacktrace);
-
-/*
- * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
- *
- * @hyp_offset: hypervisor offset, used for address translation.
- *
- * Dumping of the pKVM HYP backtrace is done by reading the
- * stack addresses from the shared stacktrace buffer, since the
- * host cannot directly access hypervisor memory in protected
- * mode.
- */
-static void pkvm_dump_backtrace(unsigned long hyp_offset)
-{
-        unsigned long *stacktrace
-                = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
-        int i, size = NVHE_STACKTRACE_SIZE / sizeof(long);
-
-        kvm_nvhe_dump_backtrace_start();
-        /* The saved stacktrace is terminated by a null entry */
-        for (i = 0; i < size && stacktrace[i]; i++)
-                kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
-        kvm_nvhe_dump_backtrace_end();
-}
-#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
-static void pkvm_dump_backtrace(unsigned long hyp_offset)
-{
-        kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
-}
-#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
-
-/*
- * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
- *
- * @hyp_offset: hypervisor offset, used for address translation.
- */
-static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
-{
-        if (is_protected_kvm_enabled())
-                pkvm_dump_backtrace(hyp_offset);
-        else
-                hyp_dump_backtrace(hyp_offset);
-}
-
 void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
                                               u64 elr_virt, u64 elr_phys,
                                               u64 par, uintptr_t vcpu,

arch/arm64/kvm/stacktrace.c

Lines changed: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * The unwinder implementation depends on the nVHE mode:
+ *
+ *   1) Non-protected nVHE mode - the host can directly access the
+ *      HYP stack pages and unwind the HYP stack in EL1. This saves having
+ *      to allocate shared buffers for the host to read the unwinded
+ *      stacktrace.
+ *
+ *   2) pKVM (protected nVHE) mode - the host cannot directly access
+ *      the HYP memory. The stack is unwinded in EL2 and dumped to a shared
+ *      buffer where the host can read and print the stacktrace.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+#include <asm/stacktrace/nvhe.h>
+
+/*
+ * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
+ *
+ * @arg    : the hypervisor offset, used for address translation
+ * @where  : the program counter corresponding to the stack frame
+ */
+static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
+{
+        unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+        unsigned long hyp_offset = (unsigned long)arg;
+
+        /* Mask tags and convert to kern addr */
+        where = (where & va_mask) + hyp_offset;
+        kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
+
+        return true;
+}
+
+static void kvm_nvhe_dump_backtrace_start(void)
+{
+        kvm_err("nVHE call trace:\n");
+}
+
+static void kvm_nvhe_dump_backtrace_end(void)
+{
+        kvm_err("---[ end nVHE call trace ]---\n");
+}
+
+/*
+ * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+        struct kvm_nvhe_stacktrace_info *stacktrace_info;
+        struct unwind_state state;
+
+        stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+        kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+        kvm_nvhe_dump_backtrace_start();
+        unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
+        kvm_nvhe_dump_backtrace_end();
+}
+
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
+                         pkvm_stacktrace);
+
+/*
+ * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * Dumping of the pKVM HYP backtrace is done by reading the
+ * stack addresses from the shared stacktrace buffer, since the
+ * host cannot directly access hypervisor memory in protected
+ * mode.
+ */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+        unsigned long *stacktrace
+                = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
+        int i, size = NVHE_STACKTRACE_SIZE / sizeof(long);
+
+        kvm_nvhe_dump_backtrace_start();
+        /* The saved stacktrace is terminated by a null entry */
+        for (i = 0; i < size && stacktrace[i]; i++)
+                kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
+        kvm_nvhe_dump_backtrace_end();
+}
+#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_dump_backtrace(unsigned long hyp_offset)
+{
+        kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
+}
+#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+/*
+ * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ */
+void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
+{
+        if (is_protected_kvm_enabled())
+                pkvm_dump_backtrace(hyp_offset);
+        else
+                hyp_dump_backtrace(hyp_offset);
+}
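The only non-obvious arithmetic in the moved code is the address fixup in
kvm_nvhe_dump_backtrace_entry(): the (possibly tagged) EL2 PC is masked down to
vabits_actual virtual-address bits, then rebased by hyp_offset so %pB can
symbolize it against the kernel image. A self-contained userspace sketch of
that translation; VABITS, VA_MASK and both sample addresses are made up for
illustration, not taken from a real trace:

    #include <stdio.h>

    /* Stand-ins for vabits_actual and GENMASK_ULL(vabits_actual - 1, 0) */
    #define VABITS  48ULL
    #define VA_MASK ((1ULL << VABITS) - 1)

    int main(void)
    {
            /* Hypothetical tagged EL2 PC and EL2-to-kernel offset */
            unsigned long long where      = 0xffff800008001234ULL;
            unsigned long long hyp_offset = 0x0000100000000000ULL;

            /* Mask tags and convert to a kernel address, as the callback does */
            where = (where & VA_MASK) + hyp_offset;

            /* The kernel prints this via kvm_err() and symbolizes it with %pB */
            printf(" [<%016llx>]\n", where);
            return 0;
    }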
