|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>
| 23 | + |
| 24 | +/* |
| 25 | + * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry |
| 26 | + * |
| 27 | + * @arg : the hypervisor offset, used for address translation |
| 28 | + * @where : the program counter corresponding to the stack frame |
| 29 | + */ |
| 30 | +static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where) |
| 31 | +{ |
| 32 | + unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0); |
| 33 | + unsigned long hyp_offset = (unsigned long)arg; |
| 34 | + |
| 35 | + /* Mask tags and convert to kern addr */ |
| 36 | + where = (where & va_mask) + hyp_offset; |
| 37 | + kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset())); |
| 38 | + |
| 39 | + return true; |
| 40 | +} |
| 41 | + |
/* Print the header line that opens an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}
| 46 | + |
/* Print the trailer line that closes an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}
| 51 | + |
| 52 | +/* |
| 53 | + * hyp_dump_backtrace - Dump the non-protected nVHE backtrace. |
| 54 | + * |
| 55 | + * @hyp_offset: hypervisor offset, used for address translation. |
| 56 | + * |
| 57 | + * The host can directly access HYP stack pages in non-protected |
| 58 | + * mode, so the unwinding is done directly from EL1. This removes |
| 59 | + * the need for shared buffers between host and hypervisor for |
| 60 | + * the stacktrace. |
| 61 | + */ |
| 62 | +static void hyp_dump_backtrace(unsigned long hyp_offset) |
| 63 | +{ |
| 64 | + struct kvm_nvhe_stacktrace_info *stacktrace_info; |
| 65 | + struct unwind_state state; |
| 66 | + |
| 67 | + stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); |
| 68 | + |
| 69 | + kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc); |
| 70 | + |
| 71 | + kvm_nvhe_dump_backtrace_start(); |
| 72 | + unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset); |
| 73 | + kvm_nvhe_dump_backtrace_end(); |
| 74 | +} |
| 75 | + |
| 76 | +#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE |
| 77 | +DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], |
| 78 | + pkvm_stacktrace); |
| 79 | + |
| 80 | +/* |
| 81 | + * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace. |
| 82 | + * |
| 83 | + * @hyp_offset: hypervisor offset, used for address translation. |
| 84 | + * |
| 85 | + * Dumping of the pKVM HYP backtrace is done by reading the |
| 86 | + * stack addresses from the shared stacktrace buffer, since the |
| 87 | + * host cannot directly access hypervisor memory in protected |
| 88 | + * mode. |
| 89 | + */ |
| 90 | +static void pkvm_dump_backtrace(unsigned long hyp_offset) |
| 91 | +{ |
| 92 | + unsigned long *stacktrace |
| 93 | + = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace); |
| 94 | + int i, size = NVHE_STACKTRACE_SIZE / sizeof(long); |
| 95 | + |
| 96 | + kvm_nvhe_dump_backtrace_start(); |
| 97 | + /* The saved stacktrace is terminated by a null entry */ |
| 98 | + for (i = 0; i < size && stacktrace[i]; i++) |
| 99 | + kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]); |
| 100 | + kvm_nvhe_dump_backtrace_end(); |
| 101 | +} |
| 102 | +#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */ |
/* Stub used when pKVM stacktraces were not compiled in: report and bail. */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
| 107 | +#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */ |
| 108 | + |
/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dispatches to the pKVM shared-buffer path in protected mode;
 * otherwise the stack is unwound directly from the host.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (!is_protected_kvm_enabled()) {
		hyp_dump_backtrace(hyp_offset);
		return;
	}

	pkvm_dump_backtrace(hyp_offset);
}
0 commit comments