Skip to content

Commit 2d38f43

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/nv-eret-pauth into kvmarm-master/next
* kvm-arm64/nv-eret-pauth: : . : Add NV support for the ERETAA/ERETAB instructions. From the cover letter: : : "Although the current upstream NV support has *some* support for : correctly emulating ERET, that support is only partial as it doesn't : support the ERETAA and ERETAB variants. : : Supporting these instructions was cast aside for a long time as it : involves implementing some form of PAuth emulation, something I wasn't : overly keen on. But I have reached a point where enough of the : infrastructure is there that it actually makes sense. So here it is!" : . KVM: arm64: nv: Work around lack of pauth support in old toolchains KVM: arm64: Drop trapping of PAuth instructions/keys KVM: arm64: nv: Advertise support for PAuth KVM: arm64: nv: Handle ERETA[AB] instructions KVM: arm64: nv: Add emulation for ERETAx instructions KVM: arm64: nv: Add kvm_has_pauth() helper KVM: arm64: nv: Reinject PAC exceptions caused by HCR_EL2.API==0 KVM: arm64: nv: Handle HCR_EL2.{API,APK} independently KVM: arm64: nv: Honor HFGITR_EL2.ERET being set KVM: arm64: nv: Fast-track 'InHost' exception returns KVM: arm64: nv: Add trap forwarding for ERET and SMC KVM: arm64: nv: Configure HCR_EL2 for FEAT_NV2 KVM: arm64: nv: Drop VCPU_HYP_CONTEXT flag KVM: arm64: Constraint PAuth support to consistent implementations KVM: arm64: Add helpers for ESR_ELx_ERET_ISS_ERET* KVM: arm64: Harden __ctxt_sys_reg() against out-of-range values Signed-off-by: Marc Zyngier <[email protected]>
2 parents 34c0d5a + 5513394 commit 2d38f43

File tree

15 files changed

+524
-121
lines changed

15 files changed

+524
-121
lines changed

arch/arm64/include/asm/esr.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -404,6 +404,18 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
404404
return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
405405
}
406406

407+
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
408+
static inline bool esr_iss_is_eretax(unsigned long esr)
409+
{
410+
return esr & ESR_ELx_ERET_ISS_ERET;
411+
}
412+
413+
/* Indicate which key is used for ERETAx (false: A-Key, true: B-Key) */
414+
static inline bool esr_iss_is_eretab(unsigned long esr)
415+
{
416+
return esr & ESR_ELx_ERET_ISS_ERETA;
417+
}
418+
407419
const char *esr_get_class_string(unsigned long esr);
408420
#endif /* __ASSEMBLY */
409421

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -125,16 +125,6 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
125125
vcpu->arch.hcr_el2 |= HCR_TWI;
126126
}
127127

128-
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
129-
{
130-
vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
131-
}
132-
133-
static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
134-
{
135-
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
136-
}
137-
138128
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
139129
{
140130
return vcpu->arch.vsesr_el2;

arch/arm64/include/asm/kvm_host.h

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -830,8 +830,6 @@ struct kvm_vcpu_arch {
830830
#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5))
831831
/* Save TRBE context if active */
832832
#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
833-
/* vcpu running in HYP context */
834-
#define VCPU_HYP_CONTEXT __vcpu_single_flag(iflags, BIT(7))
835833

836834
/* SVE enabled for host EL0 */
837835
#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
@@ -909,7 +907,7 @@ struct kvm_vcpu_arch {
909907
* Don't bother with VNCR-based accesses in the nVHE code, it has no
910908
* business dealing with NV.
911909
*/
912-
static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
910+
static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
913911
{
914912
#if !defined (__KVM_NVHE_HYPERVISOR__)
915913
if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
@@ -919,6 +917,13 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
919917
return (u64 *)&ctxt->sys_regs[r];
920918
}
921919

920+
#define __ctxt_sys_reg(c,r) \
921+
({ \
922+
BUILD_BUG_ON(__builtin_constant_p(r) && \
923+
(r) >= NR_SYS_REGS); \
924+
___ctxt_sys_reg(c, r); \
925+
})
926+
922927
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
923928

924929
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
@@ -1370,4 +1375,19 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
13701375
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
13711376
get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
13721377

1378+
/* Check for a given level of PAuth support */
1379+
#define kvm_has_pauth(k, l) \
1380+
({ \
1381+
bool pa, pi, pa3; \
1382+
\
1383+
pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l); \
1384+
pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP); \
1385+
pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l); \
1386+
pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP); \
1387+
pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l); \
1388+
pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP); \
1389+
\
1390+
(pa + pi + pa3) == 1; \
1391+
})
1392+
13731393
#endif /* __ARM64_KVM_HOST_H__ */

arch/arm64/include/asm/kvm_nested.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,20 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
6060
return ttbr0 & ~GENMASK_ULL(63, 48);
6161
}
6262

63+
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
6364

6465
int kvm_init_nv_sysregs(struct kvm *kvm);
6566

67+
#ifdef CONFIG_ARM64_PTR_AUTH
68+
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
69+
#else
70+
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
71+
{
72+
/* We really should never execute this... */
73+
WARN_ON_ONCE(1);
74+
*elr = 0xbad9acc0debadbad;
75+
return false;
76+
}
77+
#endif
78+
6679
#endif /* __ARM64_KVM_NESTED_H */

arch/arm64/include/asm/kvm_ptrauth.h

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,5 +99,26 @@ alternative_else_nop_endif
9999
.macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
100100
.endm
101101
#endif /* CONFIG_ARM64_PTR_AUTH */
102+
103+
#else /* !__ASSEMBLY */
104+
105+
#define __ptrauth_save_key(ctxt, key) \
106+
do { \
107+
u64 __val; \
108+
__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
109+
ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \
110+
__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
111+
ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \
112+
} while(0)
113+
114+
#define ptrauth_save_keys(ctxt) \
115+
do { \
116+
__ptrauth_save_key(ctxt, APIA); \
117+
__ptrauth_save_key(ctxt, APIB); \
118+
__ptrauth_save_key(ctxt, APDA); \
119+
__ptrauth_save_key(ctxt, APDB); \
120+
__ptrauth_save_key(ctxt, APGA); \
121+
} while(0)
122+
102123
#endif /* __ASSEMBLY__ */
103124
#endif /* __ASM_KVM_PTRAUTH_H */

arch/arm64/include/asm/pgtable-hwdef.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -297,6 +297,7 @@
297297
#define TCR_TBI1 (UL(1) << 38)
298298
#define TCR_HA (UL(1) << 39)
299299
#define TCR_HD (UL(1) << 40)
300+
#define TCR_TBID0 (UL(1) << 51)
300301
#define TCR_TBID1 (UL(1) << 52)
301302
#define TCR_NFD0 (UL(1) << 53)
302303
#define TCR_NFD1 (UL(1) << 54)

arch/arm64/kvm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
2323
vgic/vgic-its.o vgic/vgic-debug.o
2424

2525
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
26+
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
2627

2728
always-y := hyp_constants.h hyp-constants.s
2829

arch/arm64/kvm/arm.c

Lines changed: 78 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -35,10 +35,11 @@
3535
#include <asm/virt.h>
3636
#include <asm/kvm_arm.h>
3737
#include <asm/kvm_asm.h>
38+
#include <asm/kvm_emulate.h>
3839
#include <asm/kvm_mmu.h>
3940
#include <asm/kvm_nested.h>
4041
#include <asm/kvm_pkvm.h>
41-
#include <asm/kvm_emulate.h>
42+
#include <asm/kvm_ptrauth.h>
4243
#include <asm/sections.h>
4344

4445
#include <kvm/arm_hypercalls.h>
@@ -218,6 +219,40 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
218219
kvm_arm_teardown_hypercalls(kvm);
219220
}
220221

222+
static bool kvm_has_full_ptr_auth(void)
223+
{
224+
bool apa, gpa, api, gpi, apa3, gpa3;
225+
u64 isar1, isar2, val;
226+
227+
/*
228+
* Check that:
229+
*
230+
* - both Address and Generic auth are implemented for a given
231+
* algorithm (Q5, IMPDEF or Q3)
232+
* - only a single algorithm is implemented.
233+
*/
234+
if (!system_has_full_ptr_auth())
235+
return false;
236+
237+
isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
238+
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
239+
240+
apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
241+
val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
242+
gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);
243+
244+
api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
245+
val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
246+
gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);
247+
248+
apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
249+
val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
250+
gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);
251+
252+
return (apa == gpa && api == gpi && apa3 == gpa3 &&
253+
(apa + api + apa3) == 1);
254+
}
255+
221256
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
222257
{
223258
int r;
@@ -311,7 +346,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
311346
break;
312347
case KVM_CAP_ARM_PTRAUTH_ADDRESS:
313348
case KVM_CAP_ARM_PTRAUTH_GENERIC:
314-
r = system_has_full_ptr_auth();
349+
r = kvm_has_full_ptr_auth();
315350
break;
316351
case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
317352
if (kvm)
@@ -422,6 +457,44 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
422457

423458
}
424459

460+
static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
461+
{
462+
if (vcpu_has_ptrauth(vcpu)) {
463+
/*
464+
* Either we're running an L2 guest, and the API/APK
465+
* bits come from L1's HCR_EL2, or API/APK are both set.
466+
*/
467+
if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
468+
u64 val;
469+
470+
val = __vcpu_sys_reg(vcpu, HCR_EL2);
471+
val &= (HCR_API | HCR_APK);
472+
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
473+
vcpu->arch.hcr_el2 |= val;
474+
} else {
475+
vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
476+
}
477+
478+
/*
479+
* Save the host keys if there is any chance for the guest
480+
* to use pauth, as the entry code will reload the guest
481+
* keys in that case.
482+
* Protected mode is the exception to that rule, as the
483+
* entry into the EL2 code eagerly switches back and forth
484+
* between host and hyp keys (and kvm_hyp_ctxt is out of
485+
* reach anyway).
486+
*/
487+
if (is_protected_kvm_enabled())
488+
return;
489+
490+
if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
491+
struct kvm_cpu_context *ctxt;
492+
ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
493+
ptrauth_save_keys(ctxt);
494+
}
495+
}
496+
}
497+
425498
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
426499
{
427500
struct kvm_s2_mmu *mmu;
@@ -460,8 +533,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
460533
else
461534
vcpu_set_wfx_traps(vcpu);
462535

463-
if (vcpu_has_ptrauth(vcpu))
464-
vcpu_ptrauth_disable(vcpu);
536+
vcpu_set_pauth_traps(vcpu);
537+
465538
kvm_arch_vcpu_load_debug_state_flags(vcpu);
466539

467540
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
@@ -1264,7 +1337,7 @@ static unsigned long system_supported_vcpu_features(void)
12641337
if (!system_supports_sve())
12651338
clear_bit(KVM_ARM_VCPU_SVE, &features);
12661339

1267-
if (!system_has_full_ptr_auth()) {
1340+
if (!kvm_has_full_ptr_auth()) {
12681341
clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
12691342
clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
12701343
}

arch/arm64/kvm/emulate-nested.c

Lines changed: 44 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -2117,6 +2117,26 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
21172117
return true;
21182118
}
21192119

2120+
static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
2121+
{
2122+
bool control_bit_set;
2123+
2124+
if (!vcpu_has_nv(vcpu))
2125+
return false;
2126+
2127+
control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
2128+
if (!is_hyp_ctxt(vcpu) && control_bit_set) {
2129+
kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
2130+
return true;
2131+
}
2132+
return false;
2133+
}
2134+
2135+
bool forward_smc_trap(struct kvm_vcpu *vcpu)
2136+
{
2137+
return forward_traps(vcpu, HCR_TSC);
2138+
}
2139+
21202140
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
21212141
{
21222142
u64 mode = spsr & PSR_MODE_MASK;
@@ -2152,37 +2172,39 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
21522172

21532173
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
21542174
{
2155-
u64 spsr, elr, mode;
2156-
bool direct_eret;
2175+
u64 spsr, elr, esr;
21572176

21582177
/*
2159-
* Going through the whole put/load motions is a waste of time
2160-
* if this is a VHE guest hypervisor returning to its own
2161-
* userspace, or the hypervisor performing a local exception
2162-
* return. No need to save/restore registers, no need to
2163-
* switch S2 MMU. Just do the canonical ERET.
2178+
* Forward this trap to the virtual EL2 if the virtual
2179+
* HCR_EL2.NV bit is set and this is coming from !EL2.
21642180
*/
2165-
spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
2166-
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
2167-
2168-
mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);
2169-
2170-
direct_eret = (mode == PSR_MODE_EL0t &&
2171-
vcpu_el2_e2h_is_set(vcpu) &&
2172-
vcpu_el2_tge_is_set(vcpu));
2173-
direct_eret |= (mode == PSR_MODE_EL2h || mode == PSR_MODE_EL2t);
2174-
2175-
if (direct_eret) {
2176-
*vcpu_pc(vcpu) = vcpu_read_sys_reg(vcpu, ELR_EL2);
2177-
*vcpu_cpsr(vcpu) = spsr;
2178-
trace_kvm_nested_eret(vcpu, *vcpu_pc(vcpu), spsr);
2181+
if (forward_traps(vcpu, HCR_NV))
21792182
return;
2183+
2184+
/* Check for an ERETAx */
2185+
esr = kvm_vcpu_get_esr(vcpu);
2186+
if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
2187+
/*
2188+
* Oh no, ERETAx failed to authenticate. If we have
2189+
* FPACCOMBINE, deliver an exception right away. If we
2190+
* don't, then let the mangled ELR value trickle down the
2191+
* ERET handling, and the guest will have a little surprise.
2192+
*/
2193+
if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
2194+
esr &= ESR_ELx_ERET_ISS_ERETA;
2195+
esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
2196+
kvm_inject_nested_sync(vcpu, esr);
2197+
return;
2198+
}
21802199
}
21812200

21822201
preempt_disable();
21832202
kvm_arch_vcpu_put(vcpu);
21842203

2185-
elr = __vcpu_sys_reg(vcpu, ELR_EL2);
2204+
spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
2205+
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
2206+
if (!esr_iss_is_eretax(esr))
2207+
elr = __vcpu_sys_reg(vcpu, ELR_EL2);
21862208

21872209
trace_kvm_nested_eret(vcpu, elr, spsr);
21882210

0 commit comments

Comments
 (0)