
Commit be08c3c

Marc Zyngier authored and committed
Merge branch kvm-arm64/pkvm/fixed-features into kvmarm-master/next
* kvm-arm64/pkvm/fixed-features: (22 commits)
  : .
  : Add the pKVM fixed feature that allows a bunch of exceptions
  : to either be forbidden or be easily handled at EL2.
  : .
  KVM: arm64: pkvm: Give priority to standard traps over pvm handling
  KVM: arm64: pkvm: Pass vpcu instead of kvm to kvm_get_exit_handler_array()
  KVM: arm64: pkvm: Move kvm_handle_pvm_restricted around
  KVM: arm64: pkvm: Consolidate include files
  KVM: arm64: pkvm: Preserve pending SError on exit from AArch32
  KVM: arm64: pkvm: Handle GICv3 traps as required
  KVM: arm64: pkvm: Drop sysregs that should never be routed to the host
  KVM: arm64: pkvm: Drop AArch32-specific registers
  KVM: arm64: pkvm: Make the ERR/ERX*_EL1 registers RAZ/WI
  KVM: arm64: pkvm: Use a single function to expose all id-regs
  KVM: arm64: Fix early exit ptrauth handling
  KVM: arm64: Handle protected guests at 32 bits
  KVM: arm64: Trap access to pVM restricted features
  KVM: arm64: Move sanitized copies of CPU features
  KVM: arm64: Initialize trap registers for protected VMs
  KVM: arm64: Add handlers for protected VM System Registers
  KVM: arm64: Simplify masking out MTE in feature id reg
  KVM: arm64: Add missing field descriptor for MDCR_EL2
  KVM: arm64: Pass struct kvm to per-EC handlers
  KVM: arm64: Move early handlers to per-EC handlers
  ...

Signed-off-by: Marc Zyngier <[email protected]>
2 parents: 5f8b259 + 0730559

File tree: 18 files changed, +1200 -155 lines


arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 0 deletions
@@ -295,6 +295,7 @@
 #define MDCR_EL2_HPMFZO		(UL(1) << 29)
 #define MDCR_EL2_MTPME		(UL(1) << 28)
 #define MDCR_EL2_TDCC		(UL(1) << 27)
+#define MDCR_EL2_HLP		(UL(1) << 26)
 #define MDCR_EL2_HCCD		(UL(1) << 23)
 #define MDCR_EL2_TTRF		(UL(1) << 19)
 #define MDCR_EL2_HPMD		(UL(1) << 17)
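
MDCR_EL2_HLP (bit 26) was the one field descriptor missing from this block; defining it lets trap-configuration code name the bit instead of open-coding the shift. A purely illustrative fragment of how these masks are combined when programming MDCR_EL2 follows; the function name and the chosen bits are examples for orientation, not pKVM's actual configuration.

/*
 * Illustrative only: composing MDCR_EL2 from the masks above. The bits
 * chosen here are examples, not the configuration used by pKVM.
 */
static void example_program_mdcr(struct kvm_vcpu *vcpu)
{
	u64 mdcr = vcpu->arch.mdcr_el2;

	mdcr |= MDCR_EL2_TDCC;		/* trap Debug Comms Channel accesses */
	mdcr &= ~MDCR_EL2_HLP;		/* example: keep HLP cleared */

	write_sysreg(mdcr, mdcr_el2);
}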

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
@@ -74,6 +74,7 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
 	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
 };

 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
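
The new enum entry allocates a host-to-EL2 SMCCC function ID for initializing protected-VM traps. For orientation, the nVHE side pairs each such ID with a handler and a dispatch-table entry; the sketch below shows the usual shape of that wiring. The handler body and table placement here are assumptions, the authoritative version is in the series' hyp-main.c changes.

/*
 * Sketch of the usual nVHE wiring for a __KVM_HOST_SMCCC_FUNC___* ID
 * (assumed shape; see hyp-main.c in the series for the real thing).
 */
static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	/* Translate the host pointer into the EL2 address space first. */
	__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}

/* ...plus a matching entry in the host_hcall[] dispatch table: */
	HANDLE_FUNC(__pkvm_vcpu_init_traps),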

arch/arm64/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
@@ -781,6 +781,8 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
 	return false;
 }

+void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
+
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 5 additions & 0 deletions
@@ -115,7 +115,12 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 #endif

+extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
 extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);

 #endif /* __ARM64_KVM_HYP_H__ */
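
These kvm_nvhe_sym() externs let the host hand sanitised snapshots of the ID registers to the EL2 object (the arm.c hunk below populates them). At EL2, the snapshots can then be filtered into the view a protected guest is allowed to see. A minimal sketch of that pattern follows; PVM_ID_AA64PFR0_ALLOW stands in for whatever allow-mask the series defines, and the function name is illustrative.

/*
 * Minimal sketch of consuming a snapshot inside the nVHE object, where
 * kvm_nvhe_sym(x) resolves to plain x. PVM_ID_AA64PFR0_ALLOW is a
 * placeholder for the feature allow-mask used by the series.
 */
static u64 get_pvm_id_aa64pfr0(void)
{
	return id_aa64pfr0_el1_sys_val & PVM_ID_AA64PFR0_ALLOW;
}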

arch/arm64/kvm/arm.c

Lines changed: 13 additions & 0 deletions
@@ -622,6 +622,14 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)

 	ret = kvm_arm_pmu_v3_enable(vcpu);

+	/*
+	 * Initialize traps for protected VMs.
+	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
+	 * the code is in place for first run initialization at EL2.
+	 */
+	if (kvm_vm_is_protected(kvm))
+		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+
 	return ret;
 }

@@ -1819,8 +1827,13 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
 	void *addr = phys_to_virt(hyp_mem_base);
 	int ret;

+	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
+	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
 	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);

 	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
 	if (ret)
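
The first hunk defers protected-VM trap setup to EL2 via the new hypercall at first vcpu run; the second snapshots the sanitised feature registers so the EL2 code has something to base its decisions on. Conceptually, the EL2 handler derives the vcpu's trap configuration (HCR_EL2, CPTR_EL2, MDCR_EL2) from those snapshots; the helper names in the sketch below are illustrative of that structure rather than quoted from the series.

/*
 * Conceptual sketch of the EL2 side of __pkvm_vcpu_init_traps: start from
 * a fixed baseline, then tighten traps per sanitised feature register.
 * Helper names are illustrative.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);		/* baseline trap configuration */
	pvm_init_traps_aa64pfr0(vcpu);		/* driven by ID_AA64PFR0_EL1   */
	pvm_init_traps_aa64pfr1(vcpu);		/* driven by ID_AA64PFR1_EL1   */
	pvm_init_traps_aa64dfr0(vcpu);		/* driven by ID_AA64DFR0_EL1   */
	pvm_init_traps_aa64mmfr0(vcpu);		/* driven by ID_AA64MMFR0_EL1  */
	pvm_init_traps_aa64mmfr1(vcpu);		/* driven by ID_AA64MMFR1_EL1  */
}
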
Lines changed: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <[email protected]>
+ */
+
+#ifndef __ARM64_KVM_HYP_FAULT_H__
+#define __ARM64_KVM_HYP_FAULT_H__
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+	u64 par, tmp;
+
+	/*
+	 * Resolve the IPA the hard way using the guest VA.
+	 *
+	 * Stage-1 translation already validated the memory access
+	 * rights. As such, we can use the EL1 translation regime, and
+	 * don't have to distinguish between EL0 and EL1 access.
+	 *
+	 * We do need to save/restore PAR_EL1 though, as we haven't
+	 * saved the guest context yet, and we may return early...
+	 */
+	par = read_sysreg_par();
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg_par();
+	else
+		tmp = SYS_PAR_EL1_F; /* back to the guest */
+	write_sysreg(par, par_el1);
+
+	if (unlikely(tmp & SYS_PAR_EL1_F))
+		return false; /* Translation failed, back to guest */
+
+	/* Convert PAR to HPFAR format */
+	*hpfar = PAR_TO_HPFAR(tmp);
+	return true;
+}
+
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+{
+	u64 hpfar, far;
+
+	far = read_sysreg_el2(SYS_FAR);
+
+	/*
+	 * The HPFAR can be invalid if the stage 2 fault did not
+	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+	 * bit is clear) and one of the two following cases are true:
+	 * 1. The fault was due to a permission fault
+	 * 2. The processor carries errata 834220
+	 *
+	 * Therefore, for all non S1PTW faults where we either have a
+	 * permission fault or the errata workaround is enabled, we
+	 * resolve the IPA using the AT instruction.
+	 */
+	if (!(esr & ESR_ELx_S1PTW) &&
+	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
+	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+		if (!__translate_far_to_hpfar(far, &hpfar))
+			return false;
+	} else {
+		hpfar = read_sysreg(hpfar_el2);
+	}
+
+	fault->far_el2 = far;
+	fault->hpfar_el2 = hpfar;
+	return true;
+}
+
+#endif
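
This new header keeps fault decoding usable from the pKVM EL2 object as well as the regular switch path: __get_fault_info() captures FAR_EL2/HPFAR_EL2 immediately on exit, falling back to an AT S1E1R walk via __translate_far_to_hpfar() when HPFAR_EL2 may be invalid. A typical caller, roughly matching how the common switch code records the fault in vcpu->arch.fault, is sketched below rather than quoted from the series.

/*
 * Sketch of a typical caller: grab the fault syndrome and record the fault
 * addresses before anything else can clobber FAR_EL2/HPFAR_EL2. A false
 * return means the translation could not be resolved and the guest should
 * be re-entered to retry the access.
 */
static bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return __get_fault_info(esr, &vcpu->arch.fault);
}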
