Skip to content

Commit e880b16

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/pkvm-fixed-features-6.14 into kvmarm-master/next
* kvm-arm64/pkvm-fixed-features-6.14: (24 commits) : . : Complete rework of the pKVM handling of features, catching up : with how the rest of the code deals with it these days. : Patches courtesy of Fuad Tabba. From the cover letter: : : "This patch series uses the vm's feature id registers to track the : supported features, a framework similar to nested virt to set the : trap values, and removes the need to store cptr_el2 per vcpu in : favor of setting its value when traps are activated, as VHE mode : does." : : This branch drags the arm64/for-next/cpufeature branch to solve : ugly conflicts in -next. : . KVM: arm64: Fix FEAT_MTE in pKVM KVM: arm64: Use kvm_vcpu_has_feature() directly for struct kvm KVM: arm64: Convert the SVE guest vcpu flag to a vm flag KVM: arm64: Remove PtrAuth guest vcpu flag KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE KVM: arm64: Refactor kvm_reset_cptr_el2() KVM: arm64: Calculate cptr_el2 traps on activating traps KVM: arm64: Remove redundant setting of HCR_EL2 trap bit KVM: arm64: Remove fixed_config.h header KVM: arm64: Rework specifying restricted features for protected VMs KVM: arm64: Set protected VM traps based on its view of feature registers KVM: arm64: Fix RAS trapping in pKVM for protected VMs KVM: arm64: Initialize feature id registers for protected VMs KVM: arm64: Use KVM extension checks for allowed protected VM capabilities KVM: arm64: Remove KVM_ARM_VCPU_POWER_OFF from protected VMs allowed features in pKVM KVM: arm64: Move checking protected vcpu features to a separate function KVM: arm64: Group setting traps for protected VMs by control register KVM: arm64: Consolidate allowed and restricted VM feature checks arm64/sysreg: Get rid of CPACR_ELx SysregFields arm64/sysreg: Convert *_EL12 accessors to Mapping ... Signed-off-by: Marc Zyngier <[email protected]> # Conflicts: # arch/arm64/kvm/fpsimd.c # arch/arm64/kvm/hyp/nvhe/pkvm.c
2 parents d067012 + 4e26de2 commit e880b16

27 files changed

+533
-789
lines changed

arch/arm64/include/asm/cpucaps.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ cpucap_is_possible(const unsigned int cap)
4646
return IS_ENABLED(CONFIG_ARM64_POE);
4747
case ARM64_HAS_GCS:
4848
return IS_ENABLED(CONFIG_ARM64_GCS);
49+
case ARM64_HAFT:
50+
return IS_ENABLED(CONFIG_ARM64_HAFT);
4951
case ARM64_UNMAP_KERNEL_AT_EL0:
5052
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
5153
case ARM64_WORKAROUND_843419:

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -852,8 +852,7 @@ static inline bool system_supports_gcs(void)
852852

853853
static inline bool system_supports_haft(void)
854854
{
855-
return IS_ENABLED(CONFIG_ARM64_HAFT) &&
856-
cpus_have_final_cap(ARM64_HAFT);
855+
return cpus_have_final_cap(ARM64_HAFT);
857856
}
858857

859858
static __always_inline bool system_supports_mpam(void)

arch/arm64/include/asm/el2_setup.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@
154154
/* Coprocessor traps */
155155
.macro __init_el2_cptr
156156
__check_hvhe .LnVHE_\@, x1
157-
mov x0, #CPACR_ELx_FPEN
157+
mov x0, #CPACR_EL1_FPEN
158158
msr cpacr_el1, x0
159159
b .Lskip_set_cptr_\@
160160
.LnVHE_\@:
@@ -332,7 +332,7 @@
332332

333333
// (h)VHE case
334334
mrs x0, cpacr_el1 // Disable SVE traps
335-
orr x0, x0, #CPACR_ELx_ZEN
335+
orr x0, x0, #CPACR_EL1_ZEN
336336
msr cpacr_el1, x0
337337
b .Lskip_set_cptr_\@
338338

@@ -353,7 +353,7 @@
353353

354354
// (h)VHE case
355355
mrs x0, cpacr_el1 // Disable SME traps
356-
orr x0, x0, #CPACR_ELx_SMEN
356+
orr x0, x0, #CPACR_EL1_SMEN
357357
msr cpacr_el1, x0
358358
b .Lskip_set_cptr_sme_\@
359359

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@
300300
#define CPTR_EL2_TSM (1 << 12)
301301
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
302302
#define CPTR_EL2_TZ (1 << 8)
303-
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
303+
#define CPTR_NVHE_EL2_RES1 (BIT(13) | BIT(9) | GENMASK(7, 0))
304304
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
305305
GENMASK(29, 21) | \
306306
GENMASK(19, 14) | \
@@ -391,8 +391,6 @@
391391
ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
392392
ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
393393

394-
#define CPACR_EL1_TTA (1 << 28)
395-
396394
#define kvm_mode_names \
397395
{ PSR_MODE_EL0t, "EL0t" }, \
398396
{ PSR_MODE_EL1t, "EL1t" }, \

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 32 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -556,13 +556,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
556556
({ \
557557
u64 cptr = 0; \
558558
\
559-
if ((set) & CPACR_ELx_FPEN) \
559+
if ((set) & CPACR_EL1_FPEN) \
560560
cptr |= CPTR_EL2_TFP; \
561-
if ((set) & CPACR_ELx_ZEN) \
561+
if ((set) & CPACR_EL1_ZEN) \
562562
cptr |= CPTR_EL2_TZ; \
563-
if ((set) & CPACR_ELx_SMEN) \
563+
if ((set) & CPACR_EL1_SMEN) \
564564
cptr |= CPTR_EL2_TSM; \
565-
if ((clr) & CPACR_ELx_TTA) \
565+
if ((clr) & CPACR_EL1_TTA) \
566566
cptr |= CPTR_EL2_TTA; \
567567
if ((clr) & CPTR_EL2_TAM) \
568568
cptr |= CPTR_EL2_TAM; \
@@ -576,13 +576,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
576576
({ \
577577
u64 cptr = 0; \
578578
\
579-
if ((clr) & CPACR_ELx_FPEN) \
579+
if ((clr) & CPACR_EL1_FPEN) \
580580
cptr |= CPTR_EL2_TFP; \
581-
if ((clr) & CPACR_ELx_ZEN) \
581+
if ((clr) & CPACR_EL1_ZEN) \
582582
cptr |= CPTR_EL2_TZ; \
583-
if ((clr) & CPACR_ELx_SMEN) \
583+
if ((clr) & CPACR_EL1_SMEN) \
584584
cptr |= CPTR_EL2_TSM; \
585-
if ((set) & CPACR_ELx_TTA) \
585+
if ((set) & CPACR_EL1_TTA) \
586586
cptr |= CPTR_EL2_TTA; \
587587
if ((set) & CPTR_EL2_TAM) \
588588
cptr |= CPTR_EL2_TAM; \
@@ -595,13 +595,13 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
595595
#define cpacr_clear_set(clr, set) \
596596
do { \
597597
BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
598-
BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
599-
__build_check_all_or_none((clr), CPACR_ELx_FPEN); \
600-
__build_check_all_or_none((set), CPACR_ELx_FPEN); \
601-
__build_check_all_or_none((clr), CPACR_ELx_ZEN); \
602-
__build_check_all_or_none((set), CPACR_ELx_ZEN); \
603-
__build_check_all_or_none((clr), CPACR_ELx_SMEN); \
604-
__build_check_all_or_none((set), CPACR_ELx_SMEN); \
598+
BUILD_BUG_ON((clr) & CPACR_EL1_E0POE); \
599+
__build_check_all_or_none((clr), CPACR_EL1_FPEN); \
600+
__build_check_all_or_none((set), CPACR_EL1_FPEN); \
601+
__build_check_all_or_none((clr), CPACR_EL1_ZEN); \
602+
__build_check_all_or_none((set), CPACR_EL1_ZEN); \
603+
__build_check_all_or_none((clr), CPACR_EL1_SMEN); \
604+
__build_check_all_or_none((set), CPACR_EL1_SMEN); \
605605
\
606606
if (has_vhe() || has_hvhe()) \
607607
sysreg_clear_set(cpacr_el1, clr, set); \
@@ -619,40 +619,40 @@ static __always_inline void kvm_write_cptr_el2(u64 val)
619619
write_sysreg(val, cptr_el2);
620620
}
621621

622-
static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
622+
/* Resets the value of cptr_el2 when returning to the host. */
623+
static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
623624
{
624625
u64 val;
625626

626627
if (has_vhe()) {
627-
val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
628+
val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN);
628629
if (cpus_have_final_cap(ARM64_SME))
629630
val |= CPACR_EL1_SMEN_EL1EN;
630631
} else if (has_hvhe()) {
631-
val = CPACR_ELx_FPEN;
632+
val = CPACR_EL1_FPEN;
632633

633-
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
634-
val |= CPACR_ELx_ZEN;
634+
if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
635+
val |= CPACR_EL1_ZEN;
635636
if (cpus_have_final_cap(ARM64_SME))
636-
val |= CPACR_ELx_SMEN;
637+
val |= CPACR_EL1_SMEN;
637638
} else {
638639
val = CPTR_NVHE_EL2_RES1;
639640

640-
if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
641+
if (kvm_has_sve(kvm) && guest_owns_fp_regs())
641642
val |= CPTR_EL2_TZ;
642-
if (cpus_have_final_cap(ARM64_SME))
643-
val &= ~CPTR_EL2_TSM;
643+
if (!cpus_have_final_cap(ARM64_SME))
644+
val |= CPTR_EL2_TSM;
644645
}
645646

646-
return val;
647-
}
648-
649-
static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
650-
{
651-
u64 val = kvm_get_reset_cptr_el2(vcpu);
652-
653647
kvm_write_cptr_el2(val);
654648
}
655649

650+
#ifdef __KVM_NVHE_HYPERVISOR__
651+
#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
652+
#else
653+
#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2((v)->kvm)
654+
#endif
655+
656656
/*
657657
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
658658
* format if E2H isn't set.
@@ -685,7 +685,7 @@ static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
685685
#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen) \
686686
(!vcpu_has_nv(vcpu) ? false : \
687687
____cptr_xen_trap_enabled(vcpu, \
688-
SYS_FIELD_GET(CPACR_ELx, xen, \
688+
SYS_FIELD_GET(CPACR_EL1, xen, \
689689
vcpu_sanitised_cptr_el2(vcpu))))
690690

691691
static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
@@ -697,9 +697,4 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
697697
{
698698
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
699699
}
700-
701-
static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
702-
{
703-
vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
704-
}
705700
#endif /* __ARM64_KVM_EMULATE_H__ */

arch/arm64/include/asm/kvm_host.h

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,8 @@ struct kvm_arch {
332332
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
333333
/* Fine-Grained UNDEF initialised */
334334
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
335+
/* SVE exposed to guest */
336+
#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
335337
unsigned long flags;
336338

337339
/* VM-wide vCPU feature set */
@@ -722,7 +724,6 @@ struct kvm_vcpu_arch {
722724
u64 hcr_el2;
723725
u64 hcrx_el2;
724726
u64 mdcr_el2;
725-
u64 cptr_el2;
726727

727728
/* Exception Information */
728729
struct kvm_vcpu_fault_info fault;
@@ -871,14 +872,10 @@ struct kvm_vcpu_arch {
871872
#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
872873
#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
873874

874-
/* SVE exposed to guest */
875-
#define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0))
875+
/* KVM_ARM_VCPU_INIT completed */
876+
#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
876877
/* SVE config completed */
877878
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
878-
/* PTRAUTH exposed to guest */
879-
#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
880-
/* KVM_ARM_VCPU_INIT completed */
881-
#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3))
882879

883880
/* Exception pending */
884881
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -959,14 +956,21 @@ struct kvm_vcpu_arch {
959956
KVM_GUESTDBG_USE_HW | \
960957
KVM_GUESTDBG_SINGLESTEP)
961958

962-
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
963-
vcpu_get_flag(vcpu, GUEST_HAS_SVE))
959+
#define kvm_has_sve(kvm) (system_supports_sve() && \
960+
test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))
961+
962+
#ifdef __KVM_NVHE_HYPERVISOR__
963+
#define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm))
964+
#else
965+
#define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm)
966+
#endif
964967

965968
#ifdef CONFIG_ARM64_PTR_AUTH
966969
#define vcpu_has_ptrauth(vcpu) \
967970
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
968971
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
969-
vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
972+
(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \
973+
vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
970974
#else
971975
#define vcpu_has_ptrauth(vcpu) false
972976
#endif
@@ -1432,6 +1436,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
14321436
return test_bit(feature, ka->vcpu_features);
14331437
}
14341438

1439+
#define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
14351440
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
14361441

14371442
#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)

arch/arm64/include/asm/kvm_nested.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,14 +33,14 @@ static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
3333

3434
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
3535
{
36-
u64 cpacr_el1 = CPACR_ELx_RES1;
36+
u64 cpacr_el1 = CPACR_EL1_RES1;
3737

3838
if (cptr_el2 & CPTR_EL2_TTA)
39-
cpacr_el1 |= CPACR_ELx_TTA;
39+
cpacr_el1 |= CPACR_EL1_TTA;
4040
if (!(cptr_el2 & CPTR_EL2_TFP))
41-
cpacr_el1 |= CPACR_ELx_FPEN;
41+
cpacr_el1 |= CPACR_EL1_FPEN;
4242
if (!(cptr_el2 & CPTR_EL2_TZ))
43-
cpacr_el1 |= CPACR_ELx_ZEN;
43+
cpacr_el1 |= CPACR_EL1_ZEN;
4444

4545
cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
4646

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,31 @@ int pkvm_init_host_vm(struct kvm *kvm);
2020
int pkvm_create_hyp_vm(struct kvm *kvm);
2121
void pkvm_destroy_hyp_vm(struct kvm *kvm);
2222

23+
/*
24+
* This functions as an allow-list of protected VM capabilities.
25+
* Features not explicitly allowed by this function are denied.
26+
*/
27+
static inline bool kvm_pvm_ext_allowed(long ext)
28+
{
29+
switch (ext) {
30+
case KVM_CAP_IRQCHIP:
31+
case KVM_CAP_ARM_PSCI:
32+
case KVM_CAP_ARM_PSCI_0_2:
33+
case KVM_CAP_NR_VCPUS:
34+
case KVM_CAP_MAX_VCPUS:
35+
case KVM_CAP_MAX_VCPU_ID:
36+
case KVM_CAP_MSI_DEVID:
37+
case KVM_CAP_ARM_VM_IPA_SIZE:
38+
case KVM_CAP_ARM_PMU_V3:
39+
case KVM_CAP_ARM_SVE:
40+
case KVM_CAP_ARM_PTRAUTH_ADDRESS:
41+
case KVM_CAP_ARM_PTRAUTH_GENERIC:
42+
return true;
43+
default:
44+
return false;
45+
}
46+
}
47+
2348
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
2449
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
2550

arch/arm64/kernel/cpufeature.c

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1004,17 +1004,16 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
10041004
/* Override was valid */
10051005
ftr_new = tmp;
10061006
str = "forced";
1007-
} else if (ftr_ovr == tmp) {
1007+
} else {
10081008
/* Override was the safe value */
10091009
str = "already set";
10101010
}
10111011

1012-
if (str)
1013-
pr_warn("%s[%d:%d]: %s to %llx\n",
1014-
reg->name,
1015-
ftrp->shift + ftrp->width - 1,
1016-
ftrp->shift, str,
1017-
tmp & (BIT(ftrp->width) - 1));
1012+
pr_warn("%s[%d:%d]: %s to %llx\n",
1013+
reg->name,
1014+
ftrp->shift + ftrp->width - 1,
1015+
ftrp->shift, str,
1016+
tmp & (BIT(ftrp->width) - 1));
10181017
} else if ((ftr_mask & reg->override->val) == ftr_mask) {
10191018
reg->override->val &= ~ftr_mask;
10201019
pr_warn("%s[%d:%d]: impossible override, ignored\n",
@@ -2376,8 +2375,8 @@ static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
23762375
#ifdef CONFIG_ARM64_POE
23772376
static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused)
23782377
{
2379-
sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1x_E0POE);
2380-
sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE);
2378+
sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1_E0POE);
2379+
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_E0POE);
23812380
}
23822381
#endif
23832382

0 commit comments

Comments
 (0)