
Commit 377d0e5

Merge branch kvm-arm64/ctr-el0 into kvmarm/next
* kvm-arm64/ctr-el0:
  : Support for user changes to CTR_EL0, courtesy of Sebastian Ott
  :
  : Allow userspace to change the guest-visible value of CTR_EL0 for a VM,
  : so long as the requested value represents a subset of features supported
  : by hardware. In other words, prevent the VMM from over-promising the
  : capabilities of hardware.
  :
  : Make this happen by fitting CTR_EL0 into the existing infrastructure for
  : feature ID registers.
  KVM: selftests: Assert that MPIDR_EL1 is unchanged across vCPU reset
  KVM: arm64: nv: Unfudge ID_AA64PFR0_EL1 masking
  KVM: selftests: arm64: Test writes to CTR_EL0
  KVM: arm64: rename functions for invariant sys regs
  KVM: arm64: show writable masks for feature registers
  KVM: arm64: Treat CTR_EL0 as a VM feature ID register
  KVM: arm64: unify code to prepare traps
  KVM: arm64: nv: Use accessors for modifying ID registers
  KVM: arm64: Add helper for writing ID regs
  KVM: arm64: Use read-only helper for reading VM ID registers
  KVM: arm64: Make idregs debugfs iterator search sysreg table directly
  KVM: arm64: Get sys_reg encoding from descriptor in idregs_debug_show()

Signed-off-by: Oliver Upton <[email protected]>
2 parents 435a9f6 + b053966 commit 377d0e5
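The gist of the series: a VMM may write CTR_EL0 through the usual one-reg interface, and KVM accepts the value only if it claims no more than the hardware provides. As a rough userspace-side illustration (not code from this merge; the authoritative example is the selftest added by "KVM: selftests: arm64: Test writes to CTR_EL0"), a VMM could under-promise by clearing a cache-coherence field. The helper name and the choice of field are illustrative only:

/*
 * Illustrative sketch only: constrain the guest-visible CTR_EL0 via
 * KVM_{GET,SET}_ONE_REG. vcpu_fd is assumed to be an existing vCPU fd.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* CTR_EL0 is op0=3, op1=3, CRn=0, CRm=0, op2=1 */
#define CTR_EL0_ID      ARM64_SYS_REG(3, 3, 0, 0, 1)
#define CTR_EL0_DIC     (UINT64_C(1) << 29)

static int shrink_guest_ctr_el0(int vcpu_fd)
{
        uint64_t val;
        struct kvm_one_reg reg = {
                .id   = CTR_EL0_ID,
                .addr = (uint64_t)(uintptr_t)&val,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                return -1;

        /* Advertise a strict subset: pretend FEAT_DIC is absent */
        val &= ~CTR_EL0_DIC;

        /* KVM rejects values that over-promise relative to hardware */
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}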

File tree

7 files changed: +264 −221 lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 9 additions & 31 deletions
@@ -69,39 +69,17 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
-        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
-        if (has_vhe() || has_hvhe())
-                vcpu->arch.hcr_el2 |= HCR_E2H;
-        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
-                /* route synchronous external abort exceptions to EL2 */
-                vcpu->arch.hcr_el2 |= HCR_TEA;
-                /* trap error record accesses */
-                vcpu->arch.hcr_el2 |= HCR_TERR;
-        }
+        if (!vcpu_has_run_once(vcpu))
+                vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
 
-        if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
-                vcpu->arch.hcr_el2 |= HCR_FWB;
-        } else {
-                /*
-                 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
-                 * get set in SCTLR_EL1 such that we can detect when the guest
-                 * MMU gets turned on and do the necessary cache maintenance
-                 * then.
-                 */
+        /*
+         * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+         * get set in SCTLR_EL1 such that we can detect when the guest
+         * MMU gets turned on and do the necessary cache maintenance
+         * then.
+         */
+        if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                 vcpu->arch.hcr_el2 |= HCR_TVM;
-        }
-
-        if (cpus_have_final_cap(ARM64_HAS_EVT) &&
-            !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
-                vcpu->arch.hcr_el2 |= HCR_TID4;
-        else
-                vcpu->arch.hcr_el2 |= HCR_TID2;
-
-        if (vcpu_el1_is_32bit(vcpu))
-                vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-        if (kvm_has_mte(vcpu->kvm))
-                vcpu->arch.hcr_el2 |= HCR_ATA;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
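The trap configuration deleted above does not go away: per "KVM: arm64: unify code to prepare traps", it moves into the renamed kvm_calculate_traps() path (see kvm_host.h and arm.c below), which runs once the VM's feature ID registers are final. A sketch of the shape that consolidated code plausibly takes; the vcpu_set_hcr() helper name and the CTR_EL0 comparison driving the TID2/TID4 choice are assumptions based on this series' subjects, not quotes from the merged code:

/*
 * Sketch only: rebuild the removed HCR_EL2 setup in the unified
 * trap-preparation path, once per-VM feature IDs are frozen.
 */
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        if (has_vhe() || has_hvhe())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                vcpu->arch.hcr_el2 |= HCR_FWB;

        /*
         * Assumption: with CTR_EL0 now writable, the cache-type trap
         * choice keys off whether the VM's value still matches hardware.
         */
        if (cpus_have_final_cap(ARM64_HAS_EVT) &&
            !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
            kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
                vcpu->arch.hcr_el2 |= HCR_TID4;
        else
                vcpu->arch.hcr_el2 |= HCR_TID2;

        if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        if (kvm_has_mte(vcpu->kvm))
                vcpu->arch.hcr_el2 |= HCR_ATA;
}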

arch/arm64/include/asm/kvm_host.h

Lines changed: 22 additions & 4 deletions
@@ -362,11 +362,11 @@ struct kvm_arch {
          * Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
          */
 #define IDREG_IDX(id)           (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
-#define IDX_IDREG(idx)          sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
-#define IDREG(kvm, id)          ((kvm)->arch.id_regs[IDREG_IDX(id)])
 #define KVM_ARM_ID_REG_NUM      (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
         u64 id_regs[KVM_ARM_ID_REG_NUM];
 
+        u64 ctr_el0;
+
         /* Masks for VNCR-baked sysregs */
         struct kvm_sysreg_masks *sysreg_masks;
 
@@ -1180,7 +1180,7 @@ int __init populate_nv_trap_config(void);
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
-void kvm_init_sysreg(struct kvm_vcpu *);
+void kvm_calculate_traps(struct kvm_vcpu *vcpu);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
@@ -1391,6 +1391,24 @@ static inline void kvm_hyp_reserve(void) { }
 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
+static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
+{
+        switch (reg) {
+        case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
+                return &ka->id_regs[IDREG_IDX(reg)];
+        case SYS_CTR_EL0:
+                return &ka->ctr_el0;
+        default:
+                WARN_ON_ONCE(1);
+                return NULL;
+        }
+}
+
+#define kvm_read_vm_id_reg(kvm, reg)                                    \
+        ({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
+
+void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
+
 #define __expand_field_sign_unsigned(id, fld, val)                      \
         ((u64)SYS_FIELD_VALUE(id, fld, val))
 
@@ -1407,7 +1425,7 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
 #define get_idreg_field_unsigned(kvm, id, fld)                          \
         ({                                                              \
-                u64 __val = IDREG((kvm), SYS_##id);                     \
+                u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id);        \
                 FIELD_GET(id##_##fld##_MASK, __val);                    \
         })
 
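kvm_set_vm_id_reg() is only declared in this header; its definition lands elsewhere in the series. A minimal sketch consistent with __vm_id_reg() above and the config_lock rule quoted in the struct kvm_arch comment; the exact locking and KVM_BUG_ON() handling of an unmapped register are assumptions:

/* Sketch: setter counterpart to kvm_read_vm_id_reg(), not merged code */
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
{
        u64 *p = __vm_id_reg(&kvm->arch, reg);

        /* idreg updates are serialised by the VM-wide config lock */
        lockdep_assert_held(&kvm->arch.config_lock);

        if (KVM_BUG_ON(!p, kvm))
                return;

        *p = val;
}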

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 1 deletion
@@ -836,7 +836,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
          * This needs to happen after NV has imposed its own restrictions on
          * the feature set
          */
-        kvm_init_sysreg(vcpu);
+        kvm_calculate_traps(vcpu);
 
         ret = kvm_timer_enable(vcpu);
         if (ret)

arch/arm64/kvm/nested.c

Lines changed: 123 additions & 135 deletions
@@ -799,142 +799,132 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
  * This list should get updated as new features get added to the NV
  * support, and new extension to the architecture.
  */
-static u64 limit_nv_id_reg(u32 id, u64 val)
+static void limit_nv_id_regs(struct kvm *kvm)
 {
-        u64 tmp;
-
-        switch (id) {
-        case SYS_ID_AA64ISAR0_EL1:
-                /* Support everything but TME */
-                val &= ~NV_FTR(ISAR0, TME);
-                break;
-
-        case SYS_ID_AA64ISAR1_EL1:
-                /* Support everything but Spec Invalidation and LS64 */
-                val &= ~(NV_FTR(ISAR1, LS64) |
-                         NV_FTR(ISAR1, SPECRES));
-                break;
-
-        case SYS_ID_AA64PFR0_EL1:
-                /* No AMU, MPAM, S-EL2, RAS or SVE */
-                val &= ~(GENMASK_ULL(55, 52) |
-                         NV_FTR(PFR0, AMU) |
-                         NV_FTR(PFR0, MPAM) |
-                         NV_FTR(PFR0, SEL2) |
-                         NV_FTR(PFR0, RAS) |
-                         NV_FTR(PFR0, SVE) |
-                         NV_FTR(PFR0, EL3) |
-                         NV_FTR(PFR0, EL2) |
-                         NV_FTR(PFR0, EL1));
-                /* 64bit EL1/EL2/EL3 only */
-                val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
-                val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
-                val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
-                break;
-
-        case SYS_ID_AA64PFR1_EL1:
-                /* Only support BTI, SSBS, CSV2_frac */
-                val &= (NV_FTR(PFR1, BT) |
-                        NV_FTR(PFR1, SSBS) |
-                        NV_FTR(PFR1, CSV2_frac));
-                break;
-
-        case SYS_ID_AA64MMFR0_EL1:
-                /* Hide ECV, ExS, Secure Memory */
-                val &= ~(NV_FTR(MMFR0, ECV) |
-                         NV_FTR(MMFR0, EXS) |
-                         NV_FTR(MMFR0, TGRAN4_2) |
-                         NV_FTR(MMFR0, TGRAN16_2) |
-                         NV_FTR(MMFR0, TGRAN64_2) |
-                         NV_FTR(MMFR0, SNSMEM));
-
-                /* Disallow unsupported S2 page sizes */
-                switch (PAGE_SIZE) {
-                case SZ_64K:
-                        val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
-                        fallthrough;
-                case SZ_16K:
-                        val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
-                        fallthrough;
-                case SZ_4K:
-                        /* Support everything */
-                        break;
-                }
-                /*
-                 * Since we can't support a guest S2 page size smaller than
-                 * the host's own page size (due to KVM only populating its
-                 * own S2 using the kernel's page size), advertise the
-                 * limitation using FEAT_GTG.
-                 */
-                switch (PAGE_SIZE) {
-                case SZ_4K:
-                        val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
-                        fallthrough;
-                case SZ_16K:
-                        val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
-                        fallthrough;
-                case SZ_64K:
-                        val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
-                        break;
-                }
-                /* Cap PARange to 48bits */
-                tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
-                if (tmp > 0b0101) {
-                        val &= ~NV_FTR(MMFR0, PARANGE);
-                        val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
-                }
-                break;
-
-        case SYS_ID_AA64MMFR1_EL1:
-                val &= (NV_FTR(MMFR1, HCX) |
-                        NV_FTR(MMFR1, PAN) |
-                        NV_FTR(MMFR1, LO) |
-                        NV_FTR(MMFR1, HPDS) |
-                        NV_FTR(MMFR1, VH) |
-                        NV_FTR(MMFR1, VMIDBits));
-                break;
-
-        case SYS_ID_AA64MMFR2_EL1:
-                val &= ~(NV_FTR(MMFR2, BBM) |
-                         NV_FTR(MMFR2, TTL) |
-                         GENMASK_ULL(47, 44) |
-                         NV_FTR(MMFR2, ST) |
-                         NV_FTR(MMFR2, CCIDX) |
-                         NV_FTR(MMFR2, VARange));
-
-                /* Force TTL support */
-                val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
-                break;
-
-        case SYS_ID_AA64MMFR4_EL1:
-                val = 0;
-                if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
-                        val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
-                                          ID_AA64MMFR4_EL1_E2H0_NI_NV1);
-                break;
-
-        case SYS_ID_AA64DFR0_EL1:
-                /* Only limited support for PMU, Debug, BPs and WPs */
-                val &= (NV_FTR(DFR0, PMUVer) |
-                        NV_FTR(DFR0, WRPs) |
-                        NV_FTR(DFR0, BRPs) |
-                        NV_FTR(DFR0, DebugVer));
-
-                /* Cap Debug to ARMv8.1 */
-                tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
-                if (tmp > 0b0111) {
-                        val &= ~NV_FTR(DFR0, DebugVer);
-                        val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
-                }
+        u64 val, tmp;
+
+        /* Support everything but TME */
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1);
+        val &= ~NV_FTR(ISAR0, TME);
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
+
+        /* Support everything but Spec Invalidation and LS64 */
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1);
+        val &= ~(NV_FTR(ISAR1, LS64) |
+                 NV_FTR(ISAR1, SPECRES));
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
+
+        /* No AMU, MPAM, S-EL2, RAS or SVE */
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
+        val &= ~(GENMASK_ULL(55, 52) |
+                 NV_FTR(PFR0, AMU) |
+                 NV_FTR(PFR0, MPAM) |
+                 NV_FTR(PFR0, SEL2) |
+                 NV_FTR(PFR0, RAS) |
+                 NV_FTR(PFR0, SVE) |
+                 NV_FTR(PFR0, EL3) |
+                 NV_FTR(PFR0, EL2) |
+                 NV_FTR(PFR0, EL1));
+        /* 64bit EL1/EL2/EL3 only */
+        val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
+        val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
+        val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
+
+        /* Only support BTI, SSBS, CSV2_frac */
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1);
+        val &= (NV_FTR(PFR1, BT) |
+                NV_FTR(PFR1, SSBS) |
+                NV_FTR(PFR1, CSV2_frac));
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
+
+        /* Hide ECV, ExS, Secure Memory */
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
+        val &= ~(NV_FTR(MMFR0, ECV) |
+                 NV_FTR(MMFR0, EXS) |
+                 NV_FTR(MMFR0, TGRAN4_2) |
+                 NV_FTR(MMFR0, TGRAN16_2) |
+                 NV_FTR(MMFR0, TGRAN64_2) |
+                 NV_FTR(MMFR0, SNSMEM));
+
+        /* Disallow unsupported S2 page sizes */
+        switch (PAGE_SIZE) {
+        case SZ_64K:
+                val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
+                fallthrough;
+        case SZ_16K:
+                val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
+                fallthrough;
+        case SZ_4K:
+                /* Support everything */
                 break;
-
-        default:
-                /* Unknown register, just wipe it clean */
-                val = 0;
+        }
+        /*
+         * Since we can't support a guest S2 page size smaller than
+         * the host's own page size (due to KVM only populating its
+         * own S2 using the kernel's page size), advertise the
+         * limitation using FEAT_GTG.
+         */
+        switch (PAGE_SIZE) {
+        case SZ_4K:
+                val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
+                fallthrough;
+        case SZ_16K:
+                val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
+                fallthrough;
+        case SZ_64K:
+                val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
                 break;
         }
-
-        return val;
+        /* Cap PARange to 48bits */
+        tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
+        if (tmp > 0b0101) {
+                val &= ~NV_FTR(MMFR0, PARANGE);
+                val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
+        }
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
+
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1);
+        val &= (NV_FTR(MMFR1, HCX) |
+                NV_FTR(MMFR1, PAN) |
+                NV_FTR(MMFR1, LO) |
+                NV_FTR(MMFR1, HPDS) |
+                NV_FTR(MMFR1, VH) |
+                NV_FTR(MMFR1, VMIDBits));
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
+
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1);
+        val &= ~(NV_FTR(MMFR2, BBM) |
+                 NV_FTR(MMFR2, TTL) |
+                 GENMASK_ULL(47, 44) |
+                 NV_FTR(MMFR2, ST) |
+                 NV_FTR(MMFR2, CCIDX) |
+                 NV_FTR(MMFR2, VARange));
+
+        /* Force TTL support */
+        val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
+
+        val = 0;
+        if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+                val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
+                                  ID_AA64MMFR4_EL1_E2H0_NI_NV1);
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
+
+        /* Only limited support for PMU, Debug, BPs and WPs */
+        val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
+        val &= (NV_FTR(DFR0, PMUVer) |
+                NV_FTR(DFR0, WRPs) |
+                NV_FTR(DFR0, BRPs) |
+                NV_FTR(DFR0, DebugVer));
+
+        /* Cap Debug to ARMv8.1 */
+        tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
+        if (tmp > 0b0111) {
+                val &= ~NV_FTR(DFR0, DebugVer);
+                val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
+        }
+        kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
 }
 
 u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
@@ -979,9 +969,7 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
                 goto out;
         }
 
-        for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
-                kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
-                                                       kvm->arch.id_regs[i]);
+        limit_nv_id_regs(kvm);
 
         /* VTTBR_EL2 */
         res0 = res1 = 0;
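A note on notation: NV_FTR(), used throughout the hunk above, is nested.c's shorthand for the generated ID-register field masks. Its definition is not part of this diff; a matching form, assumed from context, is:

/* Assumed from context: expands to the ID_AA64<reg>_EL1_<field> mask */
#define NV_FTR(r, f)    ID_AA64##r##_EL1_##f##_MASK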

arch/arm64/kvm/pmu-emul.c

Lines changed: 1 addition & 1 deletion
@@ -54,7 +54,7 @@ static u32 __kvm_pmu_event_mask(unsigned int pmuver)
 
 static u32 kvm_pmu_event_mask(struct kvm *kvm)
 {
-        u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
+        u64 dfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
         u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
 
         return __kvm_pmu_event_mask(pmuver);
