Skip to content

Commit bc512d6

Browse files
committed
KVM: arm64: Make PMEVTYPER<n>_EL0.NSH RES0 if EL2 isn't advertised
The NSH bit, which filters event counting at EL2, is required by the architecture if an implementation has EL2. Even though KVM doesn't support nested virt yet, it makes no effort to hide the existence of EL2 from the ID registers. Userspace can, however, change the value of PFR0 to hide EL2.

Align KVM's sysreg emulation with the architecture and make NSH RES0 if EL2 isn't advertised. Keep in mind the bit is ignored when constructing the backing perf event.

While at it, build the event type mask using explicit field definitions instead of relying on ARMV8_PMU_EVTYPE_MASK. KVM probably should've been doing this in the first place, as it avoids changes to the aforementioned mask affecting sysreg emulation.

Reviewed-by: Suzuki K Poulose <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
1 parent 6465e26 commit bc512d6

File tree

3 files changed

+25
-9
lines changed

3 files changed

+25
-9
lines changed

arch/arm64/kvm/pmu-emul.c

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,18 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
6060
return __kvm_pmu_event_mask(pmuver);
6161
}
6262

63+
u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
64+
{
65+
u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
66+
kvm_pmu_event_mask(kvm);
67+
u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
68+
69+
if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
70+
mask |= ARMV8_PMU_INCLUDE_EL2;
71+
72+
return mask;
73+
}
74+
6375
/**
6476
* kvm_pmc_is_64bit - determine if counter is 64bit
6577
* @pmc: counter context
@@ -657,18 +669,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
657669
u64 select_idx)
658670
{
659671
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
660-
u64 reg, mask;
672+
u64 reg;
661673

662674
if (!kvm_vcpu_has_pmu(vcpu))
663675
return;
664676

665-
mask = ARMV8_PMU_EVTYPE_MASK;
666-
mask &= ~ARMV8_PMU_EVTYPE_EVENT;
667-
mask |= kvm_pmu_event_mask(vcpu->kvm);
668-
669677
reg = counter_index_to_evtreg(pmc->idx);
670-
671-
__vcpu_sys_reg(vcpu, reg) = data & mask;
678+
__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
672679

673680
kvm_pmu_create_perf_event(pmc);
674681
}

arch/arm64/kvm/sys_regs.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -746,8 +746,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
746746

747747
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
748748
{
749+
/* This thing will UNDEF, who cares about the reset value? */
750+
if (!kvm_vcpu_has_pmu(vcpu))
751+
return 0;
752+
749753
reset_unknown(vcpu, r);
750-
__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
754+
__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
751755

752756
return __vcpu_sys_reg(vcpu, r->reg);
753757
}
@@ -988,7 +992,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
988992
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
989993
kvm_vcpu_pmu_restore_guest(vcpu);
990994
} else {
991-
p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
995+
p->regval = __vcpu_sys_reg(vcpu, reg);
992996
}
993997

994998
return true;

include/kvm/arm_pmu.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void);
101101
})
102102

103103
u8 kvm_arm_pmu_get_pmuver_limit(void);
104+
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
104105

105106
#else
106107
struct kvm_pmu {
@@ -172,6 +173,10 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
172173
{
173174
return 0;
174175
}
176+
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
177+
{
178+
return 0;
179+
}
175180
static inline void kvm_vcpu_pmu_resync_el0(void) {}
176181

177182
#endif

0 commit comments

Comments (0)