
Commit 6f34024

KVM: arm64: Use a cpucap to determine if system supports FEAT_PMUv3
KVM is about to learn some new tricks to virtualize PMUv3 on IMPDEF hardware. As part of that, we now need to differentiate host support from guest support for PMUv3. Add a cpucap to determine if an architectural PMUv3 is present to guard host usage of PMUv3 controls.

Tested-by: Janne Grunau <[email protected]>
Reviewed-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
Parent: ed33572

File tree: 7 files changed (+46, -8 lines)
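To make the intent concrete, here is a minimal sketch (not part of the patch; the function names and bodies are illustrative only) of how a host-side PMU access changes. kvm_arm_support_pmu_v3() roughly reflects whether KVM can offer a PMU to guests, while the new system_supports_pmuv3() reports whether the host itself implements an architectural FEAT_PMUv3.

/*
 * Illustrative sketch only, not from the patch: the guard at host call
 * sites moves from KVM's PMU-support check to the new cpucap helper.
 */
static void host_pmu_access_old(void)
{
	if (!kvm_arm_support_pmu_v3())		/* old guard at host call sites */
		return;
	write_sysreg(0, pmselr_el0);
}

static void host_pmu_access_new(void)
{
	if (!system_supports_pmuv3())		/* new cpucap-backed guard */
		return;
	write_sysreg(0, pmselr_el0);
}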

arch/arm64/include/asm/cpucaps.h

Lines changed: 2 additions & 0 deletions

@@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
 		 * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
 		 */
 		return true;
+	case ARM64_HAS_PMUV3:
+		return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
 	}
 
 	return true;
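The effect of this cpucap_is_possible() entry, roughly: with CONFIG_HW_PERF_EVENTS=n the capability can never be set, so capability checks built on top of it fold to a constant false and the guarded PMU code is discarded at compile time. A hedged sketch of a caller, assuming that constant-folding behaviour (the example function is illustrative; the real callers appear further down):

static void host_pmu_path_example(void)
{
	/*
	 * With CONFIG_HW_PERF_EVENTS=n, cpucap_is_possible(ARM64_HAS_PMUV3)
	 * is a compile-time false, so this whole branch is dead code to
	 * the compiler.
	 */
	if (!cpus_have_final_cap(ARM64_HAS_PMUV3))
		return;

	/* ... architectural PMUv3 register accesses ... */
}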

arch/arm64/include/asm/cpufeature.h

Lines changed: 5 additions & 0 deletions

@@ -866,6 +866,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
 	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
 }
 
+static inline bool system_supports_pmuv3(void)
+{
+	return cpus_have_final_cap(ARM64_HAS_PMUV3);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);

arch/arm64/kernel/cpufeature.c

Lines changed: 30 additions & 0 deletions

@@ -1898,6 +1898,28 @@ static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
 }
 #endif
 
+#ifdef CONFIG_HW_PERF_EVENTS
+static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	unsigned int pmuver;
+
+	/*
+	 * PMUVer follows the standard ID scheme for an unsigned field with the
+	 * exception of 0xF (IMP_DEF) which is treated specially and implies
+	 * FEAT_PMUv3 is not implemented.
+	 *
+	 * See DDI0487L.a D24.1.3.2 for more details.
+	 */
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+		return false;
+
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
+}
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define KPTI_NG_TEMP_VA		(-(1UL << PMD_SHIFT))

@@ -2998,6 +3020,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, GCS, IMP)
 	},
+#endif
+#ifdef CONFIG_HW_PERF_EVENTS
+	{
+		.desc = "PMUv3",
+		.capability = ARM64_HAS_PMUV3,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_pmuv3,
+	},
 #endif
 	{},
 };
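The PMUVer decode rule used by has_pmuv3() can be tried standalone. A self-contained sketch in userspace C (not kernel code; the field offset is written out by hand, assuming ID_AA64DFR0_EL1.PMUVer occupies bits [11:8]):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMUVER_SHIFT	8	/* ID_AA64DFR0_EL1.PMUVer, bits [11:8] */
#define PMUVER_IMP	0x1	/* PMUv3 implemented */
#define PMUVER_IMP_DEF	0xf	/* IMP_DEF PMU, i.e. no architectural PMUv3 */

static bool dfr0_has_pmuv3(uint64_t dfr0)
{
	unsigned int pmuver = (dfr0 >> PMUVER_SHIFT) & 0xf;

	/* 0xF does not mean "newer than v3"; it means no FEAT_PMUv3 at all. */
	if (pmuver == PMUVER_IMP_DEF)
		return false;
	return pmuver >= PMUVER_IMP;
}

int main(void)
{
	printf("PMUVer=0x1 -> %d\n", dfr0_has_pmuv3(0x1ULL << PMUVER_SHIFT)); /* 1 */
	printf("PMUVer=0xf -> %d\n", dfr0_has_pmuv3(0xfULL << PMUVER_SHIFT)); /* 0 */
	printf("PMUVer=0x0 -> %d\n", dfr0_has_pmuv3(0));                      /* 0 */
	return 0;
}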

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 2 additions & 2 deletions

@@ -244,7 +244,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
 	 * EL1 instead of being trapped to EL2.
 	 */
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;
 
 		write_sysreg(0, pmselr_el0);

@@ -281,7 +281,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	write_sysreg(0, hstr_el2);
-	if (kvm_arm_support_pmu_v3()) {
+	if (system_supports_pmuv3()) {
 		struct kvm_cpu_context *hctxt;
 
 		hctxt = host_data_ptr(host_ctxt);

arch/arm64/kvm/pmu.c

Lines changed: 5 additions & 5 deletions

@@ -41,7 +41,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
+	if (!system_supports_pmuv3() || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)

@@ -57,7 +57,7 @@ void kvm_clr_pmu_events(u64 clr)
 {
 	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-	if (!kvm_arm_support_pmu_v3())
+	if (!system_supports_pmuv3())
 		return;
 
 	pmu->events_host &= ~clr;

@@ -133,7 +133,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;
 
 	preempt_disable();

@@ -154,7 +154,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_events *pmu;
 	u64 events_guest, events_host;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return;
 
 	pmu = kvm_get_pmu_events();

@@ -180,7 +180,7 @@ bool kvm_set_pmuserenr(u64 val)
 	struct kvm_cpu_context *hctxt;
 	struct kvm_vcpu *vcpu;
 
-	if (!kvm_arm_support_pmu_v3() || !has_vhe())
+	if (!system_supports_pmuv3() || !has_vhe())
 		return false;
 
 	vcpu = kvm_get_running_vcpu();
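All of these call sites manage the host/guest event bitmaps that KVM swaps at world switch, so they are host-side users of the PMU and now key off the host cpucap. A condensed sketch of the set/clear pattern (not a verbatim copy of kvm_set_pmu_events(); names follow the hunks above, the comments are mine):

static void set_pmu_events_sketch(u64 set, struct perf_event_attr *attr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	/* Nothing to track unless the host implements FEAT_PMUv3. */
	if (!system_supports_pmuv3() || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		pmu->events_host |= set;	/* count while the host runs */
	if (!attr->exclude_guest)
		pmu->events_guest |= set;	/* count while the guest runs */
}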

arch/arm64/tools/cpucaps

Lines changed: 1 addition & 0 deletions

@@ -45,6 +45,7 @@ HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
 HAS_PAN
+HAS_PMUV3
 HAS_S1PIE
 HAS_S1POE
 HAS_RAS_EXTN

include/kvm/arm_pmu.h

Lines changed: 1 addition & 1 deletion

@@ -86,7 +86,7 @@ void kvm_vcpu_pmu_resync_el0(void);
  */
 #define kvm_pmu_update_vcpu_events(vcpu)				\
 	do {								\
-		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
+		if (!has_vhe() && system_supports_pmuv3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
