Skip to content

Commit 57fc267

Browse files
reijiw-kvm authored and oupton committed
KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0
Add a helper to read a vCPU's PMCR_EL0, and use it whenever KVM reads a vCPU's PMCR_EL0. Currently, the PMCR_EL0 value is tracked per vCPU. The following patches will make (only) PMCR_EL0.N track per guest. Having the new helper will be useful to combine the PMCR_EL0.N field (tracked per guest) and the other fields (tracked per vCPU) to provide the value of PMCR_EL0. No functional change intended. Reviewed-by: Sebastian Ott <[email protected]> Signed-off-by: Reiji Watanabe <[email protected]> Signed-off-by: Raghavendra Rao Ananta <[email protected]> Reviewed-by: Eric Auger <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Oliver Upton <[email protected]>
1 parent 4277335 commit 57fc267

File tree

4 files changed

+25
-11
lines changed

4 files changed

+25
-11
lines changed

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -801,8 +801,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
801801
}
802802

803803
if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
804-
kvm_pmu_handle_pmcr(vcpu,
805-
__vcpu_sys_reg(vcpu, PMCR_EL0));
804+
kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
806805

807806
if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
808807
kvm_vcpu_pmu_restore_guest(vcpu);

arch/arm64/kvm/pmu-emul.c

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
7272

7373
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
7474
{
75-
u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
75+
u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
7676

7777
return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
7878
(pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -250,7 +250,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
250250

251251
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
252252
{
253-
u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
253+
u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
254254

255255
val &= ARMV8_PMU_PMCR_N_MASK;
256256
if (val == 0)
@@ -272,7 +272,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
272272
if (!kvm_vcpu_has_pmu(vcpu))
273273
return;
274274

275-
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
275+
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
276276
return;
277277

278278
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -324,7 +324,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
324324
{
325325
u64 reg = 0;
326326

327-
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
327+
if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
328328
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
329329
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
330330
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -426,7 +426,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
426426
{
427427
int i;
428428

429-
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
429+
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
430430
return;
431431

432432
/* Weed out disabled counters */
@@ -569,7 +569,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
569569
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
570570
{
571571
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
572-
return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
572+
return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
573573
(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
574574
}
575575

@@ -1084,3 +1084,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
10841084
ID_AA64DFR0_EL1_PMUVer_V3P5);
10851085
return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
10861086
}
1087+
1088+
/**
1089+
* kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
1090+
* @vcpu: The vcpu pointer
1091+
*/
1092+
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
1093+
{
1094+
return __vcpu_sys_reg(vcpu, PMCR_EL0);
1095+
}

arch/arm64/kvm/sys_regs.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -822,15 +822,15 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
822822
* Only update writeable bits of PMCR (continuing into
823823
* kvm_pmu_handle_pmcr() as well)
824824
*/
825-
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
825+
val = kvm_vcpu_read_pmcr(vcpu);
826826
val &= ~ARMV8_PMU_PMCR_MASK;
827827
val |= p->regval & ARMV8_PMU_PMCR_MASK;
828828
if (!kvm_supports_32bit_el0())
829829
val |= ARMV8_PMU_PMCR_LC;
830830
kvm_pmu_handle_pmcr(vcpu, val);
831831
} else {
832832
/* PMCR.P & PMCR.C are RAZ */
833-
val = __vcpu_sys_reg(vcpu, PMCR_EL0)
833+
val = kvm_vcpu_read_pmcr(vcpu)
834834
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
835835
p->regval = val;
836836
}
@@ -879,7 +879,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
879879
{
880880
u64 pmcr, val;
881881

882-
pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
882+
pmcr = kvm_vcpu_read_pmcr(vcpu);
883883
val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
884884
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
885885
kvm_inject_undefined(vcpu);

include/kvm/arm_pmu.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,7 @@ void kvm_vcpu_pmu_resync_el0(void);
103103
u8 kvm_arm_pmu_get_pmuver_limit(void);
104104
int kvm_arm_set_default_pmu(struct kvm *kvm);
105105

106+
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
106107
#else
107108
struct kvm_pmu {
108109
};
@@ -180,6 +181,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
180181
return -ENODEV;
181182
}
182183

184+
static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
185+
{
186+
return 0;
187+
}
188+
183189
#endif
184190

185191
#endif

0 commit comments

Comments
 (0)