@@ -89,7 +89,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+	u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
 	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
 	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -267,7 +267,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
 	val &= ARMV8_PMU_PMCR_N_MASK;
 	if (val == 0)
@@ -289,7 +289,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -341,7 +341,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -443,7 +443,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 {
 	int i;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
 		return;
 
 	/* Weed out disabled counters */
@@ -586,7 +586,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
-	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+	return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
@@ -735,10 +735,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 	 * It is still necessary to get a valid cpu, though, to probe for the
 	 * default PMU instance as userspace is not required to specify a PMU
 	 * type. In order to uphold the preexisting behavior KVM selects the
-	 * PMU instance for the core where the first call to the
-	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
-	 * would be a user with disdain of all things big.LITTLE that affines
-	 * the VMM to a particular cluster of cores.
+	 * PMU instance for the core during vcpu init. A dependent use
+	 * case would be a user with disdain of all things big.LITTLE that
+	 * affines the VMM to a particular cluster of cores.
 	 *
 	 * In any case, userspace should just do the sane thing and use the UAPI
 	 * to select a PMU type directly. But, be wary of the baggage being
@@ -804,6 +803,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	return val & mask;
 }
 
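+/**
+ * kvm_vcpu_reload_pmu - Sanitize the PMU state against the current PMCR_EL0.
+ * @vcpu: The vcpu pointer
+ *
+ * Re-evaluate the current PMCR_EL0 value through kvm_pmu_handle_pmcr() and
+ * clear the overflow, interrupt and enable bits of counters that fall
+ * outside the valid counter mask.
+ */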
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
+
+	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
+	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
+	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+}
+
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_vcpu_has_pmu(vcpu))
@@ -892,6 +902,52 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 	return true;
 }
 
+/**
+ * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
+ * @kvm: The kvm pointer
+ */
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
+
+	/*
+	 * The arm_pmu->num_events considers the cycle counter as well.
+	 * Ignore that and return only the general-purpose counters.
+	 */
+	return arm_pmu->num_events - 1;
+}
+
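+/*
+ * Bind @arm_pmu to the VM and derive the default PMCR_EL0.N from the
+ * number of general-purpose counters it implements. Callers must hold
+ * the config_lock, as asserted below.
+ */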
+static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+	lockdep_assert_held(&kvm->arch.config_lock);
+
+	kvm->arch.arm_pmu = arm_pmu;
+	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
+}
+
+/**
+ * kvm_arm_set_default_pmu - No PMU set, get the default one.
+ * @kvm: The kvm pointer
+ *
+ * The observant among you will notice that the supported_cpus
+ * mask does not get updated for the default PMU even though it
+ * is quite possible the selected instance supports only a
+ * subset of cores in the system. This is intentional, and
+ * upholds the preexisting behavior on heterogeneous systems
+ * where vCPUs can be scheduled on any core but the guest
+ * counters could stop working.
+ */
+int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
+
+	if (!arm_pmu)
+		return -ENODEV;
+
+	kvm_arm_set_pmu(kvm, arm_pmu);
+	return 0;
+}
+
 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -911,7 +967,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 		break;
 	}
 
-	kvm->arch.arm_pmu = arm_pmu;
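+	/* Also derives the VM's default PMCR_EL0.N from the new PMU. */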
+	kvm_arm_set_pmu(kvm, arm_pmu);
 	cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
 	ret = 0;
 	break;
@@ -934,23 +990,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!kvm->arch.arm_pmu) {
-		/*
-		 * No PMU set, get the default one.
-		 *
-		 * The observant among you will notice that the supported_cpus
-		 * mask does not get updated for the default PMU even though it
-		 * is quite possible the selected instance supports only a
-		 * subset of cores in the system. This is intentional, and
-		 * upholds the preexisting behavior on heterogeneous systems
-		 * where vCPUs can be scheduled on any core but the guest
-		 * counters could stop working.
-		 */
-		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-		if (!kvm->arch.arm_pmu)
-			return -ENODEV;
-	}
-
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
 		int __user *uaddr = (int __user *)(long)attr->addr;
@@ -1090,3 +1129,15 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
 					      ID_AA64DFR0_EL1_PMUVer_V3P5);
 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
+			~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+
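+	/*
+	 * PMCR_EL0.N is not taken from the stored register value; the field
+	 * is overlaid with the VM-wide kvm->arch.pmcr_n.
+	 */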
+	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}