@@ -111,6 +111,11 @@ static u32 counter_index_to_evtreg(u64 idx)
 	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
 }
 
+static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
+{
+	return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
+}
+
 static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
@@ -619,6 +624,24 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
+static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
+{
+	u64 evtreg = kvm_pmc_read_evtreg(pmc);
+	bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
+	bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;
+
+	return u == nsu;
+}
+
+static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
+{
+	u64 evtreg = kvm_pmc_read_evtreg(pmc);
+	bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
+	bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;
+
+	return p == nsk;
+}
+
 /**
  * kvm_pmu_create_perf_event - create a perf event for a counter
  * @pmc: Counter context
@@ -629,17 +652,15 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct perf_event *event;
 	struct perf_event_attr attr;
-	u64 eventsel, reg, data;
-	bool p, u, nsk, nsu;
+	u64 eventsel, evtreg;
 
-	reg = counter_index_to_evtreg(pmc->idx);
-	data = __vcpu_sys_reg(vcpu, reg);
+	evtreg = kvm_pmc_read_evtreg(pmc);
 
 	kvm_pmu_stop_counter(pmc);
 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
 	else
-		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
+		eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);
 
 	/*
 	 * Neither SW increment nor chained events need to be backed
@@ -657,18 +678,13 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
 		return;
 
-	p = data & ARMV8_PMU_EXCLUDE_EL1;
-	u = data & ARMV8_PMU_EXCLUDE_EL0;
-	nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
-	nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
-
 	memset(&attr, 0, sizeof(struct perf_event_attr));
 	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
-	attr.exclude_user = (u != nsu);
-	attr.exclude_kernel = (p != nsk);
+	attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
+	attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
 	attr.config = eventsel;
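
For reference, the u == nsu and p == nsk checks in the new helpers encode the PMEVTYPERn_EL0 filter rule from the ARM ARM: events at non-secure EL0 (or EL1) are counted iff the NS filter bit equals its secure counterpart, and a KVM guest always runs non-secure, so the perf exclude flags are simply the negation. Below is a minimal user-space sketch of that rule, not kernel code; the bit positions (P=31, U=30, NSK=29, NSU=28) follow the architectural register layout and are assumed to match the kernel's ARMV8_PMU_EXCLUDE_* definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's ARMV8_PMU_EXCLUDE_* macros. */
#define EXCLUDE_EL1	(UINT64_C(1) << 31)	/* P: filter EL1 */
#define EXCLUDE_EL0	(UINT64_C(1) << 30)	/* U: filter EL0 */
#define EXCLUDE_NS_EL1	(UINT64_C(1) << 29)	/* NSK */
#define EXCLUDE_NS_EL0	(UINT64_C(1) << 28)	/* NSU */

/* Non-secure EL0 is counted iff NSU matches U. */
static bool counts_at_el0(uint64_t evtreg)
{
	bool nsu = evtreg & EXCLUDE_NS_EL0;
	bool u = evtreg & EXCLUDE_EL0;

	return u == nsu;
}

/* Non-secure EL1 is counted iff NSK matches P. */
static bool counts_at_el1(uint64_t evtreg)
{
	bool nsk = evtreg & EXCLUDE_NS_EL1;
	bool p = evtreg & EXCLUDE_EL1;

	return p == nsk;
}

int main(void)
{
	const uint64_t cases[] = {
		0,				/* count at EL0 and EL1 */
		EXCLUDE_EL0,			/* U != NSU: drop EL0 */
		EXCLUDE_EL0 | EXCLUDE_NS_EL0,	/* U == NSU: EL0 counts again */
		EXCLUDE_EL1,			/* P != NSK: drop EL1 */
	};

	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("evtreg=%#010llx exclude_user=%d exclude_kernel=%d\n",
		       (unsigned long long)cases[i],
		       !counts_at_el0(cases[i]), !counts_at_el1(cases[i]));

	return 0;
}

With U=1 and NSU=0, for example, the sketch reports exclude_user=1, which is exactly what kvm_pmu_create_perf_event() now derives via !kvm_pmc_counts_at_el0().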