@@ -77,6 +77,16 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
 }
 
+static u32 counter_index_to_reg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
+}
+
+static u32 counter_index_to_evtreg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
+}
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
@@ -91,8 +101,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return 0;
 
-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(select_idx);
 	counter = __vcpu_sys_reg(vcpu, reg);
 
 	/*
@@ -122,8 +131,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+	reg = counter_index_to_reg(select_idx);
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
 
 	/* Recreate the perf event to reflect the updated sample_period */
@@ -158,10 +166,7 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
 	val = kvm_pmu_get_counter_value(vcpu, pmc->idx);
 
-	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		reg = PMCCNTR_EL0;
-	else
-		reg = PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(pmc->idx);
 
 	__vcpu_sys_reg(vcpu, reg) = val;
 
@@ -404,16 +409,16 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 		u64 type, reg;
 
 		/* Filter on event type */
-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
 		type &= kvm_pmu_event_mask(vcpu->kvm);
 		if (type != event)
 			continue;
 
 		/* Increment this counter */
-		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmu_idx_is_64bit(vcpu, i))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
 
 		/* No overflow? move on */
 		if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
@@ -549,8 +554,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct perf_event_attr attr;
 	u64 eventsel, counter, reg, data;
 
-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
+	reg = counter_index_to_evtreg(select_idx);
 	data = __vcpu_sys_reg(vcpu, reg);
 
 	kvm_pmu_stop_counter(vcpu, pmc);
@@ -632,8 +636,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
 
-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
+	reg = counter_index_to_evtreg(select_idx);
 
 	__vcpu_sys_reg(vcpu, reg) = data & mask;
 
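For readers skimming the diff: both helpers encode the one rule every call site above used to open-code. Counter index 31 (ARMV8_PMU_CYCLE_IDX) is the cycle counter and maps to PMCCNTR_EL0/PMCCFILTR_EL0; indices 0-30 map to the per-event registers PMEVCNTRn_EL0/PMEVTYPERn_EL0 by simple addition to the base index. Below is a minimal standalone sketch of that mapping, not the kernel code itself: the enum values are hypothetical placeholders, since in the kernel these names are indices into the vcpu's sys_regs[] array rather than the constants used here.

/*
 * Standalone illustration of the mapping the new helpers centralize.
 * The enum values are made-up placeholders: in the kernel, the
 * PMEVCNTRn_EL0/PMEVTYPERn_EL0 slots are laid out consecutively so
 * that "base + idx" selects counter n.
 */
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX	31	/* the cycle counter is index 31 */

enum {					/* hypothetical table offsets */
	PMEVCNTR0_EL0	= 100,
	PMCCNTR_EL0	= 200,
	PMEVTYPER0_EL0	= 300,
	PMCCFILTR_EL0	= 400,
};

/* Counter index -> counter value register, as in the patch. */
static uint32_t counter_index_to_reg(uint64_t idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

/* Counter index -> event type/filter register, as in the patch. */
static uint32_t counter_index_to_evtreg(uint64_t idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

int main(void)
{
	/* Event counter 3 selects PMEVCNTR3_EL0 / PMEVTYPER3_EL0. */
	printf("idx 3:  reg=%u evtreg=%u\n",
	       (unsigned)counter_index_to_reg(3),
	       (unsigned)counter_index_to_evtreg(3));
	/* The cycle counter selects PMCCNTR_EL0 / PMCCFILTR_EL0 instead. */
	printf("idx 31: reg=%u evtreg=%u\n",
	       (unsigned)counter_index_to_reg(ARMV8_PMU_CYCLE_IDX),
	       (unsigned)counter_index_to_evtreg(ARMV8_PMU_CYCLE_IDX));
	return 0;
}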