@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
@@ -327,65 +328,44 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 	return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-	int i;
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
-	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
+	if (!pmc->perf_event) {
+		kvm_pmu_create_perf_event(pmc);
 		return;
+	}
 
-	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
-
-		if (!(val & BIT(i)))
-			continue;
-
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+	perf_event_enable(pmc->perf_event);
+	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+		kvm_debug("fail to enable perf event\n");
+}
 
-		if (!pmc->perf_event) {
-			kvm_pmu_create_perf_event(pmc);
-		} else {
-			perf_event_enable(pmc->perf_event);
-			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-				kvm_debug("fail to enable perf event\n");
-		}
-	}
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event)
+		perf_event_disable(pmc->perf_event);
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
 	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
 		if (!(val & BIT(i)))
 			continue;
 
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (pmc->perf_event)
-			perf_event_disable(pmc->perf_event);
+		if (kvm_pmu_counter_is_enabled(pmc))
+			kvm_pmc_enable_perf_event(pmc);
+		else
+			kvm_pmc_disable_perf_event(pmc);
 	}
+
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 /*
@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
+	/* Request a reload of the PMU to enable/disable affected counters */
+	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_enable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	} else {
-		kvm_pmu_disable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	}
-
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
-		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+		/*
+		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+		 * to the 'guest' range of counters and never the 'hyp' range.
+		 */
+		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+				     ~kvm_pmu_hyp_counter_mask(vcpu) &
+				     ~BIT(ARMV8_PMU_CYCLE_IDX);
+
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
-	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
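Note (not part of the hunks above): with kvm_pmu_enable_counter_mask() and kvm_pmu_disable_counter_mask() removed, a caller is expected to update PMCNTENSET_EL0 first and then pass the written mask to kvm_pmu_reprogram_counter_mask(), which decides enable vs. disable per counter via kvm_pmu_counter_is_enabled(). The sketch below shows what such a call site could look like; the handler name and exact accessor shape are illustrative assumptions, not lines from this patch.

/*
 * Illustrative sketch only (not from this patch): a PMCNTEN{SET,CLR}_EL0
 * write handler driving the unified helper. The function name and the
 * surrounding sysreg-accessor shape are assumptions; the sysreg helpers
 * used (kvm_pmu_accessible_counter_mask(), __vcpu_sys_reg()) exist in KVM/arm64.
 */
static bool access_pmcnten_sketch(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
	u64 val = p->regval & mask;

	if (r->Op2 & 0x1)
		/* PMCNTENSET_EL0: set the written bits... */
		__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
	else
		/* PMCNTENCLR_EL0: ...or clear them */
		__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;

	/*
	 * Either way, a single call reconciles the backing perf events with
	 * the new enable state (and PMCR_EL0.E) for every counter in 'val'.
	 */
	kvm_pmu_reprogram_counter_mask(vcpu, val);

	return true;
}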