Skip to content

Commit 76c9fc5

Browse files
eauger authored and Marc Zyngier committed
KVM: arm64: pmu: Don't mark a counter as chained if the odd one is disabled
At the moment we update the chain bitmap on type setting. This does not take into account the enable state of the odd register. Let's make sure a counter is never considered as chained if the high counter is disabled.

We recompute the chain state on enable/disable and type changes.

Also let create_perf_event() use the chain bitmap and not use kvm_pmu_idx_has_chain_evtype().

Suggested-by: Marc Zyngier <[email protected]>
Signed-off-by: Eric Auger <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 3837407 commit 76c9fc5

File tree

1 file changed

+33
-29
lines changed

1 file changed

+33
-29
lines changed

virt/kvm/arm/pmu.c

Lines changed: 33 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@
1515
#include <kvm/arm_vgic.h>
1616

1717
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
18+
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
19+
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
1820

1921
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
2022

@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
7577

7678
return pmc;
7779
}
80+
static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
81+
{
82+
if (kvm_pmu_idx_is_high_counter(pmc->idx))
83+
return pmc - 1;
84+
else
85+
return pmc + 1;
86+
}
7887

7988
/**
8089
* kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
@@ -294,15 +303,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
294303

295304
pmc = &pmu->pmc[i];
296305

297-
/*
298-
* For high counters of chained events we must recreate the
299-
* perf event with the long (64bit) attribute set.
300-
*/
301-
if (kvm_pmu_pmc_is_chained(pmc) &&
302-
kvm_pmu_idx_is_high_counter(i)) {
303-
kvm_pmu_create_perf_event(vcpu, i);
304-
continue;
305-
}
306+
/* A change in the enable state may affect the chain state */
307+
kvm_pmu_update_pmc_chained(vcpu, i);
308+
kvm_pmu_create_perf_event(vcpu, i);
306309

307310
/* At this point, pmc must be the canonical */
308311
if (pmc->perf_event) {
@@ -335,15 +338,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
335338

336339
pmc = &pmu->pmc[i];
337340

338-
/*
339-
* For high counters of chained events we must recreate the
340-
* perf event with the long (64bit) attribute unset.
341-
*/
342-
if (kvm_pmu_pmc_is_chained(pmc) &&
343-
kvm_pmu_idx_is_high_counter(i)) {
344-
kvm_pmu_create_perf_event(vcpu, i);
345-
continue;
346-
}
341+
/* A change in the enable state may affect the chain state */
342+
kvm_pmu_update_pmc_chained(vcpu, i);
343+
kvm_pmu_create_perf_event(vcpu, i);
347344

348345
/* At this point, pmc must be the canonical */
349346
if (pmc->perf_event)
@@ -585,15 +582,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
585582

586583
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
587584

588-
if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
585+
if (kvm_pmu_pmc_is_chained(pmc)) {
589586
/**
590587
* The initial sample period (overflow count) of an event. For
591588
* chained counters we only support overflow interrupts on the
592589
* high counter.
593590
*/
594591
attr.sample_period = (-counter) & GENMASK(63, 0);
595-
if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
596-
attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
592+
attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
597593

598594
event = perf_event_create_kernel_counter(&attr, -1, current,
599595
kvm_pmu_perf_overflow,
@@ -624,25 +620,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
624620
* @select_idx: The number of selected counter
625621
*
626622
* Update the chained bitmap based on the event type written in the
627-
* typer register.
623+
* typer register and the enable state of the odd register.
628624
*/
629625
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
	bool new_state, old_state;

	/*
	 * A pair may only be treated as chained when the event type
	 * requests chaining AND the odd (high) counter is enabled.
	 */
	old_state = kvm_pmu_pmc_is_chained(pmc);
	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

	if (old_state == new_state)
		return;

	/* The chain state flipped: tear down the current canonical event. */
	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
	kvm_pmu_stop_counter(vcpu, canonical_pmc);
	if (new_state) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
		return;
	}
	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
647651

648652
/**

0 commit comments

Comments
 (0)