 #include <kvm/arm_vgic.h>
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 
 #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
 
@@ -75,6 +77,13 @@ static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
 
 	return pmc;
 }
+static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+{
+	if (kvm_pmu_idx_is_high_counter(pmc->idx))
+		return pmc - 1;
+	else
+		return pmc + 1;
+}
 
 /**
  * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
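For readers following the index arithmetic in this hunk: the counters pair up as (2n, 2n+1), the hunks below read as if odd indices are the high halves, the even (low) counter acts as the canonical one of a chained pair, and the chained bitmap is indexed by idx >> 1. Below is a minimal user-space sketch of that arithmetic, not kernel code; canonical_idx() here simply assumes the pair is chained, whereas the real kvm_pmu_get_canonical_pmc() first checks kvm_pmu_pmc_is_chained().

/* Hypothetical user-space model of the pairing helpers; illustration only. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool idx_is_high_counter(uint64_t idx)
{
	return idx & 0x1;		/* odd index = high half of the pair */
}

static uint64_t canonical_idx(uint64_t idx)
{
	return idx & ~0x1ULL;		/* low (even) half, assuming the pair is chained */
}

static uint64_t alternate_idx(uint64_t idx)
{
	return idx_is_high_counter(idx) ? idx - 1 : idx + 1;
}

int main(void)
{
	for (uint64_t idx = 0; idx < 6; idx++)
		printf("idx %llu: canonical %llu, alternate %llu, chained-bitmap bit %llu\n",
		       (unsigned long long)idx,
		       (unsigned long long)canonical_idx(idx),
		       (unsigned long long)alternate_idx(idx),
		       (unsigned long long)(idx >> 1));
	return 0;
}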
@@ -294,15 +303,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 
 		pmc = &pmu->pmc[i];
 
-		/*
-		 * For high counters of chained events we must recreate the
-		 * perf event with the long (64bit) attribute set.
-		 */
-		if (kvm_pmu_pmc_is_chained(pmc) &&
-		    kvm_pmu_idx_is_high_counter(i)) {
-			kvm_pmu_create_perf_event(vcpu, i);
-			continue;
-		}
+		/* A change in the enable state may affect the chain state */
+		kvm_pmu_update_pmc_chained(vcpu, i);
+		kvm_pmu_create_perf_event(vcpu, i);
 
 		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event) {
@@ -335,15 +338,9 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 
 		pmc = &pmu->pmc[i];
 
-		/*
-		 * For high counters of chained events we must recreate the
-		 * perf event with the long (64bit) attribute unset.
-		 */
-		if (kvm_pmu_pmc_is_chained(pmc) &&
-		    kvm_pmu_idx_is_high_counter(i)) {
-			kvm_pmu_create_perf_event(vcpu, i);
-			continue;
-		}
+		/* A change in the enable state may affect the chain state */
+		kvm_pmu_update_pmc_chained(vcpu, i);
+		kvm_pmu_create_perf_event(vcpu, i);
 
 		/* At this point, pmc must be the canonical */
 		if (pmc->perf_event)
@@ -585,15 +582,14 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+	if (kvm_pmu_pmc_is_chained(pmc)) {
 		/**
 		 * The initial sample period (overflow count) of an event. For
 		 * chained counters we only support overflow interrupts on the
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 
 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
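Two notes on this hunk. First, attr.config1 is now set unconditionally once the PMC is chained, because the chained state itself already encodes the "odd counter enabled" condition (it is computed in kvm_pmu_update_pmc_chained() below). Second, the sample_period line is easy to misread: GENMASK(63, 0) is simply all 64 bits set, so for a chained pair the period is the two's-complement negation of the combined counter value, i.e. the number of increments left until the 64-bit pair overflows. A small stand-alone example of that arithmetic (not kernel code):

/* Worked example of the sample_period computation, outside the kernel. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t genmask_63_0 = ~0ULL;			/* GENMASK(63, 0): all 64 bits */
	uint64_t counter = 0xFFFFFFFFFFFFFFF0ULL;	/* example combined pair value */
	uint64_t sample_period = (-counter) & genmask_63_0;

	/* 16 increments remain before the 64-bit pair wraps */
	printf("sample_period = %llu\n", (unsigned long long)sample_period);
	return 0;
}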
@@ -624,25 +620,33 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
  * @select_idx: The number of selected counter
  *
  * Update the chained bitmap based on the event type written in the
- * typer register.
+ * typer register and the enable state of the odd register.
  */
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
+	bool new_state, old_state;
+
+	old_state = kvm_pmu_pmc_is_chained(pmc);
+	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
+		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
 
-	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
+	if (old_state == new_state)
+		return;
+
+	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
+	kvm_pmu_stop_counter(vcpu, canonical_pmc);
+	if (new_state) {
 		/*
 		 * During promotion from !chained to chained we must ensure
 		 * the adjacent counter is stopped and its event destroyed
 		 */
-		if (!kvm_pmu_pmc_is_chained(pmc))
-			kvm_pmu_stop_counter(vcpu, pmc);
-
+		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
-	} else {
-		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+		return;
 	}
+	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
 }
 
 /**
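Net effect of this hunk, which is why the enable/disable paths above now call kvm_pmu_update_pmc_chained() on every state change: a pair is treated as chained only when the typer register programs the CHAIN event and the odd (high) counter is enabled, and any transition first stops the canonical counter (plus the alternate one on promotion) before the bitmap bit for the pair is flipped. A stand-alone model of just that decision, with hypothetical booleans standing in for the vcpu state the real function reads:

/* Stand-alone model (not kernel code) of the chaining decision. */
#include <stdio.h>
#include <stdbool.h>

static bool chain_decision(bool has_chain_evtype, bool odd_counter_enabled)
{
	/* new_state in the hunk above: both conditions must hold */
	return has_chain_evtype && odd_counter_enabled;
}

int main(void)
{
	/* A CHAIN event type alone no longer makes the pair chained ... */
	printf("CHAIN evtype, odd disabled -> chained? %d\n",
	       chain_decision(true, false));	/* 0: stays unchained */

	/* ... the odd (high) counter has to be enabled as well. */
	printf("CHAIN evtype, odd enabled  -> chained? %d\n",
	       chain_decision(true, true));	/* 1: pair becomes chained */

	/* Clearing the CHAIN evtype (or disabling the odd counter) demotes it. */
	printf("no CHAIN evtype            -> chained? %d\n",
	       chain_decision(false, true));	/* 0 */
	return 0;
}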