// SPDX-License-Identifier: GPL-2.0-only
/*
 * vpmu_counter_access - Test vPMU event counter access
 *
 * Copyright (c) 2023 Google LLC.
 *
 * This test checks if the guest can see the same number of PMU event
 * counters (PMCR_EL0.N) that userspace sets.
 * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
 */
#include <kvm_util.h>
#include <processor.h>
#include <test_util.h>
#include <vgic.h>
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>

/* The max number of the PMU event counters (excluding the cycle counter) */
#define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)

struct vpmu_vm {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	int gic_fd;
};

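/* Shared VM/vCPU state; recreated by create_vpmu_vm() for each testcase */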
static struct vpmu_vm vpmu_vm;

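/* Extract PMCR_EL0.N (bits [15:11]), the number of implemented event counters */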
static uint64_t get_pmcr_n(uint64_t pmcr)
{
	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
}

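/* Replace the PMCR_EL0.N field of @pmcr with @pmcr_n */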
static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
{
	*pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	*pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
}

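/*
 * The guest code is not expected to take any synchronous exception;
 * report any that occurs as a guest assertion failure.
 */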
| 39 | + |
| 40 | +static void guest_sync_handler(struct ex_regs *regs) |
| 41 | +{ |
| 42 | + uint64_t esr, ec; |
| 43 | + |
| 44 | + esr = read_sysreg(esr_el1); |
| 45 | + ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK; |
| 46 | + __GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec); |
| 47 | +} |
| 48 | + |
/*
 * The guest is configured with PMUv3 with @expected_pmcr_n number of
 * event counters.
 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N.
 */
static void guest_code(uint64_t expected_pmcr_n)
{
	uint64_t pmcr, pmcr_n;

	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
		       "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
		       expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);

	pmcr = read_sysreg(pmcr_el0);
	pmcr_n = get_pmcr_n(pmcr);

	/* Make sure that PMCR_EL0.N indicates the value userspace set */
	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
		       "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
		       expected_pmcr_n, pmcr_n);

	GUEST_DONE();
}

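/* Guest physical addresses of the vGICv3 distributor and redistributor frames */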
#define GICD_BASE_GPA	0x8000000ULL
#define GICR_BASE_GPA	0x80A0000ULL

/* Create a VM that has one vCPU with PMUv3 configured. */
static void create_vpmu_vm(void *guest_code)
{
	struct kvm_vcpu_init init;
	uint8_t pmuver, ec;
	uint64_t dfr0, irq = 23;
	struct kvm_device_attr irq_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr = (uint64_t)&irq,
	};
	struct kvm_device_attr init_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
	};

	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
	memset(&vpmu_vm, 0, sizeof(vpmu_vm));

	vpmu_vm.vm = vm_create(1);
	vm_init_descriptor_tables(vpmu_vm.vm);
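	/* Install the same handler for every exception class the guest may take */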
	for (ec = 0; ec < ESR_EC_NUM; ec++) {
		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
					guest_sync_handler);
	}

	/* Create vCPU with PMUv3 */
	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
				       GICD_BASE_GPA, GICR_BASE_GPA);
	__TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
		       "Failed to create vgic-v3, skipping");

	/* Make sure that PMUv3 support is indicated in the ID register */
	vcpu_get_reg(vpmu_vm.vcpu,
		     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
	TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
		    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

	/* Initialize vPMU */
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
}

static void destroy_vpmu_vm(void)
{
	close(vpmu_vm.gic_fd);
	kvm_vm_free(vpmu_vm.vm);
}

static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
{
	struct ucall uc;

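	/* Pass the expected PMCR_EL0.N value to the guest as its only argument */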
	vcpu_args_set(vcpu, 1, pmcr_n);
	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
		break;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
		break;
	}
}

static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
	struct kvm_vcpu *vcpu;
	uint64_t pmcr, pmcr_orig;

	create_vpmu_vm(guest_code);
	vcpu = vpmu_vm.vcpu;

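	/* Snapshot the default PMCR_EL0 so any unexpected change can be detected */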
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
	pmcr = pmcr_orig;

	/*
	 * Setting a larger value of PMCR.N should not modify the field, and
	 * the write itself should still return success.
	 */
	set_pmcr_n(&pmcr, pmcr_n);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);

	if (expect_fail)
		TEST_ASSERT(pmcr_orig == pmcr,
			    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
			    pmcr, pmcr_n);
	else
		TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
			    "Failed to update PMCR.N to %lu (received: %lu)\n",
			    pmcr_n, get_pmcr_n(pmcr));
}

/*
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test.
 */
static void run_test(uint64_t pmcr_n)
{
	uint64_t sp;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_init init;

	pr_debug("Test with pmcr_n %lu\n", pmcr_n);

	test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	/* Save the initial sp to restore it later to run the guest again */
	vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);

	run_vcpu(vcpu, pmcr_n);

	/*
	 * Reset and re-initialize the vCPU, and run the guest code again to
	 * check if PMCR_EL0.N is preserved.
	 */
	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	aarch64_vcpu_setup(vcpu, &init);
	vcpu_init_descriptor_tables(vcpu);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	run_vcpu(vcpu, pmcr_n);

	destroy_vpmu_vm();
}

/*
 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
static void run_error_test(uint64_t pmcr_n)
{
	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

	test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
	destroy_vpmu_vm();
}

/*
 * Return the default number of implemented PMU event counters excluding
 * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
 */
static uint64_t get_pmcr_n_limit(void)
{
	uint64_t pmcr;

	create_vpmu_vm(guest_code);
	vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
	destroy_vpmu_vm();
	return get_pmcr_n(pmcr);
}

int main(void)
{
	uint64_t i, pmcr_n;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));

	pmcr_n = get_pmcr_n_limit();
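	/* Test all valid PMCR_EL0.N values, including 0 (cycle counter only) */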
	for (i = 0; i <= pmcr_n; i++)
		run_test(i);

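	/* Values above the host's PMCR_EL0.N limit must not take effect */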
	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
		run_error_test(i);

	return 0;
}