|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | + |
| 3 | +// Check that, on a GICv3 system, not configuring GICv3 correctly |
| 4 | +// results in all of the sysregs generating an UNDEF exception. |
| 5 | + |
| 6 | +#include <test_util.h> |
| 7 | +#include <kvm_util.h> |
| 8 | +#include <processor.h> |
| 9 | + |
/*
 * Set to true by the guest's UNDEF handler when a trapped sysreg access
 * is taken; cleared by the __check_sr_* macros before each access.
 * volatile: written from the exception handler, read by guest_code.
 */
static volatile bool handled;
| 11 | + |
/*
 * Read SYS_##r and evaluate to the value read (GNU statement expression).
 * 'handled' is cleared first so the UNDEF handler can record whether the
 * access trapped; the dsb(sy) orders the clear before the register read.
 */
#define __check_sr_read(r)					\
	({							\
		uint64_t val;					\
								\
		handled = false;				\
		dsb(sy);					\
		val = read_sysreg_s(SYS_ ## r);			\
		val;						\
	})
| 21 | + |
/*
 * Write 0 to SYS_##r. 'handled' is cleared first so the UNDEF handler
 * can record whether the access trapped; dsb(sy) orders the clear before
 * the write, and the isb() makes the write's effects (or its trap)
 * architecturally visible before the caller inspects 'handled'.
 */
#define __check_sr_write(r)					\
	do {							\
		handled = false;				\
		dsb(sy);					\
		write_sysreg_s(0, SYS_ ## r);			\
		isb();						\
	} while(0)
| 29 | + |
/* Fatal checks: assert that the access actually took an UNDEF trap */

/* Read SYS_##r and fail the guest if the read did not trap. */
#define check_sr_read(r)					\
	do {							\
		__check_sr_read(r);				\
		__GUEST_ASSERT(handled, #r " no read trap");	\
	} while(0)
| 36 | + |
/* Write 0 to SYS_##r and fail the guest if the write did not trap. */
#define check_sr_write(r)					\
	do {							\
		__check_sr_write(r);				\
		__GUEST_ASSERT(handled, #r " no write trap");	\
	} while(0)
| 42 | + |
/* Check that both read and write accesses to SYS_##r trap. */
#define check_sr_rw(r)						\
	do {							\
		check_sr_read(r);				\
		check_sr_write(r);				\
	} while(0)
| 48 | + |
/*
 * Guest entry point: verify that with no vGICv3 configured, the GIC
 * feature is hidden from ID_AA64PFR0_EL1 and every ICC_* system register
 * access UNDEFs (except possibly ICC_SRE_EL1, see below).
 */
static void guest_code(void)
{
	uint64_t val;

	/*
	 * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
	 * hidden the feature at runtime without any other userspace action.
	 */
	__GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC),
				 read_sysreg(id_aa64pfr0_el1)) == 0,
		       "GICv3 wrongly advertised");

	/*
	 * Access all GICv3 registers, and fail if we don't get an UNDEF.
	 * Note that we happily access all the APxRn registers without
	 * checking their existence, as all we want to see is a failure.
	 */
	check_sr_rw(ICC_PMR_EL1);
	check_sr_read(ICC_IAR0_EL1);
	check_sr_write(ICC_EOIR0_EL1);
	check_sr_rw(ICC_HPPIR0_EL1);
	check_sr_rw(ICC_BPR0_EL1);
	check_sr_rw(ICC_AP0R0_EL1);
	check_sr_rw(ICC_AP0R1_EL1);
	check_sr_rw(ICC_AP0R2_EL1);
	check_sr_rw(ICC_AP0R3_EL1);
	check_sr_rw(ICC_AP1R0_EL1);
	check_sr_rw(ICC_AP1R1_EL1);
	check_sr_rw(ICC_AP1R2_EL1);
	check_sr_rw(ICC_AP1R3_EL1);
	check_sr_write(ICC_DIR_EL1);
	check_sr_read(ICC_RPR_EL1);
	check_sr_write(ICC_SGI1R_EL1);
	check_sr_write(ICC_ASGI1R_EL1);
	check_sr_write(ICC_SGI0R_EL1);
	check_sr_read(ICC_IAR1_EL1);
	check_sr_write(ICC_EOIR1_EL1);
	check_sr_rw(ICC_HPPIR1_EL1);
	check_sr_rw(ICC_BPR1_EL1);
	check_sr_rw(ICC_CTLR_EL1);
	check_sr_rw(ICC_IGRPEN0_EL1);
	check_sr_rw(ICC_IGRPEN1_EL1);

	/*
	 * ICC_SRE_EL1 may not be trappable, as ICC_SRE_EL2.Enable can
	 * be RAO/WI. Engage in non-fatal accesses, starting with a
	 * write of 0 to try and disable SRE, and let's see if it
	 * sticks.
	 */
	__check_sr_write(ICC_SRE_EL1);
	if (!handled)
		GUEST_PRINTF("ICC_SRE_EL1 write not trapping (OK)\n");

	/* If the read doesn't trap either, SRE (bit 0) must still read 1. */
	val = __check_sr_read(ICC_SRE_EL1);
	if (!handled) {
		__GUEST_ASSERT((val & BIT(0)),
			       "ICC_SRE_EL1 not trapped but ICC_SRE_EL1.SRE not set\n");
		GUEST_PRINTF("ICC_SRE_EL1 read not trapping (OK)\n");
	}

	GUEST_DONE();
}
| 111 | + |
| 112 | +static void guest_undef_handler(struct ex_regs *regs) |
| 113 | +{ |
| 114 | + /* Success, we've gracefully exploded! */ |
| 115 | + handled = true; |
| 116 | + regs->pc += 4; |
| 117 | +} |
| 118 | + |
| 119 | +static void test_run_vcpu(struct kvm_vcpu *vcpu) |
| 120 | +{ |
| 121 | + struct ucall uc; |
| 122 | + |
| 123 | + do { |
| 124 | + vcpu_run(vcpu); |
| 125 | + |
| 126 | + switch (get_ucall(vcpu, &uc)) { |
| 127 | + case UCALL_ABORT: |
| 128 | + REPORT_GUEST_ASSERT(uc); |
| 129 | + break; |
| 130 | + case UCALL_PRINTF: |
| 131 | + printf("%s", uc.buffer); |
| 132 | + break; |
| 133 | + case UCALL_DONE: |
| 134 | + break; |
| 135 | + default: |
| 136 | + TEST_FAIL("Unknown ucall %lu", uc.cmd); |
| 137 | + } |
| 138 | + } while (uc.cmd != UCALL_DONE); |
| 139 | +} |
| 140 | + |
| 141 | +static void test_guest_no_gicv3(void) |
| 142 | +{ |
| 143 | + struct kvm_vcpu *vcpu; |
| 144 | + struct kvm_vm *vm; |
| 145 | + |
| 146 | + /* Create a VM without a GICv3 */ |
| 147 | + vm = vm_create_with_one_vcpu(&vcpu, guest_code); |
| 148 | + |
| 149 | + vm_init_descriptor_tables(vm); |
| 150 | + vcpu_init_descriptor_tables(vcpu); |
| 151 | + |
| 152 | + vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, |
| 153 | + ESR_EC_UNKNOWN, guest_undef_handler); |
| 154 | + |
| 155 | + test_run_vcpu(vcpu); |
| 156 | + |
| 157 | + kvm_vm_free(vm); |
| 158 | +} |
| 159 | + |
| 160 | +int main(int argc, char *argv[]) |
| 161 | +{ |
| 162 | + struct kvm_vcpu *vcpu; |
| 163 | + struct kvm_vm *vm; |
| 164 | + uint64_t pfr0; |
| 165 | + |
| 166 | + vm = vm_create_with_one_vcpu(&vcpu, NULL); |
| 167 | + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &pfr0); |
| 168 | + __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0), |
| 169 | + "GICv3 not supported."); |
| 170 | + kvm_vm_free(vm); |
| 171 | + |
| 172 | + test_guest_no_gicv3(); |
| 173 | + |
| 174 | + return 0; |
| 175 | +} |
0 commit comments