// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <asm/kvm_fixed_config.h>
#include <nvhe/sys_regs.h>
#include <nvhe/trap_handler.h>

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = get_pvm_id_aa64pfr0(vcpu);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
				PVM_ID_AA64PFR0_ALLOW));

	/* Trap RAS unless all current versions are supported */
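	/*
	 * HCR_EL2.TERR traps RAS error-record register accesses and
	 * HCR_EL2.TEA routes synchronous external aborts to EL2; clearing
	 * HCR_EL2.FIEN keeps the RAS fault-injection registers inaccessible
	 * to the guest.
	 */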
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
	    ID_AA64PFR0_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
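	/*
	 * Clearing HCR_EL2.AMVOFFEN disables the Activity Monitors virtual
	 * offsets; CPTR_EL2.TAM traps guest accesses to the AMU registers.
	 */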
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
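	/* CPTR_EL2.TZ traps SVE instructions and SVE register accesses. */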
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
		cptr_set |= CPTR_EL2_TZ;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = get_pvm_id_aa64pfr1(vcpu);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
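	/*
	 * HCR_EL2.TID5 traps GMID_EL1 reads; clearing HCR_EL2.ATA and
	 * HCR_EL2.DCT disables allocation-tag access for the guest and keeps
	 * the HCR_EL2.DC default memory type untagged.
	 */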
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = get_pvm_id_aa64dfr0(vcpu);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
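	/*
	 * MDCR_EL2.TPM and TPMCR trap PMU register accesses; clearing HPME,
	 * MTPME and HPMN leaves the guest with no usable event counters.
	 */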
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
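	/*
	 * MDCR_EL2.TDRA and TDA trap debug ROM and debug register accesses;
	 * MDCR_EL2.TDE also routes debug exceptions to EL2.
	 */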
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
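	/* MDCR_EL2.TDOSA traps the OS lock and OS double-lock registers. */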
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
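	/*
	 * MDCR_EL2.TPMS traps Statistical Profiling register accesses;
	 * clearing E2PB keeps the profiling buffer owned by EL2.
	 */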
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
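	/* MDCR_EL2.TTRF traps the trace filter control register (TRFCR_EL1). */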
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
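	/* CPTR_EL2.TTA traps trace system register accesses. */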
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
		cptr_set |= CPTR_EL2_TTA;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = get_pvm_id_aa64mmfr0(vcpu);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
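	/*
	 * MDCR_EL2.TDCC (available with FEAT_FGT) traps DCC register
	 * accesses from EL0/EL1 to EL2.
	 */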
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = get_pvm_id_aa64mmfr1(vcpu);
	u64 hcr_set = 0;

	/* Trap LOR */
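	/* HCR_EL2.TLOR traps the LORegion registers (LORSA_EL1 and friends). */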
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
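	/*
	 * HCR_EL2.TID3 traps the ID group 3 registers (ID_AA64*_EL1 and
	 * friends); TACR traps ACTLR_EL1, TIDCP traps implementation-defined
	 * system registers and TID1 traps ID group 1 (REVIDR_EL1, AIDR_EL1).
	 */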
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
	vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
	vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
}

/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}
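/*
 * Illustrative sketch only (not part of this file): __pkvm_vcpu_init_traps()
 * is presumably exposed to the host as an nVHE hypercall, so a handler along
 * the following lines would be expected in hyp-main.c. The handler name and
 * call site are assumptions based on the existing hypercall pattern, not
 * something established by this file.
 *
 *	static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
 *	{
 *		DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
 *
 *		__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
 *	}
 */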