@@ -52,9 +52,7 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 
 static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
 {
-	const u64 id_aa64pfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-	const u64 id_aa64pfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
-	const u64 id_aa64mmfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
+	struct kvm *kvm = vcpu->kvm;
 	u64 val = vcpu->arch.hcr_el2;
 
 	/* No support for AArch32. */
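
Note: this hunk sets the pattern for the whole patch: each pvm_read_id_reg() + FIELD_GET(ARM64_FEATURE_MASK(...)) pair becomes a single kvm_has_feat(kvm, reg, field, min_level) test against the VM's ID-register view. As a rough model only (the real macro also handles signed fields and reads the VM-scoped register; all names below are illustrative), a kvm_has_feat()-style test extracts an ID-register field and compares it with a required minimum:

/*
 * A minimal sketch, assuming a 4-bit unsigned ID-register field; the
 * real kvm_has_feat() also handles signed fields and reads the VM's
 * ID-register view. Names here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int id_field_get(uint64_t reg, unsigned int shift)
{
	/* Most ID-register fields are 4 bits wide. */
	return (reg >> shift) & 0xf;
}

/* True if the field value meets the required minimum level. */
static int has_feat(uint64_t reg, unsigned int shift, unsigned int min)
{
	return id_field_get(reg, shift) >= min;
}

int main(void)
{
	/* ID_AA64PFR0_EL1.RAS lives in bits [31:28]; 1 means FEAT_RAS. */
	uint64_t id_aa64pfr0 = 1ULL << 28;

	printf("RAS implemented: %d\n", has_feat(id_aa64pfr0, 28, 1));
	return 0;
}
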
@@ -70,62 +68,54 @@ static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
 	 */
 	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;
 
-	/* Trap RAS */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), id_aa64pfr0)) {
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
 		val |= HCR_TERR | HCR_TEA;
 		val &= ~(HCR_FIEN);
 	}
 
-	/* Trap AMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), id_aa64pfr0))
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
 		val &= ~(HCR_AMVOFFEN);
 
-	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), id_aa64pfr1)) {
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
 		val |= HCR_TID5;
 		val &= ~(HCR_DCT | HCR_ATA);
 	}
 
-	/* Trap LOR */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), id_aa64mmfr1))
+	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
 		val |= HCR_TLOR;
 
 	vcpu->arch.hcr_el2 = val;
 }
 
 static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
 {
-	const u64 id_aa64pfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-	const u64 id_aa64pfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
-	const u64 id_aa64dfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+	struct kvm *kvm = vcpu->kvm;
 	u64 val = vcpu->arch.cptr_el2;
 
 	if (!has_hvhe()) {
 		val |= CPTR_NVHE_EL2_RES1;
 		val &= ~(CPTR_NVHE_EL2_RES0);
 	}
 
-	/* Trap AMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), id_aa64pfr0))
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
 		val |= CPTR_EL2_TAM;
 
-	/* Trap SVE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), id_aa64pfr0)) {
+	/* SVE can be disabled by userspace even if supported. */
+	if (!vcpu_has_sve(vcpu)) {
 		if (has_hvhe())
 			val &= ~(CPACR_ELx_ZEN);
 		else
 			val |= CPTR_EL2_TZ;
 	}
 
 	/* No SME support in KVM. */
-	BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME), id_aa64pfr1));
+	BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
 	if (has_hvhe())
 		val &= ~(CPACR_ELx_SMEN);
 	else
 		val |= CPTR_EL2_TSM;
 
-	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), id_aa64dfr0)) {
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
 		if (has_hvhe())
 			val |= CPACR_EL1_TTA;
 		else
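
Note: the SVE case is the one check that intentionally does not move to kvm_has_feat(). It switches to vcpu_has_sve() because userspace can disable SVE for a vcpu even when the hardware supports it, so the ID-register view alone is not enough. A minimal sketch of that distinction, with invented names rather than kernel API:

/*
 * Invented names, not kernel API: models a per-vcpu opt-out that can
 * override what the ID registers advertise.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_vcpu {
	bool sys_has_sve;	/* what the ID registers advertise */
	bool sve_enabled;	/* what userspace configured for this vcpu */
};

static bool vcpu_uses_sve(const struct fake_vcpu *vcpu)
{
	/* Both must hold before the SVE traps may be left disabled. */
	return vcpu->sys_has_sve && vcpu->sve_enabled;
}

int main(void)
{
	struct fake_vcpu vcpu = { .sys_has_sve = true, .sve_enabled = false };

	/* Hardware has SVE, but this vcpu still gets the trap set. */
	printf("trap SVE: %d\n", !vcpu_uses_sve(&vcpu));
	return 0;
}
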
@@ -137,40 +127,33 @@ static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
 
 static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
 {
-	const u64 id_aa64dfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
-	const u64 id_aa64mmfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
+	struct kvm *kvm = vcpu->kvm;
 	u64 val = vcpu->arch.mdcr_el2;
 
-	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), id_aa64dfr0)) {
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
 		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
 		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
 	}
 
-	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0))
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
 		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;
 
-	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), id_aa64dfr0))
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
 		val |= MDCR_EL2_TDOSA;
 
-	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), id_aa64dfr0)) {
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
 		val |= MDCR_EL2_TPMS;
 		val &= ~MDCR_EL2_E2PB_MASK;
 	}
 
-	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), id_aa64dfr0))
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
 		val |= MDCR_EL2_TTRF;
 
-	/* Trap External Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), id_aa64dfr0))
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
 		val |= MDCR_EL2_E2TB_MASK;
 
 	/* Trap Debug Communications Channel registers */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), id_aa64mmfr0))
+	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
 		val |= MDCR_EL2_TDCC;
 
 	vcpu->arch.mdcr_el2 = val;
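
Note: every branch in pvm_init_traps_mdcr() follows the same shape: if the VM lacks a debug or trace feature, set the MDCR_EL2 bits that trap the matching registers. Purely as an illustration (the bit positions below are invented, and the kernel keeps the open-coded if-chain, which is easier to cross-check against the architecture manual), the pattern could be restated table-driven:

/*
 * Illustration only: bit positions are invented; not the kernel's
 * structure.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct trap_rule {
	bool feat_present;	/* result of a kvm_has_feat()-style check */
	uint64_t trap_bits;	/* bits to set when the feature is absent */
};

static uint64_t build_traps(const struct trap_rule *rules, int n, uint64_t val)
{
	for (int i = 0; i < n; i++)
		if (!rules[i].feat_present)
			val |= rules[i].trap_bits;
	return val;
}

int main(void)
{
	const struct trap_rule rules[] = {
		{ .feat_present = false, .trap_bits = UINT64_C(1) << 9 },
		{ .feat_present = true,  .trap_bits = UINT64_C(1) << 19 },
	};

	/* Only the first rule fires: bit 9 is set, bit 19 is not. */
	printf("traps = %#llx\n",
	       (unsigned long long)build_traps(rules, 2, 0));
	return 0;
}
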
@@ -182,31 +165,24 @@ static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
  */
 static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * PAuth is allowed if supported by the system and the vcpu.
-	 * Properly checking for PAuth requires checking various fields in
-	 * ID_AA64ISAR1_EL1 and ID_AA64ISAR2_EL1. The way that fixed config
-	 * is controlled now in pKVM does not easily allow that. This will
-	 * change later to follow the changes upstream wrt fixed configuration
-	 * and nested virt.
-	 */
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI),
-				PVM_ID_AA64ISAR1_ALLOW));
+	struct kvm *kvm = vcpu->kvm;
 
 	/* Protected KVM does not support AArch32 guests. */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL0_IMP);
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL1_IMP);
+	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
+	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
+		return -EINVAL;
 
 	/*
 	 * Linux guests assume support for floating-point and Advanced SIMD. Do
 	 * not change the trapping behavior for these from the KVM default.
 	 */
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
-				PVM_ID_AA64PFR0_ALLOW));
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
-				PVM_ID_AA64PFR0_ALLOW));
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
+	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
+		return -EINVAL;
+
+	/* No SME support in KVM right now. Check to catch if it changes. */
+	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
+		return -EINVAL;
 
 	return 0;
 }
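
Note: pkvm_check_pvm_cpu_features() trades BUILD_BUG_ON() for a runtime -EINVAL because the old checks tested compile-time constants (the PVM_ID_*_ALLOW masks), while the new ones test per-VM ID-register state that only exists at run time. A standalone sketch of the same shape, with an illustrative caller (check_pvm_features() is not the kernel function; FP is ID_AA64PFR0_EL1 bits [19:16], where 0xf means not implemented):

/*
 * Standalone model of a runtime feature check rejecting a VM
 * configuration; names are illustrative.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int check_pvm_features(uint64_t id_aa64pfr0)
{
	/* FP is a signed field: 0xf (-1) means not implemented. */
	if (((id_aa64pfr0 >> 16) & 0xf) == 0xf)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* A VM view advertising no FP support must be rejected. */
	int ret = check_pvm_features(UINT64_C(0xf) << 16);

	printf("init %s (%d)\n", ret ? "rejected" : "ok", ret);
	return 0;
}
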