@@ -23,233 +23,204 @@ unsigned int kvm_arm_vmid_bits;
 
 unsigned int kvm_host_sve_max_vl;
 
-/*
- * Set trap register values based on features in ID_AA64PFR0.
- */
-static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
+static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
-	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
-	u64 hcr_set = HCR_RW;
-	u64 hcr_clear = 0;
-	u64 cptr_set = 0;
-	u64 cptr_clear = 0;
-
-	/* Protected KVM does not support AArch32 guests. */
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
-		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL0_IMP);
-	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
-		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL1_IMP);
-
-	/*
-	 * Linux guests assume support for floating-point and Advanced SIMD. Do
-	 * not change the trapping behavior for these from the KVM default.
-	 */
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
-				PVM_ID_AA64PFR0_ALLOW));
-	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
-				PVM_ID_AA64PFR0_ALLOW));
+	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
 
 	if (has_hvhe())
-		hcr_set |= HCR_E2H;
+		vcpu->arch.hcr_el2 |= HCR_E2H;
 
-	/* Trap RAS unless all current versions are supported */
-	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
-	    ID_AA64PFR0_EL1_RAS_V1P1) {
-		hcr_set |= HCR_TERR | HCR_TEA;
-		hcr_clear |= HCR_FIEN;
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+		/* route synchronous external abort exceptions to EL2 */
+		vcpu->arch.hcr_el2 |= HCR_TEA;
+		/* trap error record accesses */
+		vcpu->arch.hcr_el2 |= HCR_TERR;
 	}
 
-	/* Trap AMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
-		hcr_clear |= HCR_AMVOFFEN;
-		cptr_set |= CPTR_EL2_TAM;
-	}
+	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+		vcpu->arch.hcr_el2 |= HCR_FWB;
 
-	/* Trap SVE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
-		if (has_hvhe())
-			cptr_clear |= CPACR_ELx_ZEN;
-		else
-			cptr_set |= CPTR_EL2_TZ;
-	}
+	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+		vcpu->arch.hcr_el2 |= HCR_TID4;
+	else
+		vcpu->arch.hcr_el2 |= HCR_TID2;
 
-	vcpu->arch.hcr_el2 |= hcr_set;
-	vcpu->arch.hcr_el2 &= ~hcr_clear;
-	vcpu->arch.cptr_el2 |= cptr_set;
-	vcpu->arch.cptr_el2 &= ~cptr_clear;
+	if (vcpu_has_ptrauth(vcpu))
+		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
 }
 
-/*
- * Set trap register values based on features in ID_AA64PFR1.
- */
-static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
+static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
 {
-	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
-	u64 hcr_set = 0;
-	u64 hcr_clear = 0;
+	const u64 id_aa64pfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
+	const u64 id_aa64pfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
+	const u64 id_aa64mmfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
+	u64 val = vcpu->arch.hcr_el2;
+
+	/* No support for AArch32. */
+	val |= HCR_RW;
+
+	if (has_hvhe())
+		val |= HCR_E2H;
+
+	/*
+	 * Always trap:
+	 * - Feature id registers: to control features exposed to guests
+	 * - Implementation-defined features
+	 */
+	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;
+
+	/* Trap RAS unless all current versions are supported */
+	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), id_aa64pfr0) <
+	    ID_AA64PFR0_EL1_RAS_V1P1) {
+		val |= HCR_TERR | HCR_TEA;
+		val &= ~(HCR_FIEN);
+	}
+
+	/* Trap AMU */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), id_aa64pfr0))
+		val &= ~(HCR_AMVOFFEN);
 
 	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
-		hcr_set |= HCR_TID5;
-		hcr_clear |= HCR_DCT | HCR_ATA;
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), id_aa64pfr1)) {
+		val |= HCR_TID5;
+		val &= ~(HCR_DCT | HCR_ATA);
 	}
 
-	vcpu->arch.hcr_el2 |= hcr_set;
-	vcpu->arch.hcr_el2 &= ~hcr_clear;
+	/* Trap LOR */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), id_aa64mmfr1))
+		val |= HCR_TLOR;
+
+	vcpu->arch.hcr_el2 = val;
 }
 
-/*
- * Set trap register values based on features in ID_AA64DFR0.
- */
-static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
+static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
 {
-	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
-	u64 mdcr_set = 0;
-	u64 mdcr_clear = 0;
-	u64 cptr_set = 0;
+	const u64 id_aa64pfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
+	const u64 id_aa64pfr1 = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
+	const u64 id_aa64dfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+	u64 val = vcpu->arch.cptr_el2;
 
-	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
-		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
-		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
-			      MDCR_EL2_HPMN_MASK;
+	if (!has_hvhe()) {
+		val |= CPTR_NVHE_EL2_RES1;
+		val &= ~(CPTR_NVHE_EL2_RES0);
 	}
 
-	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
-		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
-
-	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
-		mdcr_set |= MDCR_EL2_TDOSA;
+	/* Trap AMU */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), id_aa64pfr0))
+		val |= CPTR_EL2_TAM;
 
-	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
-		mdcr_set |= MDCR_EL2_TPMS;
-		mdcr_clear |= MDCR_EL2_E2PB_MASK;
+	/* Trap SVE */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), id_aa64pfr0)) {
+		if (has_hvhe())
+			val &= ~(CPACR_ELx_ZEN);
+		else
+			val |= CPTR_EL2_TZ;
 	}
 
-	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
-		mdcr_set |= MDCR_EL2_TTRF;
+	/* No SME support in KVM. */
+	BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME), id_aa64pfr1));
+	if (has_hvhe())
+		val &= ~(CPACR_ELx_SMEN);
+	else
+		val |= CPTR_EL2_TSM;
 
 	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), id_aa64dfr0)) {
 		if (has_hvhe())
-			cptr_set |= CPACR_EL1_TTA;
+			val |= CPACR_EL1_TTA;
 		else
-			cptr_set |= CPTR_EL2_TTA;
+			val |= CPTR_EL2_TTA;
 	}
 
-	/* Trap External Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
-		mdcr_clear |= MDCR_EL2_E2TB_MASK;
-
-	vcpu->arch.mdcr_el2 |= mdcr_set;
-	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
-	vcpu->arch.cptr_el2 |= cptr_set;
-}
-
-/*
- * Set trap register values based on features in ID_AA64MMFR0.
- */
-static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
-{
-	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
-	u64 mdcr_set = 0;
-
-	/* Trap Debug Communications Channel registers */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
-		mdcr_set |= MDCR_EL2_TDCC;
-
-	vcpu->arch.mdcr_el2 |= mdcr_set;
+	vcpu->arch.cptr_el2 = val;
 }
 
-/*
- * Set trap register values based on features in ID_AA64MMFR1.
- */
-static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
-{
-	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
-	u64 hcr_set = 0;
-
-	/* Trap LOR */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
-		hcr_set |= HCR_TLOR;
-
-	vcpu->arch.hcr_el2 |= hcr_set;
-}
-
-/*
- * Set baseline trap register values.
- */
-static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
+static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
 {
-	const u64 hcr_trap_feat_regs = HCR_TID3;
-	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;
-
-	/*
-	 * Always trap:
-	 * - Feature id registers: to control features exposed to guests
-	 * - Implementation-defined features
-	 */
-	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;
+	const u64 id_aa64dfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+	const u64 id_aa64mmfr0 = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
+	u64 val = vcpu->arch.mdcr_el2;
 
-	/* Clear res0 and set res1 bits to trap potential new features. */
-	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
-	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
-	if (!has_hvhe()) {
-		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
-		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+	/* Trap/constrain PMU */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), id_aa64dfr0)) {
+		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
+		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
 	}
-}
 
-static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+	/* Trap Debug */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0))
+		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;
 
-	if (has_hvhe())
-		vcpu->arch.hcr_el2 |= HCR_E2H;
+	/* Trap OS Double Lock */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), id_aa64dfr0))
+		val |= MDCR_EL2_TDOSA;
 
-	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
-		/* route synchronous external abort exceptions to EL2 */
-		vcpu->arch.hcr_el2 |= HCR_TEA;
-		/* trap error record accesses */
-		vcpu->arch.hcr_el2 |= HCR_TERR;
+	/* Trap SPE */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), id_aa64dfr0)) {
+		val |= MDCR_EL2_TPMS;
+		val &= ~MDCR_EL2_E2PB_MASK;
 	}
 
-	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
-		vcpu->arch.hcr_el2 |= HCR_FWB;
+	/* Trap Trace Filter */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), id_aa64dfr0))
+		val |= MDCR_EL2_TTRF;
 
-	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
-	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
-		vcpu->arch.hcr_el2 |= HCR_TID4;
-	else
-		vcpu->arch.hcr_el2 |= HCR_TID2;
+	/* Trap External Trace */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), id_aa64dfr0))
+		val |= MDCR_EL2_E2TB_MASK;
 
-	if (vcpu_has_ptrauth(vcpu))
-		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+	/* Trap Debug Communications Channel registers */
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), id_aa64mmfr0))
+		val |= MDCR_EL2_TDCC;
+
+	vcpu->arch.mdcr_el2 = val;
 }
 
 /*
  * Initialize trap register values in protected mode.
  */
-static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
+static void pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
+	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
+
 	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
 	vcpu->arch.mdcr_el2 = 0;
 
 	pkvm_vcpu_reset_hcr(vcpu);
 
-	if ((!vcpu_is_protected(vcpu)))
+	if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu)))
 		return;
 
-	pvm_init_trap_regs(vcpu);
-	pvm_init_traps_aa64pfr0(vcpu);
-	pvm_init_traps_aa64pfr1(vcpu);
-	pvm_init_traps_aa64dfr0(vcpu);
-	pvm_init_traps_aa64mmfr0(vcpu);
-	pvm_init_traps_aa64mmfr1(vcpu);
+	/*
+	 * PAuth is allowed if supported by the system and the vcpu.
+	 * Properly checking for PAuth requires checking various fields in
+	 * ID_AA64ISAR1_EL1 and ID_AA64ISAR2_EL1. The way that fixed config
+	 * is controlled now in pKVM does not easily allow that. This will
+	 * change later to follow the changes upstream wrt fixed configuration
+	 * and nested virt.
+	 */
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI),
+				PVM_ID_AA64ISAR1_ALLOW));
+
+	/* Protected KVM does not support AArch32 guests. */
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL0_IMP);
+	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+		PVM_ID_AA64PFR0_ALLOW) != ID_AA64PFR0_EL1_EL1_IMP);
+
+	/*
+	 * Linux guests assume support for floating-point and Advanced SIMD. Do
+	 * not change the trapping behavior for these from the KVM default.
+	 */
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
+				PVM_ID_AA64PFR0_ALLOW));
+	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
+				PVM_ID_AA64PFR0_ALLOW));
+
+	pvm_init_traps_hcr(vcpu);
+	pvm_init_traps_cptr(vcpu);
+	pvm_init_traps_mdcr(vcpu);
 }
 
 /*
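Aside on the idiom used throughout the hunk above: pvm_read_id_reg() returns the pVM's sanitised 64-bit ID register, and FIELD_GET(ARM64_FEATURE_MASK(field), reg) pulls out one 4-bit feature field, which is then compared against the architected enum values. Below is a minimal standalone sketch of that extraction, using local stand-ins for the kernel's GENMASK_ULL()/FIELD_GET() macros; the RAS/AMU offsets follow the architected ID_AA64PFR0_EL1 layout, but treat all constants here as illustrative rather than copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK_ULL()/FIELD_GET() helpers. */
#define GENMASK64(h, l)		((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET64(mask, reg)	(((reg) & (mask)) >> __builtin_ctzll(mask))

/* ID_AA64PFR0_EL1 fields are 4 bits wide; offsets per the Arm ARM. */
#define ID_AA64PFR0_RAS		GENMASK64(31, 28)
#define ID_AA64PFR0_AMU		GENMASK64(47, 44)
#define ID_AA64PFR0_RAS_V1P1	0x2ULL

int main(void)
{
	/* Pretend ID value: RAS == 1 (v1 only), AMU == 0 (absent). */
	uint64_t id_aa64pfr0 = 1ULL << 28;

	/* Mirrors the RAS check in pvm_init_traps_hcr(). */
	if (FIELD_GET64(ID_AA64PFR0_RAS, id_aa64pfr0) < ID_AA64PFR0_RAS_V1P1)
		printf("trap RAS: set HCR_TERR | HCR_TEA, clear HCR_FIEN\n");

	/* Mirrors the AMU check in pvm_init_traps_cptr(). */
	if (!FIELD_GET64(ID_AA64PFR0_AMU, id_aa64pfr0))
		printf("trap AMU: set CPTR_EL2_TAM\n");

	return 0;
}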
@@ -448,7 +419,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 
 	pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
 	pkvm_vcpu_init_ptrauth(hyp_vcpu);
-	pkvm_vcpu_init_traps(&hyp_vcpu->vcpu);
+	pkvm_vcpu_init_traps(hyp_vcpu);
 done:
 	if (ret)
 		unpin_host_vcpu(host_vcpu);
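The mechanical shape of the refactor is the same in all three new helpers: instead of accumulating separate set/clear masks and applying both at the very end, each function snapshots the trap register into a local val, edits it in place next to the feature check that motivates the change, and writes it back once. A contrived before/after sketch of the two shapes (types and names are illustrative, not from the kernel):

#include <assert.h>
#include <stdint.h>

#define TRAP_A	(1ULL << 0)	/* e.g. a "trap on access" bit */
#define TRAP_B	(1ULL << 1)	/* e.g. an "enable for guest" bit */

struct vcpu { uint64_t trap_reg; };

/* Old shape: build set/clear masks, apply both at the end. */
static void init_traps_old(struct vcpu *v, int feature_absent)
{
	uint64_t set = 0, clear = 0;

	if (feature_absent) {
		set |= TRAP_A;
		clear |= TRAP_B;
	}

	v->trap_reg |= set;
	v->trap_reg &= ~clear;
}

/* New shape: snapshot the register, edit in place, write back once. */
static void init_traps_new(struct vcpu *v, int feature_absent)
{
	uint64_t val = v->trap_reg;

	if (feature_absent) {
		val |= TRAP_A;
		val &= ~TRAP_B;
	}

	v->trap_reg = val;
}

int main(void)
{
	struct vcpu a = { .trap_reg = TRAP_B }, b = a;

	init_traps_old(&a, 1);
	init_traps_new(&b, 1);
	assert(a.trap_reg == b.trap_reg && a.trap_reg == TRAP_A);
	return 0;
}

Both shapes produce the same register value; the gain of the second is that each feature's set and clear edits sit next to the check that motivates them, which is what lets the patch fold six per-ID-register helpers into one helper per trap register (HCR_EL2, CPTR_EL2, MDCR_EL2).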