@@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
79
79
clr |= ~hfg & __ ## reg ## _nMASK; \
80
80
} while(0)
81
81
82
- #define update_fgt_traps_cs (vcpu , reg , clr , set ) \
82
/*
 * Map a fine-grained trap register name (HFG{R,W}TR_EL2, HFGITR_EL2,
 * HDFG{R,W}TR_EL2, HAFGRTR_EL2) to its fgt_group_id, i.e. the index
 * used into kvm->arch.fgu[].  The read and write halves of a register
 * pair share a single group.
 *
 * @reg is always a compile-time constant, so the switch folds away;
 * passing any other register is a build error via BUILD_BUG_ON().
 */
#define reg_to_fgt_group_id(reg)					\
	({								\
		enum fgt_group_id id;					\
		switch(reg) {						\
		case HFGRTR_EL2:					\
		case HFGWTR_EL2:					\
			id = HFGxTR_GROUP;				\
			break;						\
		case HFGITR_EL2:					\
			id = HFGITR_GROUP;				\
			break;						\
		case HDFGRTR_EL2:					\
		case HDFGWTR_EL2:					\
			id = HDFGRTR_GROUP;				\
			break;						\
		case HAFGRTR_EL2:					\
			id = HAFGRTR_GROUP;				\
			break;						\
		default:						\
			BUILD_BUG_ON(1);				\
		}							\
									\
		id;							\
	})
106
+
107
/*
 * Fold the VM-wide "fine-grained undef" (fgu) configuration for @reg
 * into the trap masks: positive-polarity trap bits (__<reg>_MASK) are
 * ORed into @set, negative-polarity ones (__<reg>_nMASK) into @clr.
 *
 * NOTE(review): @clr and @set are accumulated, not assigned — callers
 * are expected to pre-initialise them and may combine several sources.
 */
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set)			\
	do {								\
		u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)];	\
		set |= hfg & __ ## reg ## _MASK;			\
		clr |= hfg & __ ## reg ## _nMASK;			\
	} while(0)
113
+
114
+ #define update_fgt_traps_cs (hctxt , vcpu , kvm , reg , clr , set ) \
83
115
do { \
84
- struct kvm_cpu_context *hctxt = \
85
- &this_cpu_ptr(&kvm_host_data)->host_ctxt; \
86
116
u64 c = 0, s = 0; \
87
117
\
88
118
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
89
- compute_clr_set(vcpu, reg, c, s); \
119
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
120
+ compute_clr_set(vcpu, reg, c, s); \
121
+ \
122
+ compute_undef_clr_set(vcpu, kvm, reg, c, s); \
123
+ \
90
124
s |= set; \
91
125
c |= clr; \
92
126
if (c || s) { \
@@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
97
131
} \
98
132
} while(0)
99
133
100
/* Save the host's @reg and program the guest view, with no extra bits. */
#define update_fgt_traps(hctxt, vcpu, kvm, reg)		\
	update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
102
136
103
137
/*
104
138
* Validate the fine grain trap masks.
@@ -122,6 +156,7 @@ static inline bool cpu_has_amu(void)
122
156
static inline void __activate_traps_hfgxtr (struct kvm_vcpu * vcpu )
123
157
{
124
158
struct kvm_cpu_context * hctxt = & this_cpu_ptr (& kvm_host_data )-> host_ctxt ;
159
+ struct kvm * kvm = kern_hyp_va (vcpu -> kvm );
125
160
u64 r_clr = 0 , w_clr = 0 , r_set = 0 , w_set = 0 , tmp ;
126
161
u64 r_val , w_val ;
127
162
@@ -157,6 +192,9 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
157
192
compute_clr_set (vcpu , HFGWTR_EL2 , w_clr , w_set );
158
193
}
159
194
195
+ compute_undef_clr_set (vcpu , kvm , HFGRTR_EL2 , r_clr , r_set );
196
+ compute_undef_clr_set (vcpu , kvm , HFGWTR_EL2 , w_clr , w_set );
197
+
160
198
/* The default to trap everything not handled or supported in KVM. */
161
199
tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
162
200
HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1 ;
@@ -172,36 +210,39 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
172
210
write_sysreg_s (r_val , SYS_HFGRTR_EL2 );
173
211
write_sysreg_s (w_val , SYS_HFGWTR_EL2 );
174
212
175
- if (!vcpu_has_nv (vcpu ) || is_hyp_ctxt (vcpu ))
176
- return ;
177
-
178
- update_fgt_traps (vcpu , HFGITR_EL2 );
179
- update_fgt_traps (vcpu , HDFGRTR_EL2 );
180
- update_fgt_traps (vcpu , HDFGWTR_EL2 );
213
+ update_fgt_traps (hctxt , vcpu , kvm , HFGITR_EL2 );
214
+ update_fgt_traps (hctxt , vcpu , kvm , HDFGRTR_EL2 );
215
+ update_fgt_traps (hctxt , vcpu , kvm , HDFGWTR_EL2 );
181
216
182
217
if (cpu_has_amu ())
183
- update_fgt_traps (vcpu , HAFGRTR_EL2 );
218
+ update_fgt_traps (hctxt , vcpu , kvm , HAFGRTR_EL2 );
184
219
}
185
220
221
/*
 * Restore the host's saved value of FGT register @reg on guest exit,
 * but only when the register may have been reprogrammed on entry:
 * either the vcpu runs a nested (non-hyp) context, or some bits of
 * the register are forced to UNDEF via kvm->arch.fgu[].
 */
#define __deactivate_fgt(hctxt, vcpu, kvm, reg)				\
	do {								\
		if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) ||	\
		    kvm->arch.fgu[reg_to_fgt_group_id(reg)])		\
			write_sysreg_s(ctxt_sys_reg(hctxt, reg),	\
				       SYS_ ## reg);			\
	} while(0)
228
+
186
229
/*
 * Undo __activate_traps_hfgxtr(): restore the host's fine-grained
 * trap registers saved in the per-CPU host context on guest exit.
 * No-op on CPUs without FEAT_FGT.
 */
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	/* Translate the kernel VA so the struct is reachable at hyp. */
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	/* HFG{R,W}TR_EL2 are unconditionally written on entry; always restore. */
	write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
	write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);

	/* The remaining registers are only restored if they were touched. */
	__deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
	__deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);

	if (cpu_has_amu())
		__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
}
206
247
207
248
static inline void __activate_traps_common (struct kvm_vcpu * vcpu )
0 commit comments