@@ -124,33 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-}
-
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
-}
-
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(SYS_ELR);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, SYS_ELR);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
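This first hunk flattens the GP-register accessors: vcpu_pc() and vcpu_cpsr() drop the intermediate .regs member, and the ELR_EL1 helpers leave this header entirely. A minimal sketch of the layout shift, assuming vcpu_gp_regs() now returns a struct user_pt_regs * rather than pointing into the UAPI struct kvm_regs (the "new" comment is an assumption, not quoted from the tree):

/* Old backing store: the UAPI struct kvm_regs, so accessors had to
 * reach through the embedded user_pt_regs, hence "->regs.pc". */
struct kvm_regs {
	struct user_pt_regs regs;	/* pc, pstate, regs[31], sp */
	__u64 sp_el1;
	__u64 elr_el1;
	__u64 spsr[KVM_NR_SPSR];
	struct user_fpsimd_state fp_regs;
};

/* New backing store (assumed): the vcpu context embeds a
 * struct user_pt_regs directly, so one dereference suffices:
 *	*vcpu_pc(vcpu)   == vcpu_gp_regs(vcpu)->pc
 *	*vcpu_cpsr(vcpu) == vcpu_gp_regs(vcpu)->pstate
 */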
@@ -179,14 +158,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						   u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +176,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		return read_sysreg_el1(SYS_SPSR);
 	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+		return __vcpu_sys_reg(vcpu, SPSR_EL1);
 }
 
 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +189,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		write_sysreg_el1(v, SYS_SPSR);
 	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
 }
 
 /*
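The two SPSR hunks are symmetric: when the guest's sysregs are not loaded on the hardware, SPSR_EL1 is now read and written through the generic __vcpu_sys_reg() storage instead of a dedicated spsr[KVM_SPSR_EL1] slot. Callers see no difference, as in this sketch (the helper below is illustrative; DBG_SPSR_SS is the arm64 SPSR software-step bit):

#include <asm/debug-monitors.h>	/* DBG_SPSR_SS */

/* Illustrative caller: flip the guest's SPSR_EL1 software-step bit
 * through the accessors above, without caring where SPSR is stored. */
static void set_guest_single_step(struct kvm_vcpu *vcpu, bool on)
{
	unsigned long spsr = vcpu_read_spsr(vcpu);

	if (on)
		spsr |= DBG_SPSR_SS;
	else
		spsr &= ~DBG_SPSR_SS;

	vcpu_write_spsr(vcpu, spsr);
}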
@@ -259,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -291,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
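Everything from kvm_vcpu_hvc_get_imm() down is a mechanical rename: kvm_vcpu_get_hsr() becomes kvm_vcpu_get_esr(), matching the ARMv8 register name (ESR_EL2) rather than the ARMv7 one (HSR). The decoded syndrome bits are untouched, so a caller that unpacks a data abort looks the same before and after, modulo the name underneath. A sketch built purely on the helpers above (the decode function itself is illustrative, not part of the patch):

#include <linux/errno.h>	/* -EINVAL */

/* Illustrative MMIO-style decode using the renamed accessors. */
static int decode_guest_dabt(struct kvm_vcpu *vcpu, bool *is_write,
			     unsigned int *len, int *rt)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EINVAL;			/* ISS not valid, cannot decode */

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*len = kvm_vcpu_dabt_get_as(vcpu);	/* access size in bytes */
	*rt = kvm_vcpu_dabt_get_rd(vcpu);	/* transfer register */
	return 0;
}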
@@ -358,15 +337,15 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -387,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
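Two renames close out the syndrome-accessor changes: kvm_vcpu_sys_get_rt() picks up the new kvm_vcpu_get_esr() spelling, and just above, kvm_vcpu_dabt_isextabt() became kvm_vcpu_abt_issea(). The latter is about accuracy, not style: the predicate keys off the fault status code, which is shared by instruction and data aborts, so the old dabt prefix was misleading. A hedged sketch of the resulting call-site shape (both handler names below are hypothetical):

/* Illustrative abort router: the same predicate now reads naturally
 * for both instruction and data aborts. */
static int handle_guest_abort(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_issea(vcpu))
		return handle_guest_sea(vcpu);	/* hypothetical SEA path */

	return handle_stage2_fault(vcpu);	/* hypothetical fault path */
}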
@@ -516,14 +495,14 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
 
 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 
-	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }