Commit 3a949f4

Gavin Shan authored and Marc Zyngier committed
KVM: arm64: Rename HSR to ESR
kvm/arm32 hasn't been supported since commit 541ad01 ("arm: Remove 32bit
KVM host support"), so the name HSR (Hyp Syndrome Register) has been
meaningless since then. Rename HSR to ESR accordingly. This shouldn't
cause any functional changes:

* Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() to make the function
  name self-explanatory.

* Rename variables from @hsr to @esr to make them self-explanatory.

Note that renaming the uapi and tracepoint fields would cause ABI changes,
which we should avoid. Specifically, there are 4 related source files in
this regard:

* arch/arm64/include/uapi/asm/kvm.h   (struct kvm_debug_exit_arch::hsr)
* arch/arm64/kvm/handle_exit.c        (struct kvm_debug_exit_arch::hsr)
* arch/arm64/kvm/trace_arm.h          (tracepoints)
* arch/arm64/kvm/trace_handle_exit.h  (tracepoints)

Signed-off-by: Gavin Shan <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Acked-by: Andrew Scull <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 95fa0ba commit 3a949f4
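
Most of this rename reduces to swapping kvm_vcpu_get_hsr() for kvm_vcpu_get_esr() and re-deriving the exception class (EC) from the returned syndrome word. As a refresher, here is a minimal stand-alone C sketch of that EC extraction; the field layout (EC in bits [31:26], IL in bit 25, ISS in bits [24:0]) follows the ARMv8 ESR_ELx encoding mirrored by asm/esr.h, and the sample syndrome value is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* ESR_ELx layout: EC in bits [31:26], IL in bit 25, ISS in bits [24:0]. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FU << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL		(1U << 25)
#define ESR_ELx_ISS_MASK	0x1FFFFFFU

#define ESR_ELx_EC_HVC64	0x16	/* HVC executed in AArch64 state */

int main(void)
{
	uint32_t esr = 0x5A000001;	/* sample syndrome: HVC #1 from AArch64 */

	printf("EC  = %#x (%s)\n", (unsigned)ESR_ELx_EC(esr),
	       ESR_ELx_EC(esr) == ESR_ELx_EC_HVC64 ? "HVC64" : "other");
	printf("IL  = %d\n", !!(esr & ESR_ELx_IL));
	printf("ISS = %#x\n", (unsigned)(esr & ESR_ELx_ISS_MASK));
	return 0;
}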

File tree

7 files changed: +60 −60 lines changed

arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/aarch32.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/sys_regs.c

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 17 additions & 17 deletions
@@ -259,14 +259,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -291,64 +291,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -358,12 +358,12 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -387,7 +387,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
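A side note on the helpers above: kvm_vcpu_dabt_get_as() turns the two SAS (syndrome access size) bits of a data-abort syndrome into an access size in bytes. A small stand-alone sketch of the same computation, assuming the SAS field position (ISS bits [23:22]) from asm/esr.h:

#include <stdint.h>
#include <stdio.h>

/* Data-abort ISS: SAS (syndrome access size) occupies bits [23:22]. */
#define ESR_ELx_SAS_SHIFT	22
#define ESR_ELx_SAS		(3U << ESR_ELx_SAS_SHIFT)

/* Mirrors kvm_vcpu_dabt_get_as(): SAS 0/1/2/3 -> 1/2/4/8 bytes. */
static unsigned int dabt_access_size(uint32_t esr)
{
	return 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

int main(void)
{
	for (uint32_t sas = 0; sas < 4; sas++)
		printf("SAS=%u -> %u-byte access\n", (unsigned)sas,
		       dabt_access_size(sas << ESR_ELx_SAS_SHIFT));
	return 0;
}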

arch/arm64/kvm/handle_exit.c

Lines changed: 16 additions & 16 deletions
@@ -89,7 +89,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = hsr;
+	run->debug.arch.hsr = esr;
 
-	switch (ESR_ELx_EC(hsr)) {
+	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
 		/* fall through */
@@ -135,8 +135,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	case ESR_ELx_EC_BRK64:
 		break;
 	default:
-		kvm_err("%s: un-handled case hsr: %#08x\n",
-			__func__, (unsigned int) hsr);
+		kvm_err("%s: un-handled case esr: %#08x\n",
+			__func__, (unsigned int) esr);
 		ret = -1;
 		break;
 	}
@@ -146,10 +146,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-		      hsr, esr_get_class_string(hsr));
+	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -200,10 +200,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	u8 hsr_ec = ESR_ELx_EC(hsr);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u8 esr_ec = ESR_ELx_EC(esr);
 
-	return arm_exit_handlers[hsr_ec];
+	return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
 		 * HVC/SMC already have an adjusted PC, which we need
 		 * to correct in order to return to after having
 		 * injected the SError.
 		 */
-		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
 			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
 			*vcpu_pc(vcpu) -= adj;
 		}
@@ -307,5 +307,5 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
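
kvm_get_exit_handler() above is a plain rename, but it shows the dispatch scheme: the EC field indexes an array of handler function pointers. Below is a condensed, hypothetical sketch of that pattern; the handler names and the single populated entry are invented for illustration, whereas the real arm_exit_handlers[] covers every ESR_ELx_EC_* value and defaults to kvm_handle_unknown_ec.

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC(esr)		(((esr) >> ESR_ELx_EC_SHIFT) & 0x3F)
#define ESR_ELx_EC_HVC64	0x16

typedef int (*exit_handle_fn)(uint32_t esr);

static int handle_unknown(uint32_t esr)
{
	printf("unknown EC %#x\n", (unsigned)ESR_ELx_EC(esr));
	return 1;
}

static int handle_hvc64(uint32_t esr)
{
	printf("HVC64 trap\n");
	return 1;
}

/* EC-indexed dispatch table. The [first ... last] range initializer is a
 * GNU C extension, also used by the kernel's own arm_exit_handlers[]. */
static exit_handle_fn handlers[0x40] = {
	[0 ... 0x3F]		= handle_unknown,
	[ESR_ELx_EC_HVC64]	= handle_hvc64,
};

int main(void)
{
	uint32_t esr = 0x5A000001;	/* sample HVC64 syndrome */

	return handlers[ESR_ELx_EC(esr)](esr);
}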

arch/arm64/kvm/hyp/aarch32.c

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 	int cond;
 
 	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+	if (kvm_vcpu_get_esr(vcpu) >> 30)
 		return true;
 
 	/* Is condition field valid? */

arch/arm64/kvm/hyp/switch.c

Lines changed: 7 additions & 7 deletions
@@ -356,7 +356,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
-	u8 hsr_ec;
+	u8 esr_ec;
 
 	if (!system_supports_fpsimd())
 		return false;
@@ -371,14 +371,14 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		vhe = has_vhe();
 	}
 
-	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    hsr_ec != ESR_ELx_EC_SVE)
+	esr_ec = kvm_vcpu_trap_get_class(vcpu);
+	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
-		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
 			return false;
 
 	/* Valid trap. Switch the context: */
@@ -437,7 +437,7 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
 	u64 val = vcpu_get_reg(vcpu, rt);
 
@@ -529,7 +529,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (!vcpu_has_ptrauth(vcpu) ||
-	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
 	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
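
The __hyp_handle_fpsimd() hunks boil down to an EC filter: only FP/ASIMD and SVE traps are candidates for a lazy FP context switch, and SVE traps are only taken for SVE guests. A minimal sketch of that filter, assuming the EC encodings from asm/esr.h (0x07 for a trapped FP/ASIMD access, 0x19 for SVE):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC(esr)		(((esr) >> ESR_ELx_EC_SHIFT) & 0x3F)
#define ESR_ELx_EC_FP_ASIMD	0x07	/* trapped FP/Advanced SIMD access */
#define ESR_ELx_EC_SVE		0x19	/* trapped SVE access */

/* Mirrors the filter at the top of __hyp_handle_fpsimd(). */
static bool fpsimd_trap_is_handled(uint32_t esr, bool sve_guest)
{
	uint8_t ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_FP_ASIMD && ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here. */
	if (!sve_guest && ec != ESR_ELx_EC_FP_ASIMD)
		return false;

	return true;
}

int main(void)
{
	printf("FP trap, non-SVE guest:  %d\n",
	       fpsimd_trap_is_handled(0x07U << ESR_ELx_EC_SHIFT, false));
	printf("SVE trap, non-SVE guest: %d\n",
	       fpsimd_trap_is_handled(0x19U << ESR_ELx_EC_SHIFT, false));
	return 0;
}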

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 2 additions & 2 deletions
@@ -426,7 +426,7 @@ static int __hyp_text __vgic_v3_bpr_min(void)
 
 static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -992,7 +992,7 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	bool is_read;
 	u32 sysreg;
 
-	esr = kvm_vcpu_get_hsr(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 	if (vcpu_mode_is_32bit(vcpu)) {
 		if (!kvm_condition_valid(vcpu)) {
 			__kvm_skip_instr(vcpu);

arch/arm64/kvm/mmu.c

Lines changed: 3 additions & 3 deletions
@@ -2079,7 +2079,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * For RAS the host kernel may handle this abort.
 		 * There is no need to pass the error into the guest.
 		 */
-		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
+		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
 			return 1;
 
 		if (unlikely(!is_iabt)) {
@@ -2088,7 +2088,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 	}
 
-	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
+	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
 	/* Check the stage-2 fault is trans. fault or write fault */
@@ -2097,7 +2097,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
-			(unsigned long)kvm_vcpu_get_hsr(vcpu));
+			(unsigned long)kvm_vcpu_get_esr(vcpu));
 		return -EFAULT;
 	}
 

arch/arm64/kvm/sys_regs.c

Lines changed: 14 additions & 14 deletions
@@ -2220,10 +2220,10 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 				struct sys_reg_params *params)
 {
-	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
 	int cp = -1;
 
-	switch(hsr_ec) {
+	switch (esr_ec) {
 	case ESR_ELx_EC_CP15_32:
 	case ESR_ELx_EC_CP15_64:
 		cp = 15;
@@ -2254,17 +2254,17 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 			    size_t nr_specific)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
-	int Rt2 = (hsr >> 10) & 0x1f;
+	int Rt2 = (esr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
-	params.CRm = (hsr >> 1) & 0xf;
-	params.is_write = ((hsr & 1) == 0);
+	params.CRm = (esr >> 1) & 0xf;
+	params.is_write = ((esr & 1) == 0);
 
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op1 = (esr >> 16) & 0xf;
 	params.Op2 = 0;
 	params.CRn = 0;
 
@@ -2311,18 +2311,18 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 			    size_t nr_specific)
 {
 	struct sys_reg_params params;
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
-	params.CRm = (hsr >> 1) & 0xf;
+	params.CRm = (esr >> 1) & 0xf;
 	params.regval = vcpu_get_reg(vcpu, Rt);
-	params.is_write = ((hsr & 1) == 0);
-	params.CRn = (hsr >> 10) & 0xf;
+	params.is_write = ((esr & 1) == 0);
+	params.CRn = (esr >> 10) & 0xf;
 	params.Op0 = 0;
-	params.Op1 = (hsr >> 14) & 0x7;
-	params.Op2 = (hsr >> 17) & 0x7;
+	params.Op1 = (esr >> 14) & 0x7;
+	params.Op2 = (esr >> 17) & 0x7;
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
 	    !emulate_cp(vcpu, &params, global, nr_global)) {
@@ -2421,7 +2421,7 @@ static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
-	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	unsigned long esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
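
As the kvm_handle_cp_32() hunk shows, a trapped 32-bit CP15/CP14 access carries its MCR/MRC operands inside the syndrome. Here is a stand-alone decoder using exactly the bit positions from the code above; the sample syndrome is hand-built (condition bits left clear) so that it decodes to an SCTLR (c1, c0, 0) read.

#include <stdint.h>
#include <stdio.h>

/* Operand fields of a 32-bit CP15/CP14 access syndrome, using the same
 * shifts and masks as kvm_handle_cp_32() above. */
struct cp32_params {
	unsigned int Op1, Op2, CRn, CRm;
	int is_write;
};

static struct cp32_params decode_cp32(uint32_t esr)
{
	struct cp32_params p = {
		.CRm      = (esr >> 1) & 0xf,
		.is_write = (esr & 1) == 0,	/* direction bit: 0 = write */
		.CRn      = (esr >> 10) & 0xf,
		.Op1      = (esr >> 14) & 0x7,
		.Op2      = (esr >> 17) & 0x7,
	};
	return p;
}

int main(void)
{
	uint32_t esr = 0x401;	/* hand-built sample: CRn=1, direction=read */
	struct cp32_params p = decode_cp32(esr);

	printf("%s p15, %u, <Rt>, c%u, c%u, %u\n",
	       p.is_write ? "mcr" : "mrc", p.Op1, p.CRn, p.CRm, p.Op2);
	return 0;
}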
