
Commit d3ec3a0

Marc Zyngier authored and willdeacon committed
arm64: KVM: Trap VM ops when ARM64_WORKAROUND_CAVIUM_TX2_219_TVM is set
In order to work around the TX2-219 erratum, it is necessary to trap TTBRx_EL1 accesses to EL2. This is done by setting HCR_EL2.TVM on guest entry, which has the side effect of trapping all the other VM-related sysregs as well.

To minimize the overhead, a fast path is used so that we don't have to go all the way back to the main sysreg handling code, unless the rest of the hypervisor expects to see these accesses.

Cc: <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent da0c9ea commit d3ec3a0
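
For orientation, here is a rough, self-contained C model of the exit-time decision described in the commit message above. The struct, helper name, and flags below are illustrative stand-ins, not kernel code (only the HCR_TVM bit position, bit 26 of HCR_EL2, is architectural); the real logic is in the fixup_guest_exit()/handle_tx2_tvm() hunks further down.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCR_TVM (1ULL << 26)	/* HCR_EL2.TVM, per the ARMv8 register layout */

struct model_vcpu {
	uint64_t hcr_el2;	/* software copy of HCR_EL2 */
	bool workaround_active;	/* ARM64_WORKAROUND_CAVIUM_TX2_219_TVM detected */
	bool exit_is_sysreg;	/* trap class is ESR_ELx_EC_SYS64 */
};

/* Fast path: emulate the trapped VM-register write right here, unless the
 * hypervisor itself requested TVM traps and therefore expects to see them. */
static bool fast_path_handles_exit(const struct model_vcpu *vcpu)
{
	if (!vcpu->workaround_active || !vcpu->exit_is_sysreg)
		return false;
	if (vcpu->hcr_el2 & HCR_TVM)
		return false;	/* defer to the normal sysreg handling code */
	return true;		/* write replayed, return straight to the guest */
}

int main(void)
{
	struct model_vcpu v = { .workaround_active = true, .exit_is_sysreg = true };

	printf("fast path used: %d\n", fast_path_handles_exit(&v));
	return 0;
}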

File tree

2 files changed (+69, -3 lines)


arch/arm64/include/asm/cpucaps.h

Lines changed: 2 additions & 1 deletion
@@ -52,7 +52,8 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING 42
 #define ARM64_HAS_DCPODP 43
 #define ARM64_WORKAROUND_1463225 44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
 
-#define ARM64_NCAPS 45
+#define ARM64_NCAPS 46
 
 #endif /* __ASM_CPUCAPS_H */
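
Both lines of this hunk move together for a reason the diff itself doesn't spell out: ARM64_NCAPS is not a capability, it is the count that sizes the kernel's capability bitmaps and arrays, so it must stay one past the highest index. A small user-space sketch of that relationship; cpus_have_cap_model() and detected_caps are hypothetical stand-ins for the kernel's cpus_have_const_cap() machinery.

#include <stdbool.h>
#include <stdio.h>

#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
#define ARM64_NCAPS				46	/* always one past the highest index */

/* Capability state is tracked per index, so the backing storage is sized by
 * ARM64_NCAPS; adding index 45 without bumping ARM64_NCAPS to 46 would leave
 * the new entry outside the array. */
static bool detected_caps[ARM64_NCAPS];

static bool cpus_have_cap_model(unsigned int cap)
{
	return cap < ARM64_NCAPS && detected_caps[cap];
}

int main(void)
{
	detected_caps[ARM64_WORKAROUND_CAVIUM_TX2_219_TVM] = true;	/* erratum detected */
	printf("%d\n", cpus_have_cap_model(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM));
	return 0;
}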

arch/arm64/kvm/hyp/switch.c

Lines changed: 67 additions & 2 deletions
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 hcr = vcpu->arch.hcr_el2;
 
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+		hcr |= HCR_TVM;
+
 	write_sysreg(hcr, hcr_el2);
 
 	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	 * the crucial bit is "On taking a vSError interrupt,
 	 * HCR_EL2.VSE is cleared to 0."
 	 */
-	if (vcpu->arch.hcr_el2 & HCR_VSE)
-		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+	if (vcpu->arch.hcr_el2 & HCR_VSE) {
+		vcpu->arch.hcr_el2 &= ~HCR_VSE;
+		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+	}
 
 	if (has_vhe())
 		deactivate_traps_vhe();
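
The rework above matters because, on affected CPUs, the hardware HCR_EL2 now carries a TVM bit that was forced on at entry and is not part of the vcpu's software copy; copying the whole register back would make that bit sticky. Only VSE is synced. A small self-contained sketch of the merge, with the VSE and TVM bit positions taken from the ARMv8 HCR_EL2 layout.

#include <stdint.h>
#include <stdio.h>

#define HCR_VSE (1ULL << 8)	/* virtual SError pending */
#define HCR_TVM (1ULL << 26)	/* trap VM-register writes */

/* Refresh only the VSE bit from the hardware value: if the vSError was
 * delivered, the hardware cleared VSE and the software copy follows; any bit
 * set behind the guest's back (such as the TVM bit forced by the TX2
 * workaround) does not leak into the software copy. */
static uint64_t sync_vse(uint64_t soft_hcr, uint64_t hw_hcr)
{
	if (soft_hcr & HCR_VSE) {
		soft_hcr &= ~HCR_VSE;
		soft_hcr |= hw_hcr & HCR_VSE;
	}
	return soft_hcr;
}

int main(void)
{
	uint64_t soft = HCR_VSE;	/* vSError was pending */
	uint64_t hw = HCR_TVM;		/* delivered (VSE clear), TVM forced on */

	/* The old full copy-back would have returned HCR_TVM; this returns 0. */
	printf("synced soft copy: %#llx\n", (unsigned long long)sync_vse(soft, hw));
	return 0;
}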
@@ -380,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	int rt = kvm_vcpu_sys_get_rt(vcpu);
+	u64 val = vcpu_get_reg(vcpu, rt);
+
+	/*
+	 * The normal sysreg handling code expects to see the traps,
+	 * let's not do anything here.
+	 */
+	if (vcpu->arch.hcr_el2 & HCR_TVM)
+		return false;
+
+	switch (sysreg) {
+	case SYS_SCTLR_EL1:
+		write_sysreg_el1(val, SYS_SCTLR);
+		break;
+	case SYS_TTBR0_EL1:
+		write_sysreg_el1(val, SYS_TTBR0);
+		break;
+	case SYS_TTBR1_EL1:
+		write_sysreg_el1(val, SYS_TTBR1);
+		break;
+	case SYS_TCR_EL1:
+		write_sysreg_el1(val, SYS_TCR);
+		break;
+	case SYS_ESR_EL1:
+		write_sysreg_el1(val, SYS_ESR);
+		break;
+	case SYS_FAR_EL1:
+		write_sysreg_el1(val, SYS_FAR);
+		break;
+	case SYS_AFSR0_EL1:
+		write_sysreg_el1(val, SYS_AFSR0);
+		break;
+	case SYS_AFSR1_EL1:
+		write_sysreg_el1(val, SYS_AFSR1);
+		break;
+	case SYS_MAIR_EL1:
+		write_sysreg_el1(val, SYS_MAIR);
+		break;
+	case SYS_AMAIR_EL1:
+		write_sysreg_el1(val, SYS_AMAIR);
+		break;
+	case SYS_CONTEXTIDR_EL1:
+		write_sysreg_el1(val, SYS_CONTEXTIDR);
+		break;
+	default:
+		return false;
+	}
+
+	__kvm_skip_instr(vcpu);
+	return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
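
handle_tx2_tvm() above leans on esr_sys64_to_sysreg() and kvm_vcpu_sys_get_rt() to recover which register was written and from which GPR. A rough self-contained sketch of that decoding, following the Arm ARM ISS layout for EC 0x18 (trapped MSR/MRS) exits; the helper names here are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

/* ISS field layout for a trapped MSR/MRS (ESR_ELx.EC == 0x18), per the
 * Arm ARM: Op0[21:20] Op2[19:17] Op1[16:14] CRn[13:10] Rt[9:5] CRm[4:1]
 * Direction[0] (0 = write/MSR, 1 = read/MRS). */
static unsigned int iss_rt(uint32_t esr)      { return (esr >> 5) & 0x1f; }
static unsigned int iss_is_read(uint32_t esr) { return esr & 1; }
static unsigned int iss_crn(uint32_t esr)     { return (esr >> 10) & 0xf; }
static unsigned int iss_crm(uint32_t esr)     { return (esr >> 1) & 0xf; }

int main(void)
{
	/* Example: a trapped write (direction 0) with x3 as the source GPR and
	 * CRn = 2, the translation-table register group. */
	uint32_t esr = (3u << 5) | (2u << 10);

	printf("rt=%u read=%u CRn=%u CRm=%u\n",
	       iss_rt(esr), iss_is_read(esr), iss_crn(esr), iss_crm(esr));
	return 0;
}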
@@ -399,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (*exit_code != ARM_EXCEPTION_TRAP)
 		goto exit;
 
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+	    handle_tx2_tvm(vcpu))
+		return true;
+
 	/*
 	 * We trap the first access to the FP/SIMD to save the host context
 	 * and restore the guest context lazily.
