
Commit ce66109

KVM: arm64: nv: Take "masked" aborts to EL2 when HCRX_EL2.TMEA is set
HCRX_EL2.TMEA further modifies the external abort behavior where unmasked
aborts are taken to EL1 and masked aborts are taken to EL2. It's rather weird
when you consider that SEAs are, well, *synchronous* and therefore not
actually maskable. However, for the purposes of exception routing, they're
considered "masked" if the A flag is set.

This gets a bit hairier when considering the fact that TMEA also enables
vSErrors, i.e. KVM has delegated the HW vSError context to the guest
hypervisor. We can keep the vSError context delegation as-is by taking
advantage of a couple properties:

 - If SErrors are unmasked, the 'physical' SError can be taken in-context
   immediately. In other words, KVM can emulate the EL1 SError while
   preserving vEL2's ownership of the vSError context.

 - If SErrors are masked, the 'physical' SError is taken to EL2 immediately
   and needs the usual nested exception entry.

Note that the new in-context handling has the benign effect where unmasked
SError injections are emulated even for non-nested VMs.

Reviewed-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
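The routing rules described above can be summarized with a small standalone
model. This is a minimal sketch for illustration only, not kernel code: the
struct fields and function names below (vcpu_model, sea_target_is_el2,
serror_target_is_el2) are hypothetical stand-ins for the KVM helpers changed
in the diff below.

/*
 * Illustrative model only -- not kernel code. Each field is a stand-in for
 * the architectural state KVM consults when routing an abort.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
        bool hcr_tge_or_tea;    /* HCR_EL2.TGE or HCR_EL2.TEA set */
        bool hcr_amo;           /* HCR_EL2.AMO set */
        bool hcrx_tmea;         /* HCRX_EL2.TMEA set */
        bool hyp_ctxt;          /* vCPU currently in (virtual) EL2 context */
        bool mode_priv;         /* vCPU executing at EL1 or above */
        bool pstate_a;          /* PSTATE.A set, i.e. async aborts "masked" */
};

/* SEAs route to vEL2 when TGE/TEA is set, or when TMEA is set and the abort
 * is "masked" (PSTATE.A) at EL1 or above. */
static bool sea_target_is_el2(const struct vcpu_model *v)
{
        if (v->hcr_tge_or_tea)
                return true;
        if (!v->mode_priv)
                return false;
        return v->pstate_a && v->hcrx_tmea;
}

/* SErrors route to vEL2 from hyp context, when AMO is set, or when TMEA is
 * set and SErrors are masked. */
static bool serror_target_is_el2(const struct vcpu_model *v)
{
        if (v->hyp_ctxt || v->hcr_amo)
                return true;
        if (!v->hcrx_tmea)
                return false;
        return v->pstate_a;
}

int main(void)
{
        struct vcpu_model v = {
                .hcrx_tmea = true, .mode_priv = true, .pstate_a = true,
        };

        /* Masked + TMEA: both abort flavors route to vEL2. */
        printf("SEA -> EL2: %d, SError -> EL2: %d\n",
               sea_target_is_el2(&v), serror_target_is_el2(&v));

        /* Unmasked: both stay at EL1 (the SError is emulated in-context). */
        v.pstate_a = false;
        printf("SEA -> EL2: %d, SError -> EL2: %d\n",
               sea_target_is_el2(&v), serror_target_is_el2(&v));
        return 0;
}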
1 parent fff97df commit ce66109

File tree

1 file changed: +32, -2 lines


arch/arm64/kvm/inject_fault.c

@@ -204,7 +204,14 @@ static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 
 static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
 {
-        return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA);
+        if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
+                return true;
+
+        if (!vcpu_mode_priv(vcpu))
+                return false;
+
+        return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
+               (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
 }
 
 int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
@@ -258,9 +265,20 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
         inject_undef64(vcpu);
 }
 
+static bool serror_is_masked(struct kvm_vcpu *vcpu)
+{
+        return *vcpu_cpsr(vcpu) & PSR_A_BIT;
+}
+
 static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
 {
-        return is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu);
+        if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
+                return true;
+
+        if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
+                return false;
+
+        return serror_is_masked(vcpu);
 }
 
 static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
@@ -281,6 +299,18 @@ int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
                 return 1;
         }
 
+        /*
+         * Emulate the exception entry if SErrors are unmasked. This is useful
+         * if the vCPU is in a nested context w/ vSErrors enabled, since we've
+         * already delegated the hardware vSError context (i.e. HCR_EL2.VSE,
+         * VSESR_EL2, VDISR_EL2) to the guest hypervisor.
+         */
+        if (!serror_is_masked(vcpu)) {
+                pend_serror_exception(vcpu);
+                vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+                return 1;
+        }
+
         vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
         *vcpu_hcr(vcpu) |= HCR_VSE;
         return 1;
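
As a complement to the last hunk, here is a tiny standalone sketch of the
masked/unmasked split that the SError injection path now makes. It is
illustrative only, not kernel code: the struct, its fields, inject_serror(),
and the syndrome value are hypothetical stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model only -- not kernel code. */
struct serror_model {
        bool pstate_a;          /* PSTATE.A: SErrors masked */
        bool hcr_vse;           /* pending virtual SError (models HCR_EL2.VSE) */
        uint64_t vsesr;         /* syndrome of the pended vSError (VSESR_EL2) */
        uint64_t emulated_esr;  /* syndrome written on emulated exception entry */
};

/*
 * Unmasked: take the SError in-context immediately by emulating the
 * exception entry, leaving the HW vSError context to the guest hypervisor.
 * Masked: pend a vSError with the given syndrome for later delivery.
 */
static void inject_serror(struct serror_model *v, uint64_t esr)
{
        if (!v->pstate_a) {
                v->emulated_esr = esr;  /* stand-in for pend_serror_exception()
                                           plus writing the target ESR_ELx */
                return;
        }
        v->vsesr = esr;
        v->hcr_vse = true;
}

int main(void)
{
        uint64_t esr = 0x1;     /* arbitrary syndrome, for illustration only */

        struct serror_model masked = { .pstate_a = true };
        inject_serror(&masked, esr);
        printf("masked:   VSE=%d VSESR=%#llx\n",
               masked.hcr_vse, (unsigned long long)masked.vsesr);

        struct serror_model unmasked = { .pstate_a = false };
        inject_serror(&unmasked, esr);
        printf("unmasked: emulated entry, ESR=%#llx\n",
               (unsigned long long)unmasked.emulated_esr);
        return 0;
}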
