Commit 18fbc24

KVM: arm64: nv: Use guest hypervisor's vSError state
When HCR_EL2.AMO is set, physical SErrors are routed to EL2 and virtual
SError injection is enabled for EL1. Conceptually treating host-initiated
SErrors as 'physical', this means we can delegate control of the vSError
injection context to the guest hypervisor when nesting && AMO is set.

Reviewed-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
1 parent 211fced commit 18fbc24
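For readers skimming the diff, here is a minimal, self-contained sketch (not kernel code) of the selection logic this commit introduces: when the vCPU is in a nested context and the guest hypervisor has set HCR_EL2.AMO, the guest hypervisor owns the vSError injection context, so its VSESR_EL2 is used; otherwise KVM's host-side value is used, with an IMPLEMENTATION DEFINED ESR substituted when FEAT_RAS is hidden from the guest. The struct vcpu_model type, its field names, select_vsesr(), and ESR_ELX_ISV_EXAMPLE are illustrative stand-ins only and do not exist in the kernel.

/*
 * Illustrative model of the VSESR_EL2 selection added to ___activate_traps().
 * All types and helpers here are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ELX_ISV_EXAMPLE	(UINT64_C(1) << 24)	/* stand-in for ESR_ELx_ISV */

struct vcpu_model {
	bool nested_ctxt;	/* vCPU is running a nested EL1 guest */
	bool guest_hcr_amo;	/* guest hypervisor's HCR_EL2.AMO */
	bool guest_has_ras;	/* FEAT_RAS exposed to the guest */
	uint64_t host_vsesr;	/* KVM/userspace-owned VSESR_EL2 value */
	uint64_t guest_vsesr;	/* guest hypervisor's VSESR_EL2 value */
};

/* Mirrors vserror_state_is_nested() from the patch. */
static bool vserror_state_is_nested(const struct vcpu_model *v)
{
	return v->nested_ctxt && v->guest_hcr_amo;
}

/* Mirrors the vsesr selection in the patched ___activate_traps(). */
static uint64_t select_vsesr(const struct vcpu_model *v)
{
	if (!vserror_state_is_nested(v))
		return v->host_vsesr;		/* host owns vSError injection */
	if (v->guest_has_ras)
		return v->guest_vsesr;		/* guest hypervisor owns it */
	return ESR_ELX_ISV_EXAMPLE;		/* FEAT_RAS hidden: IMP DEF ESR */
}

int main(void)
{
	struct vcpu_model v = {
		.nested_ctxt = true,
		.guest_hcr_amo = true,
		.guest_has_ras = true,
		.host_vsesr = 0x1,
		.guest_vsesr = 0x2,
	};

	printf("VSESR_EL2 <- %#llx\n", (unsigned long long)select_vsesr(&v));
	return 0;
}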

4 files changed, 77 insertions(+), 8 deletions(-)

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 5 additions & 0 deletions
@@ -257,6 +257,11 @@ static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
 	return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
 }
 
+static inline bool vserror_state_is_nested(struct kvm_vcpu *vcpu)
+{
+	return is_nested_ctxt(vcpu) && vcpu_el2_amo_is_set(vcpu);
+}
+
 /*
  * The layout of SPSR for an AArch32 state is different when observed from an
  * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -1682,6 +1682,9 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
 #define kvm_has_s1poe(k)				\
 	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
 
+#define kvm_has_ras(k)					\
+	(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
+
 static inline bool kvm_arch_has_irq_bypass(void)
 {
 	return true;

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 40 additions & 5 deletions
@@ -476,21 +476,56 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
 
 	write_sysreg_hcr(hcr);
 
-	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
-		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) {
+		u64 vsesr;
+
+		/*
+		 * When HCR_EL2.AMO is set, physical SErrors are taken to EL2
+		 * and vSError injection is enabled for EL1. Conveniently, for
+		 * NV this means that it is never the case where a 'physical'
+		 * SError (injected by KVM or userspace) and vSError are
+		 * deliverable to the same context.
+		 *
+		 * As such, we can trivially select between the host or guest's
+		 * VSESR_EL2. Except for the case that FEAT_RAS hasn't been
+		 * exposed to the guest, where ESR propagation in hardware
+		 * occurs unconditionally.
+		 *
+		 * Paper over the architectural wart and use an IMPLEMENTATION
+		 * DEFINED ESR value in case FEAT_RAS is hidden from the guest.
+		 */
+		if (!vserror_state_is_nested(vcpu))
+			vsesr = vcpu->arch.vsesr_el2;
+		else if (kvm_has_ras(kern_hyp_va(vcpu->kvm)))
+			vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2);
+		else
+			vsesr = ESR_ELx_ISV;
+
+		write_sysreg_s(vsesr, SYS_VSESR_EL2);
+	}
 }
 
 static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
 {
+	u64 *hcr;
+
+	if (vserror_state_is_nested(vcpu))
+		hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);
+	else
+		hcr = &vcpu->arch.hcr_el2;
+
 	/*
 	 * If we pended a virtual abort, preserve it until it gets
 	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
 	 * the crucial bit is "On taking a vSError interrupt,
 	 * HCR_EL2.VSE is cleared to 0."
+	 *
+	 * Additionally, when in a nested context we need to propagate the
+	 * updated state to the guest hypervisor's HCR_EL2.
 	 */
-	if (vcpu->arch.hcr_el2 & HCR_VSE) {
-		vcpu->arch.hcr_el2 &= ~HCR_VSE;
-		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+	if (*hcr & HCR_VSE) {
+		*hcr &= ~HCR_VSE;
+		*hcr |= read_sysreg(hcr_el2) & HCR_VSE;
 	}
 }

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 29 additions & 3 deletions
@@ -109,6 +109,17 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
 	return kvm_has_s1poe(kern_hyp_va(vcpu->kvm));
 }
 
+static inline bool ctxt_has_ras(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+		return false;
+
+	vcpu = ctxt_to_vcpu(ctxt);
+	return kvm_has_ras(kern_hyp_va(vcpu->kvm));
+}
+
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -159,8 +170,13 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 	if (!has_vhe() && ctxt->__hyp_running_vcpu)
 		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
 
-	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+	if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+		return;
+
+	if (!vserror_state_is_nested(ctxt_to_vcpu(ctxt)))
 		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
+	else if (ctxt_has_ras(ctxt))
+		ctxt_sys_reg(ctxt, VDISR_EL2) = read_sysreg_s(SYS_VDISR_EL2);
 }
 
 static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
@@ -275,6 +291,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
 	u64 pstate = to_hw_pstate(ctxt);
 	u64 mode = pstate & PSR_AA32_MODE_MASK;
+	u64 vdisr;
 
 	/*
 	 * Safety check to ensure we're setting the CPU up to enter the guest
@@ -293,8 +310,17 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
 	write_sysreg_el2(pstate, SYS_SPSR);
 
-	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
+	if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+		return;
+
+	if (!vserror_state_is_nested(ctxt_to_vcpu(ctxt)))
+		vdisr = ctxt_sys_reg(ctxt, DISR_EL1);
+	else if (ctxt_has_ras(ctxt))
+		vdisr = ctxt_sys_reg(ctxt, VDISR_EL2);
+	else
+		vdisr = 0;
+
+	write_sysreg_s(vdisr, SYS_VDISR_EL2);
 }
 
 static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
