
Commit 8c90d43

mrutland-arm authored and gregkh committed
KVM: arm64: Eagerly switch ZCR_EL{1,2}
[ Upstream commit 59419f1 ]

In non-protected KVM modes, while the guest FPSIMD/SVE/SME state is live
on the CPU, the host's active SVE VL may differ from the guest's maximum
SVE VL:

* For VHE hosts, when a VM uses NV, ZCR_EL2 contains a value constrained
  by the guest hypervisor, which may be less than or equal to that
  guest's maximum VL.

  Note: in this case the value of ZCR_EL1 is immaterial due to E2H.

* For nVHE/hVHE hosts, ZCR_EL1 contains a value written by the guest,
  which may be less than or greater than the guest's maximum VL.

  Note: in this case hyp code traps host SVE usage and lazily restores
  ZCR_EL2 to the host's maximum VL, which may be greater than the
  guest's maximum VL.

This can be the case between exiting a guest and kvm_arch_vcpu_put_fp().
If a softirq is taken during this period and the softirq handler tries
to use kernel-mode NEON, then the kernel will fail to save the guest's
FPSIMD/SVE state, and will pend a SIGKILL for the current thread.

This happens because kvm_arch_vcpu_ctxsync_fp() binds the guest's live
FPSIMD/SVE state with the guest's maximum SVE VL, and
fpsimd_save_user_state() verifies that the live SVE VL is as expected
before attempting to save the register state:

|	if (WARN_ON(sve_get_vl() != vl)) {
|		force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
|		return;
|	}

Fix this and make this a bit easier to reason about by always eagerly
switching ZCR_EL{1,2} at hyp during guest<->host transitions. With this
happening, there's no need to trap host SVE usage, and the nVHE/hVHE
__deactivate_cptr_traps() logic can be simplified to enable host access
to all present FPSIMD/SVE/SME features.

In protected nVHE/hVHE modes, the host's state is always saved/restored
by hyp, and the guest's state is saved prior to exit to the host, so
from the host's PoV the guest never has live FPSIMD/SVE/SME state, and
the host's ZCR_EL1 is never clobbered by hyp.

Fixes: 8c8010d ("KVM: arm64: Save/restore SVE state for nVHE")
Fixes: 2e3cf82 ("KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state")
Signed-off-by: Mark Rutland <[email protected]>
Reviewed-by: Mark Brown <[email protected]>
Tested-by: Mark Brown <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Fuad Tabba <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Oliver Upton <[email protected]>
Cc: Will Deacon <[email protected]>
Reviewed-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Mark Brown <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent f32b2b4 commit 8c90d43
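
Editorial note on the ZCR values written throughout the diff below: ZCR_ELx.LEN holds the vector length as a count of 128-bit quadwords minus one, so a value of (vq - 1) selects a VL of 16 * vq bytes. That is why expressions such as vcpu_sve_max_vq(vcpu) - 1 and sve_vq_from_vl(kvm_host_sve_max_vl) - 1 are written to SYS_ZCR. A minimal sketch of the encoding (illustrative helpers, not kernel code):

#include <stdint.h>

#define SVE_VQ_BYTES	16u	/* one quadword = 128 bits */

/* LEN field value for a given quadword count, e.g. vq = 4 -> LEN = 3 (512-bit VL). */
static inline uint64_t zcr_len_from_vq(unsigned int vq)
{
	return vq - 1;
}

/* Quadword count for a vector length in bytes, cf. the kernel's sve_vq_from_vl(). */
static inline unsigned int vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / SVE_VQ_BYTES;
}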

6 files changed: +100 -40 lines changed

arch/arm64/kvm/fpsimd.c

Lines changed: 0 additions & 30 deletions
@@ -136,36 +136,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 	local_irq_save(flags);
 
 	if (guest_owns_fp_regs()) {
-		if (vcpu_has_sve(vcpu)) {
-			u64 zcr = read_sysreg_el1(SYS_ZCR);
-
-			/*
-			 * If the vCPU is in the hyp context then ZCR_EL1 is
-			 * loaded with its vEL2 counterpart.
-			 */
-			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
-
-			/*
-			 * Restore the VL that was saved when bound to the CPU,
-			 * which is the maximum VL for the guest. Because the
-			 * layout of the data when saving the sve state depends
-			 * on the VL, we need to use a consistent (i.e., the
-			 * maximum) VL.
-			 * Note that this means that at guest exit ZCR_EL1 is
-			 * not necessarily the same as on guest entry.
-			 *
-			 * ZCR_EL2 holds the guest hypervisor's VL when running
-			 * a nested guest, which could be smaller than the
-			 * max for the vCPU. Similar to above, we first need to
-			 * switch to a VL consistent with the layout of the
-			 * vCPU's SVE state. KVM support for NV implies VHE, so
-			 * using the ZCR_EL1 alias is safe.
-			 */
-			if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
-				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
-						       SYS_ZCR_EL1);
-		}
-
 		/*
 		 * Flush (save and invalidate) the fpsimd/sve state so that if
 		 * the host tries to use fpsimd/sve, it's not using stale data

arch/arm64/kvm/hyp/entry.S

Lines changed: 5 additions & 0 deletions
@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
 alternative_else_nop_endif
 	mrs	x1, isr_el1
 	cbz	x1, 1f
+
+	// Ensure that __guest_enter() always provides a context
+	// synchronization event so that callers don't need ISBs for anything
+	// that would usually be synchonized by the ERET.
+	isb
 	mov	x0, #ARM_EXCEPTION_IRQ
 	ret
 

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 59 additions & 0 deletions
@@ -375,6 +375,65 @@ static inline void __hyp_sve_save_host(void)
 			 true);
 }
 
+static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+	u64 zcr_el1, zcr_el2;
+
+	if (!guest_owns_fp_regs())
+		return;
+
+	if (vcpu_has_sve(vcpu)) {
+		/* A guest hypervisor may restrict the effective max VL. */
+		if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+			zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
+		else
+			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+
+		write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+		zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
+		write_sysreg_el1(zcr_el1, SYS_ZCR);
+	}
+}
+
+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
+{
+	u64 zcr_el1, zcr_el2;
+
+	if (!guest_owns_fp_regs())
+		return;
+
+	/*
+	 * When the guest owns the FP regs, we know that guest+hyp traps for
+	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
+	 * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
+	 * prior to __guest_entry(). As __guest_entry() guarantees a context
+	 * synchronization event, we don't need an ISB here to avoid taking
+	 * traps for anything that was exposed to the guest.
+	 */
+	if (vcpu_has_sve(vcpu)) {
+		zcr_el1 = read_sysreg_el1(SYS_ZCR);
+		__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
+
+		/*
+		 * The guest's state is always saved using the guest's max VL.
+		 * Ensure that the host has the guest's max VL active such that
+		 * the host can save the guest's state lazily, but don't
+		 * artificially restrict the host to the guest's max VL.
+		 */
+		if (has_vhe()) {
+			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+			write_sysreg_el2(zcr_el2, SYS_ZCR);
+		} else {
+			zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
+			write_sysreg_el2(zcr_el2, SYS_ZCR);
+
+			zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
+			write_sysreg_el1(zcr_el1, SYS_ZCR);
+		}
+	}
+}
+
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 {
 	/*
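
Taken together, the two helpers above bracket the vCPU run so that ZCR_EL{1,2} are switched eagerly on every guest<->host transition; the actual call sites are in the hyp-main.c and vhe/switch.c hunks further down. A rough sketch of the intended shape (a hypothetical wrapper condensed from those call sites, which also save and restore other host state around the run):

static int run_vcpu_with_eager_zcr(struct kvm_vcpu *vcpu)
{
	int ret;

	fpsimd_lazy_switch_to_guest(vcpu);	/* guest's ZCR_EL{1,2} become live */
	ret = __kvm_vcpu_run(vcpu);
	fpsimd_lazy_switch_to_host(vcpu);	/* save guest ZCR_EL1, restore host max VL */

	return ret;
}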

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 3 additions & 6 deletions
@@ -5,6 +5,7 @@
  */
 
 #include <hyp/adjust_pc.h>
+#include <hyp/switch.h>
 
 #include <asm/pgtable-types.h>
 #include <asm/kvm_asm.h>
@@ -179,7 +180,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 		pkvm_put_hyp_vcpu(hyp_vcpu);
 	} else {
 		/* The host is fully trusted, run its vCPU directly. */
+		fpsimd_lazy_switch_to_guest(host_vcpu);
 		ret = __kvm_vcpu_run(host_vcpu);
+		fpsimd_lazy_switch_to_host(host_vcpu);
 	}
 
 out:
@@ -480,12 +483,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
 	case ESR_ELx_EC_SMC64:
 		handle_host_smc(host_ctxt);
 		break;
-	case ESR_ELx_EC_SVE:
-		cpacr_clear_set(0, CPACR_ELx_ZEN);
-		isb();
-		sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
-				       SYS_ZCR_EL2);
-		break;
 	case ESR_ELx_EC_IABT_LOW:
 	case ESR_ELx_EC_DABT_LOW:
 		handle_host_mem_abort(host_ctxt);

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 29 additions & 4 deletions
@@ -40,6 +40,9 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
 
+	if (!guest_owns_fp_regs())
+		__activate_traps_fpsimd32(vcpu);
+
 	if (has_hvhe()) {
 		val |= CPACR_ELx_TTA;
 
@@ -48,6 +51,8 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
 			if (vcpu_has_sve(vcpu))
 				val |= CPACR_ELx_ZEN;
 		}
+
+		write_sysreg(val, cpacr_el1);
 	} else {
 		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
 
@@ -62,12 +67,32 @@
 
 		if (!guest_owns_fp_regs())
 			val |= CPTR_EL2_TFP;
+
+		write_sysreg(val, cptr_el2);
 	}
+}
 
-	if (!guest_owns_fp_regs())
-		__activate_traps_fpsimd32(vcpu);
+static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
+{
+	if (has_hvhe()) {
+		u64 val = CPACR_ELx_FPEN;
+
+		if (cpus_have_final_cap(ARM64_SVE))
+			val |= CPACR_ELx_ZEN;
+		if (cpus_have_final_cap(ARM64_SME))
+			val |= CPACR_ELx_SMEN;
+
+		write_sysreg(val, cpacr_el1);
+	} else {
+		u64 val = CPTR_NVHE_EL2_RES1;
+
+		if (!cpus_have_final_cap(ARM64_SVE))
+			val |= CPTR_EL2_TZ;
+		if (!cpus_have_final_cap(ARM64_SME))
+			val |= CPTR_EL2_TSM;
 
-	kvm_write_cptr_el2(val);
+		write_sysreg(val, cptr_el2);
+	}
 }
 
 static void __activate_traps(struct kvm_vcpu *vcpu)
@@ -120,7 +145,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
-	kvm_reset_cptr_el2(vcpu);
+	__deactivate_cptr_traps(vcpu);
 	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }
 
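
One design note on __deactivate_cptr_traps() above (an editorial reading of the commit message, not text from the patch): because fpsimd_lazy_switch_to_host() now eagerly restores ZCR_EL2 to the host's maximum VL, host SVE usage no longer needs to trap to hyp for a lazy restore, which is why the ESR_ELx_EC_SVE case could be dropped from handle_trap() in hyp-main.c. The value restored on guest exit therefore only traps features the CPU lacks, sketched here for the nVHE case (hypothetical helper name; the CPTR_* macros are the kernel's):

static u64 cptr_for_host_exit(bool cpu_has_sve, bool cpu_has_sme)
{
	u64 val = CPTR_NVHE_EL2_RES1;

	if (!cpu_has_sve)
		val |= CPTR_EL2_TZ;	/* trap only what the CPU does not implement */
	if (!cpu_has_sme)
		val |= CPTR_EL2_TSM;

	return val;
}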

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 4 additions & 0 deletions
@@ -462,6 +462,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	sysreg_save_host_state_vhe(host_ctxt);
 
+	fpsimd_lazy_switch_to_guest(vcpu);
+
 	/*
 	 * Note that ARM erratum 1165522 requires us to configure both stage 1
 	 * and stage 2 translation for the guest context before we clear
@@ -486,6 +488,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	__deactivate_traps(vcpu);
 
+	fpsimd_lazy_switch_to_host(vcpu);
+
 	sysreg_restore_host_state_vhe(host_ctxt);
 
 	if (guest_owns_fp_regs())