
Commit 8c2899e
Merge branch kvm-arm64/nv-sve into kvmarm/next

* kvm-arm64/nv-sve:
  : CPTR_EL2, FPSIMD/SVE support for nested
  :
  : This series brings support for honoring the guest hypervisor's CPTR_EL2
  : trap configuration when running a nested guest, along with support for
  : FPSIMD/SVE usage at L1 and L2.
  KVM: arm64: Allow the use of SVE+NV
  KVM: arm64: nv: Add additional trap setup for CPTR_EL2
  KVM: arm64: nv: Add trap description for CPTR_EL2
  KVM: arm64: nv: Add TCPAC/TTA to CPTR->CPACR conversion helper
  KVM: arm64: nv: Honor guest hypervisor's FP/SVE traps in CPTR_EL2
  KVM: arm64: nv: Load guest FP state for ZCR_EL2 trap
  KVM: arm64: nv: Handle CPACR_EL1 traps
  KVM: arm64: Spin off helper for programming CPTR traps
  KVM: arm64: nv: Ensure correct VL is loaded before saving SVE state
  KVM: arm64: nv: Use guest hypervisor's max VL when running nested guest
  KVM: arm64: nv: Save guest's ZCR_EL2 when in hyp context
  KVM: arm64: nv: Load guest hyp's ZCR into EL1 state
  KVM: arm64: nv: Handle ZCR_EL2 traps
  KVM: arm64: nv: Forward SVE traps to guest hypervisor
  KVM: arm64: nv: Forward FP/ASIMD traps to guest hypervisor

Signed-off-by: Oliver Upton <[email protected]>

2 parents 1270dad + f1ee914; commit 8c2899e

11 files changed, 370 insertions(+), 42 deletions(-)

arch/arm64/include/asm/kvm_emulate.h (55 additions & 0 deletions)

@@ -11,6 +11,7 @@
 #ifndef __ARM64_KVM_EMULATE_H__
 #define __ARM64_KVM_EMULATE_H__
 
+#include <linux/bitfield.h>
 #include <linux/kvm_host.h>
 
 #include <asm/debug-monitors.h>
@@ -55,6 +56,14 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 
+static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
+{
+	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
+		  ESR_ELx_IL;
+
+	kvm_inject_nested_sync(vcpu, esr);
+}
+
 #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
@@ -638,4 +647,50 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
 
 	kvm_write_cptr_el2(val);
 }
+
+/*
+ * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
+ * format if E2H isn't set.
+ */
+static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
+{
+	u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);
+
+	if (!vcpu_el2_e2h_is_set(vcpu))
+		cptr = translate_cptr_el2_to_cpacr_el1(cptr);
+
+	return cptr;
+}
+
+static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
+					     unsigned int xen)
+{
+	switch (xen) {
+	case 0b00:
+	case 0b10:
+		return true;
+	case 0b01:
+		return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
+	case 0b11:
+	default:
+		return false;
+	}
+}
+
+#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)			\
+	(!vcpu_has_nv(vcpu) ? false :					\
+	 ____cptr_xen_trap_enabled(vcpu,				\
+				   SYS_FIELD_GET(CPACR_ELx, xen,	\
+						 vcpu_sanitised_cptr_el2(vcpu))))
+
+static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+	return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
+}
+
+static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
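A note on the encodings tested by ____cptr_xen_trap_enabled(): CPACR_ELx.FPEN and .ZEN are 2-bit enable fields, where 0b00 and 0b10 trap at all exception levels, 0b01 traps EL0 accesses only (hence the check against HCR_EL2.TGE and the vEL2 state), and 0b11 disables the trap. A minimal standalone model of that decision table (plain C; the function name and driver are illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone model of the FPEN/ZEN decision table above; not kernel code.
 * xen is the 2-bit CPACR_ELx.{FPEN,ZEN} field value.
 */
static bool xen_trap_enabled(unsigned int xen, bool tge_set, bool at_el2)
{
	switch (xen) {
	case 0b00:
	case 0b10:
		return true;			/* trapped at EL0, EL1 and EL2 */
	case 0b01:
		/* EL0-only trap; mirrors the kernel's TGE/vEL2 check */
		return tge_set && !at_el2;
	default:				/* 0b11: not trapped */
		return false;
	}
}

int main(void)
{
	printf("FPEN=0b01, TGE set, at vEL1: %d\n", xen_trap_enabled(1, true, false));
	printf("FPEN=0b11, anywhere:         %d\n", xen_trap_enabled(3, false, false));
	return 0;
}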

arch/arm64/include/asm/kvm_host.h (6 additions & 0 deletions)

@@ -458,6 +458,7 @@ enum vcpu_sysreg {
 	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
 	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
 	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
+	ZCR_EL2,	/* SVE Control Register (EL2) */
 	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
 	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
 	TCR_EL2,	/* Translation Control Register (EL2) */
@@ -902,6 +903,9 @@ struct kvm_vcpu_arch {
 
 #define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)
 
+#define vcpu_sve_zcr_elx(vcpu)						\
+	(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
+
 #define vcpu_sve_state_size(vcpu) ({					\
 	size_t __size_ret;						\
 	unsigned int __vcpu_vq;						\
@@ -1026,6 +1030,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
 	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
 	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
+	case ZCR_EL1:		*val = read_sysreg_s(SYS_ZCR_EL12);	break;
 	default:		return false;
 	}
 
@@ -1071,6 +1076,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
+	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
 	default:		return false;
 	}

arch/arm64/include/asm/kvm_nested.h (3 additions & 1 deletion)

@@ -33,7 +33,7 @@ static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
 
 static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
 {
-	u64 cpacr_el1 = 0;
+	u64 cpacr_el1 = CPACR_ELx_RES1;
 
 	if (cptr_el2 & CPTR_EL2_TTA)
 		cpacr_el1 |= CPACR_ELx_TTA;
@@ -42,6 +42,8 @@ static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
 	if (!(cptr_el2 & CPTR_EL2_TZ))
 		cpacr_el1 |= CPACR_ELx_ZEN;
 
+	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
+
 	return cpacr_el1;
 }
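As a worked example of this conversion: the nVHE CPTR_EL2 format expresses traps positively (TFP/TZ/TTA set means trap), while the VHE/CPACR format uses enable fields (FPEN/ZEN clear means trap), and TCPAC/TAM occupy the same bit positions in both layouts, so they can be copied across directly. A standalone sketch of the same translation, with bit positions defined locally rather than pulled from the kernel's sysreg headers (they are written from memory of the architectural layout; verify before relying on them, and RES1 bits are omitted):

#include <stdint.h>
#include <stdio.h>

#define CPTR_EL2_TZ	(UINT64_C(1) << 8)	/* nVHE: trap SVE */
#define CPTR_EL2_TFP	(UINT64_C(1) << 10)	/* nVHE: trap FP/ASIMD */
#define CPTR_EL2_TTA	(UINT64_C(1) << 20)	/* nVHE: trap trace */
#define CPTR_EL2_TAM	(UINT64_C(1) << 30)	/* same position in both formats */
#define CPTR_EL2_TCPAC	(UINT64_C(1) << 31)	/* same position in both formats */

#define CPACR_ZEN	(UINT64_C(3) << 16)	/* VHE: SVE enable, 0b11 = no trap */
#define CPACR_FPEN	(UINT64_C(3) << 20)	/* VHE: FP enable, 0b11 = no trap */
#define CPACR_TTA	(UINT64_C(1) << 28)	/* VHE: trap trace */

/* Standalone model of translate_cptr_el2_to_cpacr_el1(); not kernel code. */
static uint64_t cptr_to_cpacr(uint64_t cptr_el2)
{
	uint64_t cpacr = 0;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr |= CPACR_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr |= CPACR_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr |= CPACR_ZEN;

	/* TCPAC and TAM line up in both layouts: copy them as-is. */
	cpacr |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr;
}

int main(void)
{
	/* nVHE guest hypervisor trapping SVE but not FP/ASIMD or trace. */
	printf("%#llx\n", (unsigned long long)cptr_to_cpacr(CPTR_EL2_TZ));
	return 0;
}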

arch/arm64/kvm/arm.c (0 additions & 5 deletions)

@@ -1458,11 +1458,6 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
 		return -EINVAL;
 
-	/* Disallow NV+SVE for the time being */
-	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
-	    test_bit(KVM_ARM_VCPU_SVE, &features))
-		return -EINVAL;
-
 	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
 		return 0;

arch/arm64/kvm/emulate-nested.c (91 additions & 0 deletions)

@@ -79,6 +79,10 @@ enum cgt_group_id {
 	CGT_MDCR_E2TB,
 	CGT_MDCR_TDCC,
 
+	CGT_CPACR_E0POE,
+	CGT_CPTR_TAM,
+	CGT_CPTR_TCPAC,
+
 	/*
 	 * Anything after this point is a combination of coarse trap
 	 * controls, which must all be evaluated to decide what to do.
@@ -106,6 +110,8 @@
 	CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
 	CGT_CNTHCTL_EL1PTEN,
 
+	CGT_CPTR_TTA,
+
 	/* Must be last */
 	__NR_CGT_GROUP_IDS__
 };
@@ -345,6 +351,24 @@ static const struct trap_bits coarse_trap_bits[] = {
 		.mask	= MDCR_EL2_TDCC,
 		.behaviour = BEHAVE_FORWARD_ANY,
 	},
+	[CGT_CPACR_E0POE] = {
+		.index	= CPTR_EL2,
+		.value	= CPACR_ELx_E0POE,
+		.mask	= CPACR_ELx_E0POE,
+		.behaviour = BEHAVE_FORWARD_ANY,
+	},
+	[CGT_CPTR_TAM] = {
+		.index	= CPTR_EL2,
+		.value	= CPTR_EL2_TAM,
+		.mask	= CPTR_EL2_TAM,
+		.behaviour = BEHAVE_FORWARD_ANY,
+	},
+	[CGT_CPTR_TCPAC] = {
+		.index	= CPTR_EL2,
+		.value	= CPTR_EL2_TCPAC,
+		.mask	= CPTR_EL2_TCPAC,
+		.behaviour = BEHAVE_FORWARD_ANY,
+	},
 };
 
 #define MCB(id, ...)						\
@@ -410,12 +434,26 @@ static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
 	return BEHAVE_FORWARD_ANY;
 }
 
+static enum trap_behaviour check_cptr_tta(struct kvm_vcpu *vcpu)
+{
+	u64 val = __vcpu_sys_reg(vcpu, CPTR_EL2);
+
+	if (!vcpu_el2_e2h_is_set(vcpu))
+		val = translate_cptr_el2_to_cpacr_el1(val);
+
+	if (val & CPACR_ELx_TTA)
+		return BEHAVE_FORWARD_ANY;
+
+	return BEHAVE_HANDLE_LOCALLY;
+}
+
 #define CCC(id, fn)				\
 	[id - __COMPLEX_CONDITIONS__] = fn
 
 static const complex_condition_check ccc[] = {
 	CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
 	CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
+	CCC(CGT_CPTR_TTA, check_cptr_tta),
 };
 
 /*
@@ -1000,6 +1038,59 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
 	SR_TRAP(SYS_TRBPTR_EL1,		CGT_MDCR_E2TB),
 	SR_TRAP(SYS_TRBSR_EL1,		CGT_MDCR_E2TB),
 	SR_TRAP(SYS_TRBTRG_EL1,		CGT_MDCR_E2TB),
+	SR_TRAP(SYS_CPACR_EL1,		CGT_CPTR_TCPAC),
+	SR_TRAP(SYS_AMUSERENR_EL0,	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCFGR_EL0,		CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCGCR_EL0,		CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCNTENCLR0_EL0,	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCNTENCLR1_EL0,	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCNTENSET0_EL0,	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCNTENSET1_EL0,	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMCR_EL0,		CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR0_EL0(0),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR0_EL0(1),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR0_EL0(2),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR0_EL0(3),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(0),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(1),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(2),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(3),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(4),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(5),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(6),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(7),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(8),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(9),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(10),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(11),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(12),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(13),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(14),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVCNTR1_EL0(15),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER0_EL0(0),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER0_EL0(1),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER0_EL0(2),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER0_EL0(3),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(0),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(1),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(2),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(3),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(4),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(5),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(6),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(7),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(8),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(9),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(10),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(11),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(12),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(13),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(14),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_AMEVTYPER1_EL0(15),	CGT_CPTR_TAM),
+	SR_TRAP(SYS_POR_EL0,		CGT_CPACR_E0POE),
+	/* op0=2, op1=1, and CRn<0b1000 */
+	SR_RANGE_TRAP(sys_reg(2, 1, 0, 0, 0),
+		      sys_reg(2, 1, 7, 15, 7), CGT_CPTR_TTA),
 	SR_TRAP(SYS_CNTP_TVAL_EL0,	CGT_CNTHCTL_EL1PTEN),
 	SR_TRAP(SYS_CNTP_CVAL_EL0,	CGT_CNTHCTL_EL1PTEN),
 	SR_TRAP(SYS_CNTP_CTL_EL0,	CGT_CNTHCTL_EL1PTEN),
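The SR_RANGE_TRAP() entry leans on the fact that the kernel's sys_reg() encoding packs Op0 above Op1 above CRn above CRm above Op2, so a single numeric range covers every op0=2, op1=1 encoding with CRn < 8, i.e. the trace unit register space trapped by CPTR_EL2.TTA. A standalone sketch of that packing (shift values copied from my reading of asm/sysreg.h; treat them as an assumption to verify):

#include <stdint.h>
#include <stdio.h>

/* Shifts believed to mirror the kernel's sys_reg() packing; not kernel code. */
enum { Op0_shift = 19, Op1_shift = 16, CRn_shift = 12, CRm_shift = 8, Op2_shift = 5 };

static uint32_t sys_reg(uint32_t op0, uint32_t op1, uint32_t crn,
			uint32_t crm, uint32_t op2)
{
	return (op0 << Op0_shift) | (op1 << Op1_shift) | (crn << CRn_shift) |
	       (crm << CRm_shift) | (op2 << Op2_shift);
}

int main(void)
{
	/*
	 * Because CRn sits above CRm and Op2, the closed interval below is
	 * exactly "op0=2, op1=1, CRn 0..7, any CRm, any Op2".
	 */
	printf("CGT_CPTR_TTA range: %#x .. %#x\n",
	       sys_reg(2, 1, 0, 0, 0), sys_reg(2, 1, 7, 15, 7));
	return 0;
}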

arch/arm64/kvm/fpsimd.c (14 additions & 5 deletions)

@@ -178,7 +178,13 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 
 	if (guest_owns_fp_regs()) {
 		if (vcpu_has_sve(vcpu)) {
-			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+			u64 zcr = read_sysreg_el1(SYS_ZCR);
+
+			/*
+			 * If the vCPU is in the hyp context then ZCR_EL1 is
+			 * loaded with its vEL2 counterpart.
+			 */
+			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
 
 			/*
 			 * Restore the VL that was saved when bound to the CPU,
@@ -189,11 +195,14 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 			 * Note that this means that at guest exit ZCR_EL1 is
 			 * not necessarily the same as on guest entry.
 			 *
-			 * Restoring the VL isn't needed in VHE mode since
-			 * ZCR_EL2 (accessed via ZCR_EL1) would fulfill the same
-			 * role when doing the save from EL2.
+			 * ZCR_EL2 holds the guest hypervisor's VL when running
+			 * a nested guest, which could be smaller than the
+			 * max for the vCPU. Similar to above, we first need to
+			 * switch to a VL consistent with the layout of the
+			 * vCPU's SVE state. KVM support for NV implies VHE, so
+			 * using the ZCR_EL1 alias is safe.
 			 */
-			if (!has_vhe())
+			if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
 				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
 						       SYS_ZCR_EL1);
 		}
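The VL juggling above rests on simple arithmetic: an SVE vector length (VL) is a byte count, a vector quadword count (VQ) is VL/16 since a quadword is 128 bits, and ZCR_ELx.LEN holds VQ - 1. The saved state layout depends on the VL in force, so the maximum VL must be programmed before any save or restore, and the nested guest's (possibly smaller) VL goes back afterwards. A small standalone illustration (the helper mirrors the kernel's sve_vq_from_vl() but is redefined locally, and the example VLs are arbitrary):

#include <stdio.h>

#define SVE_VQ_BYTES	16	/* one quadword = 128 bits */

/* Local stand-in for the kernel's sve_vq_from_vl(); VL is in bytes. */
static unsigned int sve_vq_from_vl(unsigned int vl)
{
	return vl / SVE_VQ_BYTES;
}

int main(void)
{
	unsigned int max_vl = 64;	/* e.g. a 512-bit vCPU max VL */
	unsigned int l1_vl  = 32;	/* guest hypervisor picked 256 bits */

	/* Save/restore must run at max VL: the state layout depends on it. */
	printf("ZCR.LEN for save/restore: %u\n", sve_vq_from_vl(max_vl) - 1);
	/* ...then the nested guest's effective VL goes back into ZCR_EL2. */
	printf("ZCR.LEN while L2 runs:    %u\n", sve_vq_from_vl(l1_vl) - 1);
	return 0;
}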

arch/arm64/kvm/handle_exit.c (15 additions & 4 deletions)

@@ -94,11 +94,19 @@ static int handle_smc(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Guest access to FP/ASIMD registers are routed to this handler only
- * when the system doesn't support FP/ASIMD.
+ * This handles the cases where the system does not support FP/ASIMD or when
+ * we are running nested virtualization and the guest hypervisor is trapping
+ * FP/ASIMD accesses by its guest.
+ *
+ * All other handling of guest vs. host FP/ASIMD register state is handled in
+ * fixup_guest_exit().
  */
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
 {
+	if (guest_hyp_fpsimd_traps_enabled(vcpu))
+		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+	/* This is the case when the system doesn't support FP/ASIMD. */
 	kvm_inject_undefined(vcpu);
 	return 1;
 }
@@ -209,6 +217,9 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
  */
 static int handle_sve(struct kvm_vcpu *vcpu)
 {
+	if (guest_hyp_sve_traps_enabled(vcpu))
+		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
 	kvm_inject_undefined(vcpu);
 	return 1;
 }
@@ -304,7 +315,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
 	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
 	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
-	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
+	[ESR_ELx_EC_FP_ASIMD]	= kvm_handle_fpasimd,
 	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
 };
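Both forwarding paths above re-inject the original ESR_EL2 value into virtual EL2, so the guest hypervisor observes the same syndrome the hardware produced. For illustration, a standalone sketch that builds the syndrome an SVE trap carries, mirroring kvm_inject_nested_sve_trap() from kvm_emulate.h with locally defined constants (EC values and field positions per the Arm ARM, restated here rather than taken from kernel headers):

#include <stdint.h>
#include <stdio.h>

/* ESR_ELx field positions, defined locally for this sketch. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_SVE		0x19	/* EC for SVE access traps */
#define ESR_ELx_EC_FP_ASIMD	0x07	/* EC for FP/ASIMD access traps */
#define ESR_ELx_IL		(UINT64_C(1) << 25)

int main(void)
{
	/* The syndrome a forwarded SVE trap carries into virtual EL2. */
	uint64_t esr = ((uint64_t)ESR_ELx_EC_SVE << ESR_ELx_EC_SHIFT) |
		       ESR_ELx_IL;

	printf("forwarded SVE trap ESR: %#llx\n", (unsigned long long)esr);
	return 0;
}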

arch/arm64/kvm/hyp/include/hyp/switch.h (23 additions & 1 deletion)

@@ -314,11 +314,24 @@ static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * The vCPU's saved SVE state layout always matches the max VL of the
+	 * vCPU. Start off with the max VL so we can load the SVE state.
+	 */
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
 	__sve_restore_state(vcpu_sve_pffr(vcpu),
 			    &vcpu->arch.ctxt.fp_regs.fpsr,
 			    true);
-	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+
+	/*
+	 * The effective VL for a VM could differ from the max VL when running
+	 * a nested guest, as the guest hypervisor could select a smaller VL.
+	 * Slap that into hardware before wrapping up.
+	 */
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);
+
+	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
 }
 
 static inline void __hyp_sve_save_host(void)
@@ -354,10 +367,19 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	/* Only handle traps the vCPU can support here: */
 	switch (esr_ec) {
 	case ESR_ELx_EC_FP_ASIMD:
+		/* Forward traps to the guest hypervisor as required */
+		if (guest_hyp_fpsimd_traps_enabled(vcpu))
+			return false;
 		break;
+	case ESR_ELx_EC_SYS64:
+		if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
+			return false;
+		fallthrough;
 	case ESR_ELx_EC_SVE:
 		if (!sve_guest)
 			return false;
+		if (guest_hyp_sve_traps_enabled(vcpu))
+			return false;
 		break;
 	default:
 		return false;
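The hyp-side filter above decides between handling a trap lazily (restore guest FP/SVE state and re-enter the guest) and punting back to the regular exit handlers, which may then forward the exception to virtual EL2; the ESR_ELx_EC_SYS64 case covers ZCR_EL2 accesses from the virtual EL2 context. A rough standalone model of that decision, with illustrative names that are not the kernel's types:

#include <stdbool.h>

/*
 * Hypothetical model of the lazy-FP trap filter: 'true' means restore guest
 * FP/SVE state and re-enter; 'false' punts to the regular exit handlers
 * (e.g. to forward the trap to virtual EL2). Not kernel code.
 */
enum ec { EC_FP_ASIMD, EC_SYS64, EC_SVE, EC_OTHER };

static bool handle_lazily(enum ec esr_ec, bool sve_guest, bool is_hyp_ctxt,
			  bool hyp_fp_traps, bool hyp_sve_traps)
{
	switch (esr_ec) {
	case EC_FP_ASIMD:
		return !hyp_fp_traps;
	case EC_SYS64:			/* ZCR_EL2 access from vEL2 */
		if (!is_hyp_ctxt)
			return false;
		/* fallthrough */
	case EC_SVE:
		return sve_guest && !hyp_sve_traps;
	default:
		return false;
	}
}

int main(void)
{
	/* An SVE trap from L2 while L1 traps SVE: punt, then forward to vEL2. */
	return handle_lazily(EC_SVE, true, false, false, true) ? 1 : 0;
}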
