@@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
+static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
+{
+	__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+	/*
+	 * On saving/restoring guest sve state, always use the maximum VL for
+	 * the guest. The layout of the data when saving the sve state depends
+	 * on the VL, so use a consistent (i.e., the maximum) guest VL.
+	 */
+	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+	__sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
+	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
+static void __hyp_sve_restore_host(void)
+{
+	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
+
+	/*
+	 * On saving/restoring host sve state, always use the maximum VL for
+	 * the host. The layout of the data when saving the sve state depends
+	 * on the VL, so use a consistent (i.e., the maximum) host VL.
+	 *
+	 * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
+	 * supported by the system (or limited at EL3).
+	 */
+	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+	__sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
+			    &sve_state->fpsr,
+			    true);
+	write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
+}
+
+static void fpsimd_sve_flush(void)
+{
+	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
+static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
+{
+	if (!guest_owns_fp_regs())
+		return;
+
+	cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
+	isb();
+
+	if (vcpu_has_sve(vcpu))
+		__hyp_sve_save_guest(vcpu);
+	else
+		__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
+
+	if (system_supports_sve())
+		__hyp_sve_restore_host();
+	else
+		__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+
+	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+}
+
 static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
 	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
 
+	fpsimd_sve_flush();
+
 	hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
 
 	hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
-	hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+	/* Limit guest vector length to the maximum supported by the host. */
+	hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl);
 
 	hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
 
 	hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
 	hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
-	hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
 
 	hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
 
@@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
 	unsigned int i;
 
+	fpsimd_sve_sync(&hyp_vcpu->vcpu);
+
 	host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
 
 	host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
-	host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
 
 	host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
 
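The in-code comments above stress that the layout of saved SVE state depends on the vector length, which is why both helpers pin ZCR_EL2 to a fixed (maximum) VL around the save and restore. Below is a minimal sketch of that dependency, assuming only the architectural SVE register file (32 Z registers of VQ*16 bytes, 16 P registers plus the FFR at VQ*2 bytes each); the names sketch_ffr_offset()/sketch_sve_state_bytes() are illustrative helpers, not the kernel's sve_ffr_offset()/__sve_save_state() implementations.

```c
#include <stddef.h>
#include <stdio.h>

/* One Z register is VQ * 128 bits; one P register (and the FFR) is 1/8 of that. */
#define ZREG_BYTES(vq)	((size_t)(vq) * 16)
#define PREG_BYTES(vq)	((size_t)(vq) * 2)

/* Offset of the FFR in a flat save area: after 32 Z regs and 16 P regs. */
size_t sketch_ffr_offset(unsigned int vq)
{
	return 32 * ZREG_BYTES(vq) + 16 * PREG_BYTES(vq);
}

/* Total bytes needed for one CPU's SVE register state at a given VQ. */
size_t sketch_sve_state_bytes(unsigned int vq)
{
	return sketch_ffr_offset(vq) + PREG_BYTES(vq);
}

int main(void)
{
	/*
	 * Every offset past the Z registers moves with VQ, so state saved at
	 * one VL cannot be reinterpreted at another.
	 */
	printf("VQ=1:  FFR at %zu, total %zu bytes\n",
	       sketch_ffr_offset(1), sketch_sve_state_bytes(1));
	printf("VQ=16: FFR at %zu, total %zu bytes\n",
	       sketch_ffr_offset(16), sketch_sve_state_bytes(16));
	return 0;
}
```

The same VL dependence motivates the min() clamp in flush_hyp_vcpu(): the guest's vector length is never allowed to exceed the maximum supported by the host.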