Skip to content

Commit e62dd50

Browse files
mrutland-arm authored and Marc Zyngier committed
KVM: arm64: Reorganise CPTR trap manipulation
The NVHE/HVHE and VHE modes have separate implementations of __activate_cptr_traps() and __deactivate_cptr_traps() in their respective switch.c files. There's some duplication of logic, and it's not currently possible to reuse this logic elsewhere. Move the logic into the common switch.h header so that it can be reused, and de-duplicate the common logic. This rework changes the way SVE traps are deactivated in VHE mode, aligning it with NVHE/HVHE modes: * Before this patch, VHE's __deactivate_cptr_traps() would unconditionally enable SVE for host EL2 (but not EL0), regardless of whether the ARM64_SVE cpucap was set. * After this patch, VHE's __deactivate_cptr_traps() will take the ARM64_SVE cpucap into account. When ARM64_SVE is not set, SVE will be trapped from EL2 and below. The old and new behaviour are both benign: * When ARM64_SVE is not set, the host will not touch SVE state, and will not reconfigure SVE traps. Host EL0 access to SVE will be trapped as expected. * When ARM64_SVE is set, the host will configure EL0 SVE traps before returning to EL0 as part of reloading the EL0 FPSIMD/SVE/SME state. Signed-off-by: Mark Rutland <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Fuad Tabba <[email protected]> Cc: Marc Zyngier <[email protected]> Cc: Mark Brown <[email protected]> Cc: Oliver Upton <[email protected]> Cc: Will Deacon <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Marc Zyngier <[email protected]>
1 parent 257d0aa commit e62dd50

File tree

3 files changed

+130
-140
lines changed

3 files changed

+130
-140
lines changed

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,136 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
6565
}
6666
}
6767

68+
static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
69+
{
70+
u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;
71+
72+
/*
73+
* Always trap SME since it's not supported in KVM.
74+
* TSM is RES1 if SME isn't implemented.
75+
*/
76+
val |= CPTR_EL2_TSM;
77+
78+
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
79+
val |= CPTR_EL2_TZ;
80+
81+
if (!guest_owns_fp_regs())
82+
val |= CPTR_EL2_TFP;
83+
84+
write_sysreg(val, cptr_el2);
85+
}
86+
87+
static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
88+
{
89+
/*
90+
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
91+
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
92+
* except for some missing controls, such as TAM.
93+
* In this case, CPTR_EL2.TAM has the same position with or without
94+
* VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
95+
* shift value for trapping the AMU accesses.
96+
*/
97+
u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
98+
u64 cptr;
99+
100+
if (guest_owns_fp_regs()) {
101+
val |= CPACR_EL1_FPEN;
102+
if (vcpu_has_sve(vcpu))
103+
val |= CPACR_EL1_ZEN;
104+
}
105+
106+
if (!vcpu_has_nv(vcpu))
107+
goto write;
108+
109+
/*
110+
* The architecture is a bit crap (what a surprise): an EL2 guest
111+
* writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
112+
* as they are RES0 in the guest's view. To work around it, trap the
113+
* sucker using the very same bit it can't set...
114+
*/
115+
if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
116+
val |= CPTR_EL2_TCPAC;
117+
118+
/*
119+
* Layer the guest hypervisor's trap configuration on top of our own if
120+
* we're in a nested context.
121+
*/
122+
if (is_hyp_ctxt(vcpu))
123+
goto write;
124+
125+
cptr = vcpu_sanitised_cptr_el2(vcpu);
126+
127+
/*
128+
* Pay attention, there's some interesting detail here.
129+
*
130+
* The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
131+
* meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
132+
*
133+
* - CPTR_EL2.xEN = x0, traps are enabled
134+
* - CPTR_EL2.xEN = x1, traps are disabled
135+
*
136+
* In other words, bit[0] determines if guest accesses trap or not. In
137+
* the interest of simplicity, clear the entire field if the guest
138+
* hypervisor has traps enabled to dispel any illusion of something more
139+
* complicated taking place.
140+
*/
141+
if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
142+
val &= ~CPACR_EL1_FPEN;
143+
if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
144+
val &= ~CPACR_EL1_ZEN;
145+
146+
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
147+
val |= cptr & CPACR_EL1_E0POE;
148+
149+
val |= cptr & CPTR_EL2_TCPAC;
150+
151+
write:
152+
write_sysreg(val, cpacr_el1);
153+
}
154+
155+
static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
156+
{
157+
if (!guest_owns_fp_regs())
158+
__activate_traps_fpsimd32(vcpu);
159+
160+
if (has_vhe() || has_hvhe())
161+
__activate_cptr_traps_vhe(vcpu);
162+
else
163+
__activate_cptr_traps_nvhe(vcpu);
164+
}
165+
166+
static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
167+
{
168+
u64 val = CPTR_NVHE_EL2_RES1;
169+
170+
if (!cpus_have_final_cap(ARM64_SVE))
171+
val |= CPTR_EL2_TZ;
172+
if (!cpus_have_final_cap(ARM64_SME))
173+
val |= CPTR_EL2_TSM;
174+
175+
write_sysreg(val, cptr_el2);
176+
}
177+
178+
static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
179+
{
180+
u64 val = CPACR_EL1_FPEN;
181+
182+
if (cpus_have_final_cap(ARM64_SVE))
183+
val |= CPACR_EL1_ZEN;
184+
if (cpus_have_final_cap(ARM64_SME))
185+
val |= CPACR_EL1_SMEN;
186+
187+
write_sysreg(val, cpacr_el1);
188+
}
189+
190+
static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
191+
{
192+
if (has_vhe() || has_hvhe())
193+
__deactivate_cptr_traps_vhe(vcpu);
194+
else
195+
__deactivate_cptr_traps_nvhe(vcpu);
196+
}
197+
68198
#define reg_to_fgt_masks(reg) \
69199
({ \
70200
struct fgt_masks *m; \

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 0 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -47,65 +47,6 @@ struct fgt_masks hdfgwtr2_masks;
4747

4848
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
4949

50-
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
51-
{
52-
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
53-
54-
if (!guest_owns_fp_regs())
55-
__activate_traps_fpsimd32(vcpu);
56-
57-
if (has_hvhe()) {
58-
val |= CPACR_EL1_TTA;
59-
60-
if (guest_owns_fp_regs()) {
61-
val |= CPACR_EL1_FPEN;
62-
if (vcpu_has_sve(vcpu))
63-
val |= CPACR_EL1_ZEN;
64-
}
65-
66-
write_sysreg(val, cpacr_el1);
67-
} else {
68-
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
69-
70-
/*
71-
* Always trap SME since it's not supported in KVM.
72-
* TSM is RES1 if SME isn't implemented.
73-
*/
74-
val |= CPTR_EL2_TSM;
75-
76-
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
77-
val |= CPTR_EL2_TZ;
78-
79-
if (!guest_owns_fp_regs())
80-
val |= CPTR_EL2_TFP;
81-
82-
write_sysreg(val, cptr_el2);
83-
}
84-
}
85-
86-
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
87-
{
88-
if (has_hvhe()) {
89-
u64 val = CPACR_EL1_FPEN;
90-
91-
if (cpus_have_final_cap(ARM64_SVE))
92-
val |= CPACR_EL1_ZEN;
93-
if (cpus_have_final_cap(ARM64_SME))
94-
val |= CPACR_EL1_SMEN;
95-
96-
write_sysreg(val, cpacr_el1);
97-
} else {
98-
u64 val = CPTR_NVHE_EL2_RES1;
99-
100-
if (!cpus_have_final_cap(ARM64_SVE))
101-
val |= CPTR_EL2_TZ;
102-
if (!cpus_have_final_cap(ARM64_SME))
103-
val |= CPTR_EL2_TSM;
104-
105-
write_sysreg(val, cptr_el2);
106-
}
107-
}
108-
10950
static void __activate_traps(struct kvm_vcpu *vcpu)
11051
{
11152
___activate_traps(vcpu, vcpu->arch.hcr_el2);

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 0 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -90,87 +90,6 @@ static u64 __compute_hcr(struct kvm_vcpu *vcpu)
9090
return hcr | (guest_hcr & ~NV_HCR_GUEST_EXCLUDE);
9191
}
9292

93-
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
94-
{
95-
u64 cptr;
96-
97-
/*
98-
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
99-
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
100-
* except for some missing controls, such as TAM.
101-
* In this case, CPTR_EL2.TAM has the same position with or without
102-
* VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
103-
* shift value for trapping the AMU accesses.
104-
*/
105-
u64 val = CPACR_EL1_TTA | CPTR_EL2_TAM;
106-
107-
if (guest_owns_fp_regs()) {
108-
val |= CPACR_EL1_FPEN;
109-
if (vcpu_has_sve(vcpu))
110-
val |= CPACR_EL1_ZEN;
111-
} else {
112-
__activate_traps_fpsimd32(vcpu);
113-
}
114-
115-
if (!vcpu_has_nv(vcpu))
116-
goto write;
117-
118-
/*
119-
* The architecture is a bit crap (what a surprise): an EL2 guest
120-
* writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
121-
* as they are RES0 in the guest's view. To work around it, trap the
122-
* sucker using the very same bit it can't set...
123-
*/
124-
if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
125-
val |= CPTR_EL2_TCPAC;
126-
127-
/*
128-
* Layer the guest hypervisor's trap configuration on top of our own if
129-
* we're in a nested context.
130-
*/
131-
if (is_hyp_ctxt(vcpu))
132-
goto write;
133-
134-
cptr = vcpu_sanitised_cptr_el2(vcpu);
135-
136-
/*
137-
* Pay attention, there's some interesting detail here.
138-
*
139-
* The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
140-
* meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
141-
*
142-
* - CPTR_EL2.xEN = x0, traps are enabled
143-
* - CPTR_EL2.xEN = x1, traps are disabled
144-
*
145-
* In other words, bit[0] determines if guest accesses trap or not. In
146-
* the interest of simplicity, clear the entire field if the guest
147-
* hypervisor has traps enabled to dispel any illusion of something more
148-
* complicated taking place.
149-
*/
150-
if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
151-
val &= ~CPACR_EL1_FPEN;
152-
if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
153-
val &= ~CPACR_EL1_ZEN;
154-
155-
if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
156-
val |= cptr & CPACR_EL1_E0POE;
157-
158-
val |= cptr & CPTR_EL2_TCPAC;
159-
160-
write:
161-
write_sysreg(val, cpacr_el1);
162-
}
163-
164-
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
165-
{
166-
u64 val = CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN;
167-
168-
if (cpus_have_final_cap(ARM64_SME))
169-
val |= CPACR_EL1_SMEN_EL1EN;
170-
171-
write_sysreg(val, cpacr_el1);
172-
}
173-
17493
static void __activate_traps(struct kvm_vcpu *vcpu)
17594
{
17695
u64 val;

0 commit comments

Comments
 (0)