Commit b5475d8

mrutland-arm authored and ctmarinas committed
arm64: kvm: hyp: use cpus_have_final_cap()
The KVM hyp code is only run after system capabilities have been
finalized, and thus all const cap checks have been patched. This is
noted in __cpu_init_hyp_mode(), where we BUG() if called too early:

| /*
|  * Call initialization code, and switch to the full blown HYP code.
|  * If the cpucaps haven't been finalized yet, something has gone very
|  * wrong, and hyp will crash and burn when it uses any
|  * cpus_have_const_cap() wrapper.
|  */

Given this, the hyp code can use cpus_have_final_cap() and avoid
generating code to check the cpu_hwcaps array, which would be unsafe to
run in hyp context.

This patch migrates the KVM hyp code to cpus_have_final_cap(), avoiding
this redundant code generation, and making it possible to detect if we
accidentally invoke this code too early. In the latter case, the BUG()
in cpus_have_final_cap() will cause a hyp panic.

Signed-off-by: Mark Rutland <[email protected]>
Reviewed-by: Marc Zyngier <[email protected]>
Cc: James Morse <[email protected]>
Cc: Julien Thierry <[email protected]>
Cc: Suzuki Poulouse <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
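For context, the difference the commit relies on can be sketched from the arm64 cpufeature helpers of this era (arch/arm64/include/asm/cpufeature.h). The code below is an approximate, simplified reconstruction rather than verbatim kernel source: cpus_have_const_cap() falls back to a cpu_hwcaps bitmap lookup before capabilities are finalized, which is exactly the code that must not run at EL2, while cpus_have_final_cap() has no fallback and BUG()s instead.

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];

/* Patched static branch: safe anywhere once caps are finalized. */
static __always_inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

/*
 * Falls back to reading the cpu_hwcaps bitmap before finalization.
 * The fallback references a kernel-mapped array, so the code
 * generated for it is unsafe to execute in hyp context.
 */
static __always_inline bool cpus_have_const_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);	/* cpu_hwcaps lookup */
}

/*
 * No fallback: no cpu_hwcaps access is ever generated, and a call
 * made before finalization fails loudly rather than silently.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		BUG();
}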
1 parent 1db5cde commit b5475d8

File tree: 3 files changed, +15 -15 lines changed


arch/arm64/kvm/hyp/switch.c

Lines changed: 7 additions & 7 deletions
@@ -127,7 +127,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
 	write_sysreg(val, cptr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
 		isb();
@@ -146,12 +146,12 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 hcr = vcpu->arch.hcr_el2;
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
 		hcr |= HCR_TVM;
 
 	write_sysreg(hcr, hcr_el2);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
 		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
 	if (has_vhe())
@@ -181,7 +181,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
 	u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		u64 val;
 
 		/*
@@ -328,7 +328,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	 * resolve the IPA using the AT instruction.
 	 */
 	if (!(esr & ESR_ELx_S1PTW) &&
-	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
 	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
 		if (!__translate_far_to_hpfar(far, &hpfar))
 			return false;
@@ -498,7 +498,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (*exit_code != ARM_EXCEPTION_TRAP)
 		goto exit;
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
 	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
 	    handle_tx2_tvm(vcpu))
 		return true;
@@ -555,7 +555,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 
 static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
 {
-	if (!cpus_have_const_cap(ARM64_SSBD))
+	if (!cpus_have_final_cap(ARM64_SSBD))
 		return false;
 
 	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);

arch/arm64/kvm/hyp/sysreg-sr.c

Lines changed: 4 additions & 4 deletions
@@ -71,7 +71,7 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 	ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
 	ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
 }
 
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
 
-	if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
 		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
 	} else if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
 	    ctxt->__hyp_running_vcpu) {
 		/*
 		 * Must only be done for host registers, hence the context
@@ -194,7 +194,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 	write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
 	write_sysreg_el2(pstate, SYS_SPSR);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
 }
 

arch/arm64/kvm/hyp/tlb.c

Lines changed: 4 additions & 4 deletions
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 
 	local_irq_save(cxt->flags);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
 		/*
 		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
 		 * we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 						  struct tlb_inv_context *cxt)
 {
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		u64 val;
 
 		/*
@@ -103,7 +103,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 	isb();
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
 		/* Restore the registers to what they were */
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
 		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
 	write_sysreg(0, vttbr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		/* Ensure write of the host VMID */
 		isb();
 		/* Restore the host's TCR_EL1 */

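For reference, the guard quoted in the commit message sits on the caller side, in __cpu_init_hyp_mode() (arch/arm64/include/asm/kvm_host.h). The sketch below is an approximate reconstruction for this kernel version, not verbatim source:

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
			 (u64)kvm_ksym_ref(kvm_host_data));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!system_capabilities_finalized());
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
}

With the hyp code converted, a premature entry into hyp also trips the BUG() inside cpus_have_final_cap() itself, turning a silent crash-and-burn into a hyp panic.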