Skip to content

Commit c350717

Browse files
committed
Merge branch 'for-next/kvm/errata' into for-next/core
KVM CPU errata rework (Andrew Scull and Marc Zyngier)

* for-next/kvm/errata:
  KVM: arm64: Move __load_guest_stage2 to kvm_mmu.h
  arm64: Unify WORKAROUND_SPECULATIVE_AT_{NVHE,VHE}
2 parents d278652 + fe677be commit c350717

File tree

9 files changed

+68
-77
lines changed

9 files changed

+68
-77
lines changed

arch/arm64/Kconfig

Lines changed: 18 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -528,13 +528,13 @@ config ARM64_ERRATUM_1418040
528528

529529
If unsure, say Y.
530530

531-
config ARM64_WORKAROUND_SPECULATIVE_AT_VHE
531+
config ARM64_WORKAROUND_SPECULATIVE_AT
532532
bool
533533

534534
config ARM64_ERRATUM_1165522
535-
bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
535+
bool "Cortex-A76: 1165522: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
536536
default y
537-
select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
537+
select ARM64_WORKAROUND_SPECULATIVE_AT
538538
help
539539
This option adds a workaround for ARM Cortex-A76 erratum 1165522.
540540

@@ -544,10 +544,23 @@ config ARM64_ERRATUM_1165522
544544

545545
If unsure, say Y.
546546

547+
config ARM64_ERRATUM_1319367
548+
bool "Cortex-A57/A72: 1319537: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
549+
default y
550+
select ARM64_WORKAROUND_SPECULATIVE_AT
551+
help
552+
This option adds work arounds for ARM Cortex-A57 erratum 1319537
553+
and A72 erratum 1319367
554+
555+
Cortex-A57 and A72 cores could end-up with corrupted TLBs by
556+
speculating an AT instruction during a guest context switch.
557+
558+
If unsure, say Y.
559+
547560
config ARM64_ERRATUM_1530923
548-
bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
561+
bool "Cortex-A55: 1530923: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
549562
default y
550-
select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
563+
select ARM64_WORKAROUND_SPECULATIVE_AT
551564
help
552565
This option adds a workaround for ARM Cortex-A55 erratum 1530923.
553566

@@ -576,22 +589,6 @@ config ARM64_ERRATUM_1286807
576589
invalidated has been observed by other observers. The
577590
workaround repeats the TLBI+DSB operation.
578591

579-
config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
580-
bool
581-
582-
config ARM64_ERRATUM_1319367
583-
bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
584-
default y
585-
select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
586-
help
587-
This option adds work arounds for ARM Cortex-A57 erratum 1319537
588-
and A72 erratum 1319367
589-
590-
Cortex-A57 and A72 cores could end-up with corrupted TLBs by
591-
speculating an AT instruction during a guest context switch.
592-
593-
If unsure, say Y.
594-
595592
config ARM64_ERRATUM_1463225
596593
bool "Cortex-A76: Software Step might prevent interrupt recognition"
597594
default y

arch/arm64/include/asm/cpucaps.h

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@
4444
#define ARM64_SSBS 34
4545
#define ARM64_WORKAROUND_1418040 35
4646
#define ARM64_HAS_SB 36
47-
#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE 37
47+
#define ARM64_WORKAROUND_SPECULATIVE_AT 37
4848
#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
4949
#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
5050
#define ARM64_HAS_GENERIC_AUTH_ARCH 40
@@ -55,15 +55,14 @@
5555
#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
5656
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
5757
#define ARM64_WORKAROUND_1542419 47
58-
#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48
59-
#define ARM64_HAS_E0PD 49
60-
#define ARM64_HAS_RNG 50
61-
#define ARM64_HAS_AMU_EXTN 51
62-
#define ARM64_HAS_ADDRESS_AUTH 52
63-
#define ARM64_HAS_GENERIC_AUTH 53
64-
#define ARM64_HAS_32BIT_EL1 54
65-
#define ARM64_BTI 55
58+
#define ARM64_HAS_E0PD 48
59+
#define ARM64_HAS_RNG 49
60+
#define ARM64_HAS_AMU_EXTN 50
61+
#define ARM64_HAS_ADDRESS_AUTH 51
62+
#define ARM64_HAS_GENERIC_AUTH 52
63+
#define ARM64_HAS_32BIT_EL1 53
64+
#define ARM64_BTI 54
6665

67-
#define ARM64_NCAPS 56
66+
#define ARM64_NCAPS 55
6867

6968
#endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -573,10 +573,6 @@ static inline bool kvm_arch_requires_vhe(void)
573573
if (system_supports_sve())
574574
return true;
575575

576-
/* Some implementations have defects that confine them to VHE */
577-
if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
578-
return true;
579-
580576
return false;
581577
}
582578

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
#include <linux/compiler.h>
1111
#include <linux/kvm_host.h>
1212
#include <asm/alternative.h>
13-
#include <asm/kvm_mmu.h>
1413
#include <asm/sysreg.h>
1514

1615
#define __hyp_text __section(.hyp.text) notrace
@@ -88,22 +87,5 @@ void deactivate_traps_vhe_put(void);
8887
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
8988
void __noreturn __hyp_do_panic(unsigned long, ...);
9089

91-
/*
92-
* Must be called from hyp code running at EL2 with an updated VTTBR
93-
* and interrupts disabled.
94-
*/
95-
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
96-
{
97-
write_sysreg(kvm->arch.vtcr, vtcr_el2);
98-
write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
99-
100-
/*
101-
* ARM errata 1165522 and 1530923 require the actual execution of the
102-
* above before we can switch to the EL1/EL0 translation regime used by
103-
* the guest.
104-
*/
105-
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
106-
}
107-
10890
#endif /* __ARM64_KVM_HYP_H__ */
10991

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -604,5 +604,22 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
604604
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
605605
}
606606

607+
/*
608+
* Must be called from hyp code running at EL2 with an updated VTTBR
609+
* and interrupts disabled.
610+
*/
611+
static __always_inline void __load_guest_stage2(struct kvm *kvm)
612+
{
613+
write_sysreg(kvm->arch.vtcr, vtcr_el2);
614+
write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
615+
616+
/*
617+
* ARM errata 1165522 and 1530923 require the actual execution of the
618+
* above before we can switch to the EL1/EL0 translation regime used by
619+
* the guest.
620+
*/
621+
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
622+
}
623+
607624
#endif /* __ASSEMBLY__ */
608625
#endif /* __ARM64_KVM_MMU_H__ */

arch/arm64/kernel/cpu_errata.c

Lines changed: 11 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -635,7 +635,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
635635
return is_midr_in_range(midr, &range) && has_dic;
636636
}
637637

638-
#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)
638+
#if defined(CONFIG_HARDEN_EL2_VECTORS)
639639

640640
static const struct midr_range ca57_a72[] = {
641641
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -757,12 +757,16 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = {
757757
};
758758
#endif
759759

760-
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
761-
static const struct midr_range erratum_speculative_at_vhe_list[] = {
760+
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
761+
static const struct midr_range erratum_speculative_at_list[] = {
762762
#ifdef CONFIG_ARM64_ERRATUM_1165522
763763
/* Cortex A76 r0p0 to r2p0 */
764764
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
765765
#endif
766+
#ifdef CONFIG_ARM64_ERRATUM_1319367
767+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
768+
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
769+
#endif
766770
#ifdef CONFIG_ARM64_ERRATUM_1530923
767771
/* Cortex A55 r0p0 to r2p0 */
768772
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
@@ -897,11 +901,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
897901
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
898902
},
899903
#endif
900-
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
904+
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
901905
{
902-
.desc = "ARM errata 1165522 or 1530923",
903-
.capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
904-
ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
906+
.desc = "ARM errata 1165522, 1319367, or 1530923",
907+
.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
908+
ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
905909
},
906910
#endif
907911
#ifdef CONFIG_ARM64_ERRATUM_1463225
@@ -934,13 +938,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
934938
.matches = has_neoverse_n1_erratum_1542419,
935939
.cpu_enable = cpu_enable_trap_ctr_access,
936940
},
937-
#endif
938-
#ifdef CONFIG_ARM64_ERRATUM_1319367
939-
{
940-
.desc = "ARM erratum 1319367",
941-
.capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
942-
ERRATA_MIDR_RANGE_LIST(ca57_a72),
943-
},
944941
#endif
945942
{
946943
}

arch/arm64/kvm/hyp/switch.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
138138

139139
write_sysreg(val, cptr_el2);
140140

141-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
141+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
142142
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
143143

144144
isb();
@@ -181,7 +181,7 @@ static void deactivate_traps_vhe(void)
181181
* above before we can switch to the EL2/EL0 translation regime used by
182182
* the host.
183183
*/
184-
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
184+
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
185185

186186
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
187187
write_sysreg(vectors, vbar_el1);
@@ -192,7 +192,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
192192
{
193193
u64 mdcr_el2 = read_sysreg(mdcr_el2);
194194

195-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
195+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
196196
u64 val;
197197

198198
/*

arch/arm64/kvm/hyp/sysreg-sr.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
118118
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
119119
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
120120

121-
if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
121+
if (has_vhe() ||
122+
!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
122123
write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
123124
write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
124125
} else if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +150,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
149150
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
150151
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
151152

152-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
153+
if (!has_vhe() &&
154+
cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
153155
ctxt->__hyp_running_vcpu) {
154156
/*
155157
* Must only be done for host registers, hence the context

arch/arm64/kvm/hyp/tlb.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
2323

2424
local_irq_save(cxt->flags);
2525

26-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
26+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
2727
/*
2828
* For CPUs that are affected by ARM errata 1165522 or 1530923,
2929
* we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
6363
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
6464
struct tlb_inv_context *cxt)
6565
{
66-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
66+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
6767
u64 val;
6868

6969
/*
@@ -79,8 +79,9 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
7979
isb();
8080
}
8181

82+
/* __load_guest_stage2() includes an ISB for the workaround. */
8283
__load_guest_stage2(kvm);
83-
isb();
84+
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
8485
}
8586

8687
static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
@@ -103,7 +104,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
103104
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
104105
isb();
105106

106-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
107+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
107108
/* Restore the registers to what they were */
108109
write_sysreg_el1(cxt->tcr, SYS_TCR);
109110
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +118,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
117118
{
118119
write_sysreg(0, vttbr_el2);
119120

120-
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
121+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
121122
/* Ensure write of the host VMID */
122123
isb();
123124
/* Restore the host's TCR_EL1 */

0 commit comments

Comments (0)