
Commit 02ab1f5

AndrewScull authored and willdeacon committed
arm64: Unify WORKAROUND_SPECULATIVE_AT_{NVHE,VHE}
Errata 1165522, 1319367 and 1530923 each allow TLB entries to be allocated as a result of a speculative AT instruction. In order to avoid mandating VHE on certain affected CPUs, apply the workaround to both the nVHE and the VHE case for all affected CPUs.

Signed-off-by: Andrew Scull <[email protected]>
Acked-by: Will Deacon <[email protected]>
CC: Marc Zyngier <[email protected]>
CC: James Morse <[email protected]>
CC: Suzuki K Poulose <[email protected]>
CC: Will Deacon <[email protected]>
CC: Steven Price <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
1 parent 6a8b55e commit 02ab1f5
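The whole workaround now hangs off the single ARM64_WORKAROUND_SPECULATIVE_AT capability; the parts that only matter without VHE are additionally gated on !has_vhe(), as the sysreg-sr.c hunks below do inline. A minimal sketch of that combined condition, using a made-up helper name (not part of this commit; cpus_have_final_cap() and has_vhe() are the real interfaces used in the diffs):

	/* Hypothetical helper, not in this patch: true when the nVHE-only
	 * half of the unified workaround applies on this system. */
	static inline bool speculative_at_needs_nvhe_handling(void)
	{
		return !has_vhe() &&
		       cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT);
	}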

File tree: 8 files changed, +50 -58 lines changed


arch/arm64/Kconfig

Lines changed: 18 additions & 21 deletions
@@ -524,13 +524,13 @@ config ARM64_ERRATUM_1418040
 
	  If unsure, say Y.
 
-config ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+config ARM64_WORKAROUND_SPECULATIVE_AT
	bool
 
 config ARM64_ERRATUM_1165522
-	bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	bool "Cortex-A76: 1165522: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
	default y
-	select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+	select ARM64_WORKAROUND_SPECULATIVE_AT
	help
	  This option adds a workaround for ARM Cortex-A76 erratum 1165522.
 
@@ -540,10 +540,23 @@ config ARM64_ERRATUM_1165522
 
	  If unsure, say Y.
 
+config ARM64_ERRATUM_1319367
+	bool "Cortex-A57/A72: 1319537: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	select ARM64_WORKAROUND_SPECULATIVE_AT
+	help
+	  This option adds work arounds for ARM Cortex-A57 erratum 1319537
+	  and A72 erratum 1319367
+
+	  Cortex-A57 and A72 cores could end-up with corrupted TLBs by
+	  speculating an AT instruction during a guest context switch.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_1530923
-	bool "Cortex-A55: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	bool "Cortex-A55: 1530923: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
	default y
-	select ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+	select ARM64_WORKAROUND_SPECULATIVE_AT
	help
	  This option adds a workaround for ARM Cortex-A55 erratum 1530923.
 
@@ -569,22 +582,6 @@ config ARM64_ERRATUM_1286807
	  invalidated has been observed by other observers. The
	  workaround repeats the TLBI+DSB operation.
 
-config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
-	bool
-
-config ARM64_ERRATUM_1319367
-	bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
-	default y
-	select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
-	help
-	  This option adds work arounds for ARM Cortex-A57 erratum 1319537
-	  and A72 erratum 1319367
-
-	  Cortex-A57 and A72 cores could end-up with corrupted TLBs by
-	  speculating an AT instruction during a guest context switch.
-
-	  If unsure, say Y.
-
 config ARM64_ERRATUM_1463225
	bool "Cortex-A76: Software Step might prevent interrupt recognition"
	default y

arch/arm64/include/asm/cpucaps.h

Lines changed: 7 additions & 8 deletions
@@ -44,7 +44,7 @@
 #define ARM64_SSBS 34
 #define ARM64_WORKAROUND_1418040 35
 #define ARM64_HAS_SB 36
-#define ARM64_WORKAROUND_SPECULATIVE_AT_VHE 37
+#define ARM64_WORKAROUND_SPECULATIVE_AT 37
 #define ARM64_HAS_ADDRESS_AUTH_ARCH 38
 #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
 #define ARM64_HAS_GENERIC_AUTH_ARCH 40
@@ -55,13 +55,12 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
 #define ARM64_WORKAROUND_1542419 47
-#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE 48
-#define ARM64_HAS_E0PD 49
-#define ARM64_HAS_RNG 50
-#define ARM64_HAS_AMU_EXTN 51
-#define ARM64_HAS_ADDRESS_AUTH 52
-#define ARM64_HAS_GENERIC_AUTH 53
+#define ARM64_HAS_E0PD 48
+#define ARM64_HAS_RNG 49
+#define ARM64_HAS_AMU_EXTN 50
+#define ARM64_HAS_ADDRESS_AUTH 51
+#define ARM64_HAS_GENERIC_AUTH 52
 
-#define ARM64_NCAPS 54
+#define ARM64_NCAPS 53
 
 #endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 4 deletions
@@ -573,10 +573,6 @@ static inline bool kvm_arch_requires_vhe(void)
	if (system_supports_sve())
		return true;
 
-	/* Some implementations have defects that confine them to VHE */
-	if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
-		return true;
-
	return false;
 }
 
arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
-	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
+	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
 #endif /* __ARM64_KVM_HYP_H__ */

arch/arm64/kernel/cpu_errata.c

Lines changed: 11 additions & 14 deletions
@@ -635,7 +635,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
	return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)
+#if defined(CONFIG_HARDEN_EL2_VECTORS)
 
 static const struct midr_range ca57_a72[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -757,12 +757,16 @@ static const struct arm64_cpu_capabilities erratum_843419_list[] = {
 };
 #endif
 
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
-static const struct midr_range erratum_speculative_at_vhe_list[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+static const struct midr_range erratum_speculative_at_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1319367
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
@@ -897,11 +901,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
	},
 #endif
-#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
-		.desc = "ARM errata 1165522, 1530923",
-		.capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
-		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
+		.desc = "ARM errata 1165522, 1319367, 1530923",
+		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
+		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
@@ -934,13 +938,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
-#endif
-#ifdef CONFIG_ARM64_ERRATUM_1319367
-	{
-		.desc = "ARM erratum 1319367",
-		.capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
-		ERRATA_MIDR_RANGE_LIST(ca57_a72),
-	},
 #endif
	{
	}
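The A57/A72 MIDRs now sit in the same erratum_speculative_at_list as the A76 and A55 entries, so a single capability entry covers all three errata. As a hedged sketch only (the helper below is hypothetical and not in this commit; is_midr_in_range_list() and read_cpuid_id() are existing arm64 helpers, and the errata table reaches the same result through ERRATA_MIDR_RANGE_LIST()):

	/* Hypothetical helper, not in this commit: match the current CPU's
	 * MIDR against the merged list, i.e. one question for all three
	 * errata. */
	static bool this_cpu_has_speculative_at_erratum(void)
	{
		return is_midr_in_range_list(read_cpuid_id(),
					     erratum_speculative_at_list);
	}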

arch/arm64/kvm/hyp/switch.c

Lines changed: 3 additions & 3 deletions
@@ -138,7 +138,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
	write_sysreg(val, cptr_el2);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
		isb();
@@ -181,7 +181,7 @@ static void deactivate_traps_vhe(void)
	 * above before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
-	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
+	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
@@ -192,7 +192,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
	u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;
 
		/*

arch/arm64/kvm/hyp/sysreg-sr.c

Lines changed: 4 additions & 2 deletions
@@ -118,7 +118,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
 
-	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (has_vhe() ||
+	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +150,8 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+	if (!has_vhe() &&
+	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context

arch/arm64/kvm/hyp/tlb.c

Lines changed: 6 additions & 5 deletions
@@ -23,7 +23,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 
	local_irq_save(cxt->flags);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
		 * we cannot trust stage-1 to be in a correct state at that
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
						   struct tlb_inv_context *cxt)
 {
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;
 
		/*
@@ -79,8 +79,9 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
		isb();
	}
 
+	/* __load_guest_stage2() includes an ISB for the workaround. */
	__load_guest_stage2(kvm);
-	isb();
+	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
 static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
@@ -103,7 +104,7 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
@@ -117,7 +118,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
	write_sysreg(0, vttbr_el2);
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
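The hunk in __tlb_switch_to_guest_nvhe() is the subtle one: __load_guest_stage2() now ends with ALTERNATIVE("nop", "isb", ...), so affected CPUs get their ISB inside the loader, while the follow-up ALTERNATIVE("isb", "nop", ...) keeps the explicit ISB only on unaffected CPUs. Either way, exactly one ISB executes after the stage-2 switch. Restated side by side, with comments added here for illustration only:

	/* Inside __load_guest_stage2(): patched to an ISB only on CPUs with
	 * ARM64_WORKAROUND_SPECULATIVE_AT (see the kvm_hyp.h hunk above). */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* After the call in __tlb_switch_to_guest_nvhe(): the mirror image,
	 * so unaffected CPUs still get their single ISB. */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));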
