|
12 | 12 | #include <asm/cpu.h>
|
13 | 13 | #include <asm/cputype.h>
|
14 | 14 | #include <asm/cpufeature.h>
|
| 15 | +#include <asm/smp_plat.h> |
15 | 16 |
|
16 | 17 | static bool __maybe_unused
|
17 | 18 | is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
|
@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
|
623 | 624 | return (need_wa > 0);
|
624 | 625 | }
|
625 | 626 |
|
/*
 * MIDR ranges covering the ThunderX2 family: the part shipped both as
 * Broadcom Vulcan and as Cavium ThunderX2, so both IDs must be matched.
 * Terminated by an empty entry, as required by the midr_range list
 * helpers (e.g. is_affected_midr_range_list()).
 */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
| 632 | + |
| 633 | +static bool __maybe_unused |
| 634 | +needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, |
| 635 | + int scope) |
| 636 | +{ |
| 637 | + int i; |
| 638 | + |
| 639 | + if (!is_affected_midr_range_list(entry, scope) || |
| 640 | + !is_hyp_mode_available()) |
| 641 | + return false; |
| 642 | + |
| 643 | + for_each_possible_cpu(i) { |
| 644 | + if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) |
| 645 | + return true; |
| 646 | + } |
| 647 | + |
| 648 | + return false; |
| 649 | +} |
| 650 | + |
626 | 651 | #ifdef CONFIG_HARDEN_EL2_VECTORS
|
627 | 652 |
|
628 | 653 | static const struct midr_range arm64_harden_el2_vectors[] = {
|
@@ -851,6 +876,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
851 | 876 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
852 | 877 | .matches = has_cortex_a76_erratum_1463225,
|
853 | 878 | },
|
| 879 | +#endif |
| 880 | +#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 |
| 881 | + { |
| 882 | + .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", |
| 883 | + .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, |
| 884 | + ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), |
| 885 | + .matches = needs_tx2_tvm_workaround, |
| 886 | + }, |
854 | 887 | #endif
|
855 | 888 | {
|
856 | 889 | }
|
|
0 commit comments