|
27 | 27 |
|
28 | 28 | #include "cpu.h"
|
29 | 29 |
|
30 |
| -static const int amd_erratum_383[]; |
31 |
| -static const int amd_erratum_400[]; |
32 |
| -static const int amd_erratum_1054[]; |
33 |
| -static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); |
34 |
| - |
35 | 30 | /*
|
36 | 31 | * nodes_per_socket: Stores the number of nodes per socket.
|
37 | 32 | * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
|
38 | 33 | * Node Identifiers[10:8]
|
39 | 34 | */
|
40 | 35 | static u32 nodes_per_socket = 1;
|
41 | 36 |
|
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

/* Leading -1 means "no OSVW id"; the trailing 0 terminates the range list. */
#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
/*
 * Range layout: family in bits 31-24, range start ((model << 4) | stepping)
 * in bits 23-12, range end ((model << 4) | stepping) in bits 11-0. The
 * START/END extractors below return the packed model-stepping value, which
 * is what cpu_has_amd_erratum() compares against.
 */
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
| 61 | + |
/* OSVW id 1; families 0xf (from model 0x41/2) and 0x10 (from model 0x2/1). */
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

/* OSVW id 3; all of family 0x10. */
static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

/* Zenbleed: family 0x17 (Zen2) models 0x30-0x4f, 0x60-0x7f, 0xa0-0xaf. */
static const int amd_zenbleed[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
| 77 | + |
| 78 | +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) |
| 79 | +{ |
| 80 | + int osvw_id = *erratum++; |
| 81 | + u32 range; |
| 82 | + u32 ms; |
| 83 | + |
| 84 | + if (osvw_id >= 0 && osvw_id < 65536 && |
| 85 | + cpu_has(cpu, X86_FEATURE_OSVW)) { |
| 86 | + u64 osvw_len; |
| 87 | + |
| 88 | + rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); |
| 89 | + if (osvw_id < osvw_len) { |
| 90 | + u64 osvw_bits; |
| 91 | + |
| 92 | + rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), |
| 93 | + osvw_bits); |
| 94 | + return osvw_bits & (1ULL << (osvw_id & 0x3f)); |
| 95 | + } |
| 96 | + } |
| 97 | + |
| 98 | + /* OSVW unavailable or ID unknown, match family-model-stepping range */ |
| 99 | + ms = (cpu->x86_model << 4) | cpu->x86_stepping; |
| 100 | + while ((range = *erratum++)) |
| 101 | + if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && |
| 102 | + (ms >= AMD_MODEL_RANGE_START(range)) && |
| 103 | + (ms <= AMD_MODEL_RANGE_END(range))) |
| 104 | + return true; |
| 105 | + |
| 106 | + return false; |
| 107 | +} |
| 108 | + |
42 | 109 | static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
|
43 | 110 | {
|
44 | 111 | u32 gprs[8] = { 0 };
|
@@ -916,6 +983,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
|
916 | 983 | }
|
917 | 984 | }
|
918 | 985 |
|
| 986 | +static bool cpu_has_zenbleed_microcode(void) |
| 987 | +{ |
| 988 | + u32 good_rev = 0; |
| 989 | + |
| 990 | + switch (boot_cpu_data.x86_model) { |
| 991 | + case 0x30 ... 0x3f: good_rev = 0x0830107a; break; |
| 992 | + case 0x60 ... 0x67: good_rev = 0x0860010b; break; |
| 993 | + case 0x68 ... 0x6f: good_rev = 0x08608105; break; |
| 994 | + case 0x70 ... 0x7f: good_rev = 0x08701032; break; |
| 995 | + case 0xa0 ... 0xaf: good_rev = 0x08a00008; break; |
| 996 | + |
| 997 | + default: |
| 998 | + return false; |
| 999 | + break; |
| 1000 | + } |
| 1001 | + |
| 1002 | + if (boot_cpu_data.microcode < good_rev) |
| 1003 | + return false; |
| 1004 | + |
| 1005 | + return true; |
| 1006 | +} |
| 1007 | + |
| 1008 | +static void zenbleed_check(struct cpuinfo_x86 *c) |
| 1009 | +{ |
| 1010 | + if (!cpu_has_amd_erratum(c, amd_zenbleed)) |
| 1011 | + return; |
| 1012 | + |
| 1013 | + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) |
| 1014 | + return; |
| 1015 | + |
| 1016 | + if (!cpu_has(c, X86_FEATURE_AVX)) |
| 1017 | + return; |
| 1018 | + |
| 1019 | + if (!cpu_has_zenbleed_microcode()) { |
| 1020 | + pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n"); |
| 1021 | + msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); |
| 1022 | + } else { |
| 1023 | + msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); |
| 1024 | + } |
| 1025 | +} |
| 1026 | + |
919 | 1027 | static void init_amd(struct cpuinfo_x86 *c)
|
920 | 1028 | {
|
921 | 1029 | early_init_amd(c);
|
@@ -1020,6 +1128,8 @@ static void init_amd(struct cpuinfo_x86 *c)
|
1020 | 1128 | if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
1021 | 1129 | cpu_has(c, X86_FEATURE_AUTOIBRS))
|
1022 | 1130 | WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
|
| 1131 | + |
| 1132 | + zenbleed_check(c); |
1023 | 1133 | }
|
1024 | 1134 |
|
1025 | 1135 | #ifdef CONFIG_X86_32
|
@@ -1115,73 +1225,6 @@ static const struct cpu_dev amd_cpu_dev = {
|
1115 | 1225 |
|
1116 | 1226 | cpu_dev_register(amd_cpu_dev);
|
1117 | 1227 |
|
1118 |
| -/* |
1119 |
| - * AMD errata checking |
1120 |
| - * |
1121 |
| - * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or |
1122 |
| - * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that |
1123 |
| - * have an OSVW id assigned, which it takes as first argument. Both take a |
1124 |
| - * variable number of family-specific model-stepping ranges created by |
1125 |
| - * AMD_MODEL_RANGE(). |
1126 |
| - * |
1127 |
| - * Example: |
1128 |
| - * |
1129 |
| - * const int amd_erratum_319[] = |
1130 |
| - * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), |
1131 |
| - * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), |
1132 |
| - * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); |
1133 |
| - */ |
1134 |
| - |
1135 |
| -#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } |
1136 |
| -#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 } |
1137 |
| -#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ |
1138 |
| - ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) |
1139 |
| -#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) |
1140 |
| -#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) |
1141 |
| -#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) |
1142 |
| - |
1143 |
| -static const int amd_erratum_400[] = |
1144 |
| - AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), |
1145 |
| - AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); |
1146 |
| - |
1147 |
| -static const int amd_erratum_383[] = |
1148 |
| - AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); |
1149 |
| - |
1150 |
| -/* #1054: Instructions Retired Performance Counter May Be Inaccurate */ |
1151 |
| -static const int amd_erratum_1054[] = |
1152 |
| - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); |
1153 |
| - |
1154 |
| -static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) |
1155 |
| -{ |
1156 |
| - int osvw_id = *erratum++; |
1157 |
| - u32 range; |
1158 |
| - u32 ms; |
1159 |
| - |
1160 |
| - if (osvw_id >= 0 && osvw_id < 65536 && |
1161 |
| - cpu_has(cpu, X86_FEATURE_OSVW)) { |
1162 |
| - u64 osvw_len; |
1163 |
| - |
1164 |
| - rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); |
1165 |
| - if (osvw_id < osvw_len) { |
1166 |
| - u64 osvw_bits; |
1167 |
| - |
1168 |
| - rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), |
1169 |
| - osvw_bits); |
1170 |
| - return osvw_bits & (1ULL << (osvw_id & 0x3f)); |
1171 |
| - } |
1172 |
| - } |
1173 |
| - |
1174 |
| - /* OSVW unavailable or ID unknown, match family-model-stepping range */ |
1175 |
| - ms = (cpu->x86_model << 4) | cpu->x86_stepping; |
1176 |
| - while ((range = *erratum++)) |
1177 |
| - if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && |
1178 |
| - (ms >= AMD_MODEL_RANGE_START(range)) && |
1179 |
| - (ms <= AMD_MODEL_RANGE_END(range))) |
1180 |
| - return true; |
1181 |
| - |
1182 |
| - return false; |
1183 |
| -} |
1184 |
| - |
1185 | 1228 | static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
|
1186 | 1229 |
|
1187 | 1230 | static unsigned int amd_msr_dr_addr_masks[] = {
|
@@ -1235,3 +1278,15 @@ u32 amd_get_highest_perf(void)
|
1235 | 1278 | return 255;
|
1236 | 1279 | }
|
1237 | 1280 | EXPORT_SYMBOL_GPL(amd_get_highest_perf);
|
| 1281 | + |
| 1282 | +static void zenbleed_check_cpu(void *unused) |
| 1283 | +{ |
| 1284 | + struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); |
| 1285 | + |
| 1286 | + zenbleed_check(c); |
| 1287 | +} |
| 1288 | + |
/*
 * Re-run the Zenbleed check on every online CPU (e.g. after a microcode
 * load changes boot_cpu_data.microcode); waits for all CPUs to finish.
 */
void amd_check_microcode(void)
{
	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
0 commit comments