
Commit 5318b98

Merge tag 'x86_bugs_pbrsb' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 eIBRS fixes from Borislav Petkov:

 "More from the CPU vulnerability nightmares front: Intel eIBRS machines
  do not sufficiently mitigate against RET mispredictions when doing a
  VM Exit therefore an additional RSB, one-entry stuffing is needed"

* tag 'x86_bugs_pbrsb' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Add LFENCE to RSB fill sequence
  x86/speculation: Add RSB VM Exit protections
2 parents eb555cb + ba6e31a commit 5318b98

9 files changed, +116 -30 lines changed

Documentation/admin-guide/hw-vuln/spectre.rst

Lines changed: 8 additions & 0 deletions

@@ -422,6 +422,14 @@ The possible values in this file are:
   'RSB filling'   Protection of RSB on context switch enabled
   =============   ===========================================
 
+ - EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
+
+  ===========================  =======================================================
+  'PBRSB-eIBRS: SW sequence'   CPU is affected and protection of RSB on VMEXIT enabled
+  'PBRSB-eIBRS: Vulnerable'    CPU is vulnerable
+  'PBRSB-eIBRS: Not affected'  CPU is not affected by PBRSB
+  ===========================  =======================================================
+
 Full mitigation might require a microcode update from the CPU
 vendor. When the necessary microcode is not available, the kernel will
 report vulnerability.
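The strings in the table above surface through the usual sysfs interface. A quick usage sketch (plain C, reading the path this document describes; error handling kept minimal):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

            if (!f)
                    return 1;
            /* On an affected eIBRS system with this fix applied, the line
             * carries one of the PBRSB-eIBRS fragments from the table. */
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }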

arch/x86/include/asm/cpufeatures.h

Lines changed: 2 additions & 0 deletions

@@ -303,6 +303,7 @@
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
 #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
@@ -457,5 +458,6 @@
 #define X86_BUG_SRBDS			X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA	X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
 #define X86_BUG_RETBLEED		X86_BUG(26) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB		X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
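Both new constants use the header's word*32+bit encoding, which boot_cpu_has() and friends ultimately index into the per-CPU capability bitmap. A self-contained sketch of that arithmetic (FEATURE_WORD/FEATURE_BIT are illustrative names, not kernel macros):

    /* X86_FEATURE_RSB_VMEXIT_LITE = 11*32+17: word 11, bit 17 of
     * x86_capability[]; X86_BUG_EIBRS_PBRSB maps to bug bit 27. */
    #define FEATURE_WORD(f) ((f) / 32)
    #define FEATURE_BIT(f)  ((f) % 32)

    _Static_assert(FEATURE_WORD(11*32 + 17) == 11, "capability word");
    _Static_assert(FEATURE_BIT(11*32 + 17) == 17, "bit within word");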

arch/x86/include/asm/msr-index.h

Lines changed: 4 additions & 0 deletions

@@ -150,6 +150,10 @@
 						 * are restricted to targets in
 						 * kernel.
 						 */
+#define ARCH_CAP_PBRSB_NO		BIT(24)	/*
+						 * Not susceptible to Post-Barrier
+						 * Return Stack Buffer Predictions.
+						 */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*
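ARCH_CAP_PBRSB_NO is an enumeration bit in MSR_IA32_ARCH_CAPABILITIES: hardware sets it to declare that it never issues post-barrier RSB predictions. A sketch of how a consumer reads it (using the kernel's x86_read_arch_cap_msr() helper, which returns 0 when the MSR is not enumerated; the function name below is illustrative):

    static bool pbrsb_reported_safe(void)	/* illustrative helper, not in this patch */
    {
            u64 ia32_cap = x86_read_arch_cap_msr();

            /* BIT(24) set => no SW sequence needed on VM exit */
            return ia32_cap & ARCH_CAP_PBRSB_NO;
    }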

arch/x86/include/asm/nospec-branch.h

Lines changed: 19 additions & 2 deletions

@@ -60,7 +60,9 @@
 774:						\
 	add	$(BITS_PER_LONG/8) * 2, sp;	\
 	dec	reg;				\
-	jnz	771b;
+	jnz	771b;				\
+	/* barrier for jnz misprediction */	\
+	lfence;
 
 #ifdef __ASSEMBLY__
 
@@ -130,13 +132,28 @@
 #endif
 .endm
 
+.macro ISSUE_UNBALANCED_RET_GUARD
+	ANNOTATE_INTRA_FUNCTION_CALL
+	call .Lunbalanced_ret_guard_\@
+	int3
+.Lunbalanced_ret_guard_\@:
+	add $(BITS_PER_LONG/8), %_ASM_SP
+	lfence
+.endm
+
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
+.ifb \ftr2
 	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
+.else
+	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
+.endif
 	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+.Lunbalanced_\@:
+	ISSUE_UNBALANCED_RET_GUARD
 .Lskip_rsb_\@:
 .endm
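ISSUE_UNBALANCED_RET_GUARD amounts to one CALL whose return address is immediately discarded, followed by LFENCE so the CALL retires before the first unbalanced RET; the int3 only traps straight-line speculation past the call. For readers who prefer C, a rough x86-64-only inline-asm paraphrase (a sketch, not the macro itself; 8 here is BITS_PER_LONG/8 on 64-bit):

    static inline void unbalanced_ret_guard(void)	/* illustrative */
    {
            asm volatile("call 1f\n\t"
                         "int3\n"			/* speculation trap, never architecturally reached */
                         "1: add $8, %%rsp\n\t"	/* discard the pushed return address */
                         "lfence"			/* ensure the call retires first */
                         ::: "memory");
    }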

arch/x86/kernel/cpu/bugs.c

Lines changed: 63 additions & 23 deletions

@@ -1335,6 +1335,53 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
 	}
 }
 
+static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+{
+	/*
+	 * Similar to context switches, there are two types of RSB attacks
+	 * after VM exit:
+	 *
+	 * 1) RSB underflow
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 * When retpoline is enabled, both are mitigated by filling/clearing
+	 * the RSB.
+	 *
+	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+	 * prediction isolation protections, RSB still needs to be cleared
+	 * because of #2. Note that SMEP provides no protection here, unlike
+	 * user-space-poisoned RSB entries.
+	 *
+	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
+	 * bug is present then a LITE version of RSB protection is required,
+	 * just a single call needs to retire before a RET is executed.
+	 */
+	switch (mode) {
+	case SPECTRE_V2_NONE:
+		return;
+
+	case SPECTRE_V2_EIBRS_LFENCE:
+	case SPECTRE_V2_EIBRS:
+		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+		}
+		return;
+
+	case SPECTRE_V2_EIBRS_RETPOLINE:
+	case SPECTRE_V2_RETPOLINE:
+	case SPECTRE_V2_LFENCE:
+	case SPECTRE_V2_IBRS:
+		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
+		return;
+	}
+
+	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
+	dump_stack();
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1485,28 +1532,7 @@ static void __init spectre_v2_select_mitigation(void)
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
-	/*
-	 * Similar to context switches, there are two types of RSB attacks
-	 * after vmexit:
-	 *
-	 * 1) RSB underflow
-	 *
-	 * 2) Poisoned RSB entry
-	 *
-	 * When retpoline is enabled, both are mitigated by filling/clearing
-	 * the RSB.
-	 *
-	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
-	 * prediction isolation protections, RSB still needs to be cleared
-	 * because of #2. Note that SMEP provides no protection here, unlike
-	 * user-space-poisoned RSB entries.
-	 *
-	 * eIBRS, on the other hand, has RSB-poisoning protections, so it
-	 * doesn't need RSB clearing after vmexit.
-	 */
-	if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
-	    boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
-		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
 
 	/*
 	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2292,6 +2318,19 @@ static char *ibpb_state(void)
 	return "";
 }
 
+static char *pbrsb_eibrs_state(void)
+{
+	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
+		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
+			return ", PBRSB-eIBRS: SW sequence";
+		else
+			return ", PBRSB-eIBRS: Vulnerable";
+	} else {
+		return ", PBRSB-eIBRS: Not affected";
+	}
+}
+
 static ssize_t spectre_v2_show_state(char *buf)
 {
 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -2304,12 +2343,13 @@ static ssize_t spectre_v2_show_state(char *buf)
 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
 		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
 
-	return sprintf(buf, "%s%s%s%s%s%s\n",
+	return sprintf(buf, "%s%s%s%s%s%s%s\n",
 		       spectre_v2_strings[spectre_v2_enabled],
 		       ibpb_state(),
 		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
 		       stibp_state(),
 		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
+		       pbrsb_eibrs_state(),
 		       spectre_v2_module_string());
 }
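pbrsb_eibrs_state() simply contributes one more fragment to the spectre_v2 sysfs line built by spectre_v2_show_state(). On an affected eIBRS host with the lite sequence forced on, the resulting line would read something like (illustrative; the IBPB/STIBP fragments depend on the other *_state() helpers):

    Mitigation: Enhanced IBRS, IBPB: conditional, RSB filling, PBRSB-eIBRS: SW sequence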

arch/x86/kernel/cpu/common.c

Lines changed: 10 additions & 2 deletions

@@ -1135,6 +1135,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_SWAPGS		BIT(6)
 #define NO_ITLB_MULTIHIT	BIT(7)
 #define NO_SPECTRE_V2		BIT(8)
+#define NO_EIBRS_PBRSB		BIT(9)
 
 #define VULNWL(vendor, family, model, whitelist)	\
 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
@@ -1177,7 +1178,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 
 	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
 
 	/*
 	 * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1187,7 +1188,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	 * good enough for our purposes.
 	 */
 
-	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_TREMONT,		NO_EIBRS_PBRSB),
+	VULNWL_INTEL(ATOM_TREMONT_L,		NO_EIBRS_PBRSB),
+	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
 
 	/* AMD Family 0xf - 0x12 */
 	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
@@ -1365,6 +1368,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
+	if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+	    !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
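The new cpu_set_bug_bits() clause reduces to a three-way AND. A minimal sketch of the decision (hypothetical helper name; logic taken straight from the hunk above):

    /* Affected iff eIBRS is present, the model is not whitelisted as
     * NO_EIBRS_PBRSB, and hardware does not enumerate ARCH_CAP_PBRSB_NO. */
    static bool eibrs_pbrsb_affected(bool has_eibrs, bool whitelisted, u64 ia32_cap)
    {
            return has_eibrs && !whitelisted && !(ia32_cap & ARCH_CAP_PBRSB_NO);
    }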

arch/x86/kvm/vmx/vmenter.S

Lines changed: 5 additions & 3 deletions

@@ -227,11 +227,13 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
 	 * entries and (in some cases) RSB underflow.
 	 *
 	 * eIBRS has its own protection against poisoned RSB, so it doesn't
-	 * need the RSB filling sequence. But it does need to be enabled
-	 * before the first unbalanced RET.
+	 * need the RSB filling sequence. But it does need to be enabled, and a
+	 * single call to retire, before the first unbalanced RET.
 	 */
 
-	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
+	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
+			   X86_FEATURE_RSB_VMEXIT_LITE
+
 
 	pop %_ASM_ARG2	/* @flags */
 	pop %_ASM_ARG1	/* @vmx */
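With the extra feature argument, the alternatives machinery leaves exactly one of three behaviors live at this site. A C-level paraphrase (fill_return_buffer() and issue_unbalanced_ret_guard() are hypothetical stand-ins for the assembly macros):

    if (cpu_feature_enabled(X86_FEATURE_RSB_VMEXIT))
            fill_return_buffer(RSB_CLEAR_LOOPS);	/* full 32-entry stuff: retpoline/IBRS */
    else if (cpu_feature_enabled(X86_FEATURE_RSB_VMEXIT_LITE))
            issue_unbalanced_ret_guard();		/* one retired CALL + LFENCE: eIBRS+PBRSB */
    /* else: neither feature => jump straight to .Lskip_rsb */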

tools/arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions

@@ -303,6 +303,7 @@
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
 #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */

tools/arch/x86/include/asm/msr-index.h

Lines changed: 4 additions & 0 deletions

@@ -150,6 +150,10 @@
 						 * are restricted to targets in
 						 * kernel.
 						 */
+#define ARCH_CAP_PBRSB_NO		BIT(24)	/*
+						 * Not susceptible to Post-Barrier
+						 * Return Stack Buffer Predictions.
+						 */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*
