Skip to content

Commit f84a52e

Browse files
committed
Merge tag 'x86_bugs_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 hw mitigation updates from Borislav Petkov: - A bunch of improvements, cleanups and fixlets to the SRSO mitigation machinery and other, general cleanups to the hw mitigations code, by Josh Poimboeuf - Improve the return thunk detection by objtool as it is absolutely important that the default return thunk is not used after returns have been patched. Future work to detect and report this better is pending - Other misc cleanups and fixes * tag 'x86_bugs_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits) x86/retpoline: Document some thunk handling aspects x86/retpoline: Make sure there are no unconverted return thunks due to KCSAN x86/callthunks: Delete unused "struct thunk_desc" x86/vdso: Run objtool on vdso32-setup.o objtool: Fix return thunk patching in retpolines x86/srso: Remove unnecessary semicolon x86/pti: Fix kernel warnings for pti= and nopti cmdline options x86/calldepth: Rename __x86_return_skl() to call_depth_return_thunk() x86/nospec: Refactor UNTRAIN_RET[_*] x86/rethunk: Use SYM_CODE_START[_LOCAL]_NOALIGN macros x86/srso: Disentangle rethunk-dependent options x86/srso: Move retbleed IBPB check into existing 'has_microcode' code block x86/bugs: Remove default case for fully switched enums x86/srso: Remove 'pred_cmd' label x86/srso: Unexport untraining functions x86/srso: Improve i-cache locality for alias mitigation x86/srso: Fix unret validation dependencies x86/srso: Fix vulnerability reporting for missing microcode x86/srso: Print mitigation for retbleed IBPB case x86/srso: Print actual mitigation if requested mitigation isn't possible ...
2 parents 01ae815 + 9d9c22c commit f84a52e

File tree

13 files changed

+258
-211
lines changed

13 files changed

+258
-211
lines changed

Documentation/admin-guide/hw-vuln/srso.rst

Lines changed: 17 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -46,12 +46,22 @@ The possible values in this file are:
4646

4747
The processor is not vulnerable
4848

49-
* 'Vulnerable: no microcode':
49+
* 'Vulnerable':
50+
51+
The processor is vulnerable and no mitigations have been applied.
52+
53+
* 'Vulnerable: No microcode':
5054

5155
The processor is vulnerable, no microcode extending IBPB
5256
functionality to address the vulnerability has been applied.
5357

54-
* 'Mitigation: microcode':
58+
* 'Vulnerable: Safe RET, no microcode':
59+
60+
The "Safe RET" mitigation (see below) has been applied to protect the
61+
kernel, but the IBPB-extending microcode has not been applied. User
62+
space tasks may still be vulnerable.
63+
64+
* 'Vulnerable: Microcode, no safe RET':
5565

5666
Extended IBPB functionality microcode patch has been applied. It does
5767
not address User->Kernel and Guest->Host transitions protection but it
@@ -72,11 +82,11 @@ The possible values in this file are:
7282

7383
(spec_rstack_overflow=microcode)
7484

75-
* 'Mitigation: safe RET':
85+
* 'Mitigation: Safe RET':
7686

77-
Software-only mitigation. It complements the extended IBPB microcode
78-
patch functionality by addressing User->Kernel and Guest->Host
79-
transitions protection.
87+
Combined microcode/software mitigation. It complements the
88+
extended IBPB microcode patch functionality by addressing
89+
User->Kernel and Guest->Host transitions protection.
8090

8191
Selected by default or by spec_rstack_overflow=safe-ret
8292

@@ -129,7 +139,7 @@ an indirect branch prediction barrier after having applied the required
129139
microcode patch for one's system. This mitigation comes also at
130140
a performance cost.
131141

132-
Mitigation: safe RET
142+
Mitigation: Safe RET
133143
--------------------
134144

135145
The mitigation works by ensuring all RET instructions speculate to

arch/x86/entry/vdso/Makefile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ vdso_img-$(VDSO64-y) += 64
4242
vdso_img-$(VDSOX32-y) += x32
4343
vdso_img-$(VDSO32-y) += 32
4444

45-
obj-$(VDSO32-y) += vdso32-setup.o
45+
obj-$(VDSO32-y) += vdso32-setup.o
46+
OBJECT_FILES_NON_STANDARD_vdso32-setup.o := n
4647

4748
vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
4849
vobjs32 := $(foreach F,$(vobjs32-y),$(obj)/$F)

arch/x86/include/asm/nospec-branch.h

Lines changed: 29 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -271,7 +271,7 @@
271271
.Lskip_rsb_\@:
272272
.endm
273273

274-
#ifdef CONFIG_CPU_UNRET_ENTRY
274+
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
275275
#define CALL_UNTRAIN_RET "call entry_untrain_ret"
276276
#else
277277
#define CALL_UNTRAIN_RET ""
@@ -288,38 +288,24 @@
288288
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
289289
* where we have a stack but before any RET instruction.
290290
*/
291-
.macro UNTRAIN_RET
292-
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
293-
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
291+
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
292+
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
294293
VALIDATE_UNRET_END
295294
ALTERNATIVE_3 "", \
296295
CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
297-
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
298-
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
296+
"call entry_ibpb", \ibpb_feature, \
297+
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
299298
#endif
300299
.endm
301300

302-
.macro UNTRAIN_RET_VM
303-
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
304-
defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
305-
VALIDATE_UNRET_END
306-
ALTERNATIVE_3 "", \
307-
CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
308-
"call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \
309-
__stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
310-
#endif
311-
.endm
301+
#define UNTRAIN_RET \
302+
__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)
312303

313-
.macro UNTRAIN_RET_FROM_CALL
314-
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
315-
defined(CONFIG_CALL_DEPTH_TRACKING)
316-
VALIDATE_UNRET_END
317-
ALTERNATIVE_3 "", \
318-
CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
319-
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
320-
__stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
321-
#endif
322-
.endm
304+
#define UNTRAIN_RET_VM \
305+
__UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)
306+
307+
#define UNTRAIN_RET_FROM_CALL \
308+
__UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)
323309

324310

325311
.macro CALL_DEPTH_ACCOUNT
@@ -348,26 +334,31 @@ extern void __x86_return_thunk(void);
348334
static inline void __x86_return_thunk(void) {}
349335
#endif
350336

337+
#ifdef CONFIG_CPU_UNRET_ENTRY
351338
extern void retbleed_return_thunk(void);
339+
#else
340+
static inline void retbleed_return_thunk(void) {}
341+
#endif
342+
343+
#ifdef CONFIG_CPU_SRSO
352344
extern void srso_return_thunk(void);
353345
extern void srso_alias_return_thunk(void);
346+
#else
347+
static inline void srso_return_thunk(void) {}
348+
static inline void srso_alias_return_thunk(void) {}
349+
#endif
354350

355-
extern void retbleed_untrain_ret(void);
356-
extern void srso_untrain_ret(void);
357-
extern void srso_alias_untrain_ret(void);
351+
extern void retbleed_return_thunk(void);
352+
extern void srso_return_thunk(void);
353+
extern void srso_alias_return_thunk(void);
358354

359355
extern void entry_untrain_ret(void);
360356
extern void entry_ibpb(void);
361357

362358
extern void (*x86_return_thunk)(void);
363359

364360
#ifdef CONFIG_CALL_DEPTH_TRACKING
365-
extern void __x86_return_skl(void);
366-
367-
static inline void x86_set_skl_return_thunk(void)
368-
{
369-
x86_return_thunk = &__x86_return_skl;
370-
}
361+
extern void call_depth_return_thunk(void);
371362

372363
#define CALL_DEPTH_ACCOUNT \
373364
ALTERNATIVE("", \
@@ -380,12 +371,12 @@ DECLARE_PER_CPU(u64, __x86_ret_count);
380371
DECLARE_PER_CPU(u64, __x86_stuffs_count);
381372
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
382373
#endif
383-
#else
384-
static inline void x86_set_skl_return_thunk(void) {}
374+
#else /* !CONFIG_CALL_DEPTH_TRACKING */
385375

376+
static inline void call_depth_return_thunk(void) {}
386377
#define CALL_DEPTH_ACCOUNT ""
387378

388-
#endif
379+
#endif /* CONFIG_CALL_DEPTH_TRACKING */
389380

390381
#ifdef CONFIG_RETPOLINE
391382

arch/x86/kernel/callthunks.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -48,11 +48,6 @@ EXPORT_SYMBOL_GPL(__x86_call_count);
4848

4949
extern s32 __call_sites[], __call_sites_end[];
5050

51-
struct thunk_desc {
52-
void *template;
53-
unsigned int template_size;
54-
};
55-
5651
struct core_text {
5752
unsigned long base;
5853
unsigned long end;

0 commit comments

Comments
 (0)