Skip to content

Commit dbd8f17

Browse files
pa1gupta authored and gregkh committed
x86/its: Add support for ITS-safe return thunk
commit a75bf27fe41abe658c53276a0c486c4bf9adecfc upstream.

RETs in the lower half of a cacheline may be affected by the ITS bug,
specifically when the RSB underflows. Use the ITS-safe return thunk for
such RETs.

RETs that are not patched:

- RET in the retpoline sequence does not need to be patched, because the
  sequence itself fills an RSB before RET.

- RETs in the Call Depth Tracking (CDT) thunks
  __x86_indirect_{call|jump}_thunk and call_depth_return_thunk are not
  patched, because CDT by design prevents RSB underflow.

- RETs in the .init section are not reachable after init.

- RETs that are explicitly marked safe with ANNOTATE_UNRET_SAFE.

Signed-off-by: Pawan Gupta <[email protected]>
Signed-off-by: Dave Hansen <[email protected]>
Reviewed-by: Josh Poimboeuf <[email protected]>
Reviewed-by: Alexandre Chartre <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 5e7d4f2 commit dbd8f17

File tree

8 files changed

+55
-5
lines changed

8 files changed

+55
-5
lines changed

arch/x86/include/asm/alternative.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,20 @@ extern void apply_ibt_endbr(s32 *start, s32 *end);
8181

8282
struct module;
8383

84+
#if defined(CONFIG_RETHUNK) && defined(CONFIG_OBJTOOL)
85+
extern bool cpu_wants_rethunk(void);
86+
extern bool cpu_wants_rethunk_at(void *addr);
87+
#else
88+
static __always_inline bool cpu_wants_rethunk(void)
89+
{
90+
return false;
91+
}
92+
static __always_inline bool cpu_wants_rethunk_at(void *addr)
93+
{
94+
return false;
95+
}
96+
#endif
97+
8498
#ifdef CONFIG_SMP
8599
extern void alternatives_smp_module_add(struct module *mod, char *name,
86100
void *locks, void *locks_end,

arch/x86/include/asm/nospec-branch.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -257,6 +257,12 @@ extern void __x86_return_thunk(void);
257257
static inline void __x86_return_thunk(void) {}
258258
#endif
259259

260+
#ifdef CONFIG_MITIGATION_ITS
261+
extern void its_return_thunk(void);
262+
#else
263+
static inline void its_return_thunk(void) {}
264+
#endif
265+
260266
extern void retbleed_return_thunk(void);
261267
extern void srso_return_thunk(void);
262268
extern void srso_alias_return_thunk(void);

arch/x86/kernel/alternative.c

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -614,6 +614,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
614614

615615
#ifdef CONFIG_RETHUNK
616616

617+
bool cpu_wants_rethunk(void)
618+
{
619+
return cpu_feature_enabled(X86_FEATURE_RETHUNK);
620+
}
621+
622+
bool cpu_wants_rethunk_at(void *addr)
623+
{
624+
if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
625+
return false;
626+
if (x86_return_thunk != its_return_thunk)
627+
return true;
628+
629+
return !((unsigned long)addr & 0x20);
630+
}
631+
617632
/*
618633
* Rewrite the compiler generated return thunk tail-calls.
619634
*
@@ -629,7 +644,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
629644
{
630645
int i = 0;
631646

632-
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
647+
if (cpu_wants_rethunk_at(addr)) {
633648
if (x86_return_thunk == __x86_return_thunk)
634649
return -1;
635650

arch/x86/kernel/ftrace.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
360360
goto fail;
361361

362362
ip = trampoline + size;
363-
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
363+
if (cpu_wants_rethunk_at(ip))
364364
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
365365
else
366366
memcpy(ip, retq, sizeof(retq));

arch/x86/kernel/static_call.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
7979
break;
8080

8181
case RET:
82-
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
82+
if (cpu_wants_rethunk_at(insn))
8383
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
8484
else
8585
code = &retinsn;

arch/x86/kernel/vmlinux.lds.S

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -534,6 +534,10 @@ INIT_PER_CPU(irq_stack_backing_store);
534534
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
535535
#endif
536536

537+
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
538+
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
539+
#endif
540+
537541
#endif /* CONFIG_X86_64 */
538542

539543
#ifdef CONFIG_KEXEC_CORE

arch/x86/lib/retpoline.S

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,18 @@ SYM_CODE_START(__x86_indirect_its_thunk_array)
284284
.align 64, 0xcc
285285
SYM_CODE_END(__x86_indirect_its_thunk_array)
286286

287-
#endif
287+
.align 64, 0xcc
288+
.skip 32, 0xcc
289+
SYM_CODE_START(its_return_thunk)
290+
UNWIND_HINT_FUNC
291+
ANNOTATE_NOENDBR
292+
ANNOTATE_UNRET_SAFE
293+
ret
294+
int3
295+
SYM_CODE_END(its_return_thunk)
296+
EXPORT_SYMBOL(its_return_thunk)
297+
298+
#endif /* CONFIG_MITIGATION_ITS */
288299

289300
SYM_CODE_START(__x86_return_thunk)
290301
UNWIND_HINT_FUNC

arch/x86/net/bpf_jit_comp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -487,7 +487,7 @@ static void emit_return(u8 **pprog, u8 *ip)
487487
{
488488
u8 *prog = *pprog;
489489

490-
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
490+
if (cpu_wants_rethunk()) {
491491
emit_jump(&prog, x86_return_thunk, ip);
492492
} else {
493493
EMIT1(0xC3); /* ret */

0 commit comments

Comments
 (0)