Skip to content

Commit 7001052

Browse files
committed
Merge tag 'x86_core_for_5.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 CET-IBT (Control-Flow-Integrity) support from Peter Zijlstra: "Add support for Intel CET-IBT, available since Tigerlake (11th gen), which is a coarse grained, hardware based, forward edge Control-Flow-Integrity mechanism where any indirect CALL/JMP must target an ENDBR instruction or suffer #CP. Additionally, since Alderlake (12th gen)/Sapphire-Rapids, speculation is limited to 2 instructions (and typically fewer) on branch targets not starting with ENDBR. CET-IBT also limits speculation of the next sequential instruction after the indirect CALL/JMP [1]. CET-IBT is fundamentally incompatible with retpolines, but provides, as described above, speculation limits itself" [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html * tag 'x86_core_for_5.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (53 commits) kvm/emulate: Fix SETcc emulation for ENDBR x86/Kconfig: Only allow CONFIG_X86_KERNEL_IBT with ld.lld >= 14.0.0 x86/Kconfig: Only enable CONFIG_CC_HAS_IBT for clang >= 14.0.0 kbuild: Fixup the IBT kbuild changes x86/Kconfig: Do not allow CONFIG_X86_X32_ABI=y with llvm-objcopy x86: Remove toolchain check for X32 ABI capability x86/alternative: Use .ibt_endbr_seal to seal indirect calls objtool: Find unused ENDBR instructions objtool: Validate IBT assumptions objtool: Add IBT/ENDBR decoding objtool: Read the NOENDBR annotation x86: Annotate idtentry_df() x86,objtool: Move the ASM_REACHABLE annotation to objtool.h x86: Annotate call_on_stack() objtool: Rework ASM_REACHABLE x86: Mark __invalid_creds() __noreturn exit: Mark do_group_exit() __noreturn x86: Mark stop_this_cpu() __noreturn objtool: Ignore extra-symbol code objtool: Rename --duplicate to --lto ...
2 parents f022814 + 3986f65 commit 7001052

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

95 files changed

+1454
-326
lines changed

arch/powerpc/include/asm/livepatch.h

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,17 +17,6 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
1717
ftrace_instruction_pointer_set(fregs, ip);
1818
}
1919

20-
#define klp_get_ftrace_location klp_get_ftrace_location
21-
static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
22-
{
23-
/*
24-
* Live patch works on PPC32 and only with -mprofile-kernel on PPC64. In
25-
* both cases, the ftrace location is always within the first 16 bytes.
26-
*/
27-
return ftrace_location_range(faddr, faddr + 16);
28-
}
29-
#endif /* CONFIG_LIVEPATCH */
30-
3120
#ifdef CONFIG_LIVEPATCH_64
3221
static inline void klp_init_thread_info(struct task_struct *p)
3322
{

arch/powerpc/kernel/kprobes.c

Lines changed: 21 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,27 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
105105
return addr;
106106
}
107107

108+
static bool arch_kprobe_on_func_entry(unsigned long offset)
109+
{
110+
#ifdef PPC64_ELF_ABI_v2
111+
#ifdef CONFIG_KPROBES_ON_FTRACE
112+
return offset <= 16;
113+
#else
114+
return offset <= 8;
115+
#endif
116+
#else
117+
return !offset;
118+
#endif
119+
}
120+
121+
/* XXX try and fold the magic of kprobe_lookup_name() in this */
122+
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
123+
bool *on_func_entry)
124+
{
125+
*on_func_entry = arch_kprobe_on_func_entry(offset);
126+
return (kprobe_opcode_t *)(addr + offset);
127+
}
128+
108129
void *alloc_insn_page(void)
109130
{
110131
void *page;
@@ -218,19 +239,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs
218239
kcb->kprobe_saved_msr = regs->msr;
219240
}
220241

221-
bool arch_kprobe_on_func_entry(unsigned long offset)
222-
{
223-
#ifdef PPC64_ELF_ABI_v2
224-
#ifdef CONFIG_KPROBES_ON_FTRACE
225-
return offset <= 16;
226-
#else
227-
return offset <= 8;
228-
#endif
229-
#else
230-
return !offset;
231-
#endif
232-
}
233-
234242
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
235243
{
236244
ri->ret_addr = (kprobe_opcode_t *)regs->link;

arch/um/kernel/um_arch.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -424,6 +424,10 @@ void __init check_bugs(void)
424424
os_check_bugs();
425425
}
426426

427+
void apply_ibt_endbr(s32 *start, s32 *end)
428+
{
429+
}
430+
427431
void apply_retpolines(s32 *start, s32 *end)
428432
{
429433
}

arch/x86/Kconfig

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1842,6 +1842,36 @@ config X86_UMIP
18421842
specific cases in protected and virtual-8086 modes. Emulated
18431843
results are dummy.
18441844

1845+
config CC_HAS_IBT
1846+
# GCC >= 9 and binutils >= 2.29
1847+
# Retpoline check to work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93654
1848+
# Clang/LLVM >= 14
1849+
# https://github.com/llvm/llvm-project/commit/e0b89df2e0f0130881bf6c39bf31d7f6aac00e0f
1850+
# https://github.com/llvm/llvm-project/commit/dfcf69770bc522b9e411c66454934a37c1f35332
1851+
def_bool ((CC_IS_GCC && $(cc-option, -fcf-protection=branch -mindirect-branch-register)) || \
1852+
(CC_IS_CLANG && CLANG_VERSION >= 140000)) && \
1853+
$(as-instr,endbr64)
1854+
1855+
config X86_KERNEL_IBT
1856+
prompt "Indirect Branch Tracking"
1857+
bool
1858+
depends on X86_64 && CC_HAS_IBT && STACK_VALIDATION
1859+
# https://github.com/llvm/llvm-project/commit/9d7001eba9c4cb311e03cd8cdc231f9e579f2d0f
1860+
depends on !LD_IS_LLD || LLD_VERSION >= 140000
1861+
help
1862+
Build the kernel with support for Indirect Branch Tracking, a
1863+
hardware-supported coarse-grain forward-edge Control Flow Integrity
1864+
protection. It enforces that all indirect calls must land on
1865+
an ENDBR instruction, as such, the compiler will instrument the
1866+
code with them to make this happen.
1867+
1868+
In addition to building the kernel with IBT, seal all functions that
1869+
are not indirect call targets, avoiding them ever becoming one.
1870+
1871+
This requires LTO like objtool runs and will slow down the build. It
1872+
does significantly reduce the number of ENDBR instructions in the
1873+
kernel image.
1874+
18451875
config X86_INTEL_MEMORY_PROTECTION_KEYS
18461876
prompt "Memory Protection Keys"
18471877
def_bool y
@@ -2815,19 +2845,20 @@ config IA32_AOUT
28152845
help
28162846
Support old a.out binaries in the 32bit emulation.
28172847

2818-
config X86_X32
2848+
config X86_X32_ABI
28192849
bool "x32 ABI for 64-bit mode"
28202850
depends on X86_64
2851+
# llvm-objcopy does not convert x86_64 .note.gnu.property or
2852+
# compressed debug sections to x86_x32 properly:
2853+
# https://github.com/ClangBuiltLinux/linux/issues/514
2854+
# https://github.com/ClangBuiltLinux/linux/issues/1141
2855+
depends on $(success,$(OBJCOPY) --version | head -n1 | grep -qv llvm)
28212856
help
28222857
Include code to run binaries for the x32 native 32-bit ABI
28232858
for 64-bit processors. An x32 process gets access to the
28242859
full 64-bit register file and wide data path while leaving
28252860
pointers at 32 bits for smaller memory footprint.
28262861

2827-
You will need a recent binutils (2.22 or later) with
2828-
elf32_x86_64 support enabled to compile a kernel with this
2829-
option set.
2830-
28312862
config COMPAT_32
28322863
def_bool y
28332864
depends on IA32_EMULATION || X86_32
@@ -2836,7 +2867,7 @@ config COMPAT_32
28362867

28372868
config COMPAT
28382869
def_bool y
2839-
depends on IA32_EMULATION || X86_X32
2870+
depends on IA32_EMULATION || X86_X32_ABI
28402871

28412872
if COMPAT
28422873
config COMPAT_FOR_U64_ALIGNMENT

arch/x86/Makefile

Lines changed: 14 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ endif
3636

3737
# How to compile the 16-bit code. Note we always compile for -march=i386;
3838
# that way we can complain to the user if the CPU is insufficient.
39-
REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
39+
REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
4040
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
4141
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
4242
-mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
@@ -62,8 +62,20 @@ export BITS
6262
#
6363
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
6464

65-
# Intel CET isn't enabled in the kernel
65+
ifeq ($(CONFIG_X86_KERNEL_IBT),y)
66+
#
67+
# Kernel IBT has S_CET.NOTRACK_EN=0, as such the compilers must not generate
68+
# NOTRACK prefixes. Current generation compilers unconditionally employ NOTRACK
69+
# for jump-tables, as such, disable jump-tables for now.
70+
#
71+
# (jump-tables are implicitly disabled by RETPOLINE)
72+
#
73+
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816
74+
#
75+
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=branch -fno-jump-tables)
76+
else
6677
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
78+
endif
6779

6880
ifeq ($(CONFIG_X86_32),y)
6981
BITS := 32
@@ -140,22 +152,6 @@ else
140152
KBUILD_CFLAGS += -mcmodel=kernel
141153
endif
142154

143-
ifdef CONFIG_X86_X32
144-
x32_ld_ok := $(call try-run,\
145-
/bin/echo -e '1: .quad 1b' | \
146-
$(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \
147-
$(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMP.o" && \
148-
$(LD) -m elf32_x86_64 "$$TMP.o" -o "$$TMP",y,n)
149-
ifeq ($(x32_ld_ok),y)
150-
CONFIG_X86_X32_ABI := y
151-
KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
152-
KBUILD_CFLAGS += -DCONFIG_X86_X32_ABI
153-
else
154-
$(warning CONFIG_X86_X32 enabled but no binutils support)
155-
endif
156-
endif
157-
export CONFIG_X86_X32_ABI
158-
159155
#
160156
# If the function graph tracer is used with mcount instead of fentry,
161157
# '-maccumulate-outgoing-args' is needed to prevent a GCC bug

arch/x86/crypto/crc32c-pcl-intel-asm_64.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,7 @@ crc_array:
195195
.altmacro
196196
LABEL crc_ %i
197197
.noaltmacro
198+
ENDBR
198199
crc32q -i*8(block_0), crc_init
199200
crc32q -i*8(block_1), crc1
200201
crc32q -i*8(block_2), crc2
@@ -204,6 +205,7 @@ LABEL crc_ %i
204205
.altmacro
205206
LABEL crc_ %i
206207
.noaltmacro
208+
ENDBR
207209
crc32q -i*8(block_0), crc_init
208210
crc32q -i*8(block_1), crc1
209211
# SKIP crc32 -i*8(block_2), crc2 ; Don't do this one yet
@@ -237,6 +239,7 @@ LABEL crc_ %i
237239
################################################################
238240

239241
LABEL crc_ 0
242+
ENDBR
240243
mov tmp, len
241244
cmp $128*24, tmp
242245
jae full_block

arch/x86/entry/entry_64.S

Lines changed: 27 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@
8686

8787
SYM_CODE_START(entry_SYSCALL_64)
8888
UNWIND_HINT_EMPTY
89+
ENDBR
8990

9091
swapgs
9192
/* tss.sp2 is scratch space. */
@@ -94,6 +95,7 @@ SYM_CODE_START(entry_SYSCALL_64)
9495
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
9596

9697
SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
98+
ANNOTATE_NOENDBR
9799

98100
/* Construct struct pt_regs on stack */
99101
pushq $__USER_DS /* pt_regs->ss */
@@ -276,6 +278,7 @@ SYM_FUNC_END(__switch_to_asm)
276278
.pushsection .text, "ax"
277279
SYM_CODE_START(ret_from_fork)
278280
UNWIND_HINT_EMPTY
281+
ANNOTATE_NOENDBR // copy_thread
279282
movq %rax, %rdi
280283
call schedule_tail /* rdi: 'prev' task parameter */
281284

@@ -350,6 +353,7 @@ SYM_CODE_END(ret_from_fork)
350353
.macro idtentry vector asmsym cfunc has_error_code:req
351354
SYM_CODE_START(\asmsym)
352355
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
356+
ENDBR
353357
ASM_CLAC
354358

355359
.if \has_error_code == 0
@@ -417,6 +421,7 @@ SYM_CODE_END(\asmsym)
417421
.macro idtentry_mce_db vector asmsym cfunc
418422
SYM_CODE_START(\asmsym)
419423
UNWIND_HINT_IRET_REGS
424+
ENDBR
420425
ASM_CLAC
421426

422427
pushq $-1 /* ORIG_RAX: no syscall to restart */
@@ -472,6 +477,7 @@ SYM_CODE_END(\asmsym)
472477
.macro idtentry_vc vector asmsym cfunc
473478
SYM_CODE_START(\asmsym)
474479
UNWIND_HINT_IRET_REGS
480+
ENDBR
475481
ASM_CLAC
476482

477483
/*
@@ -533,6 +539,7 @@ SYM_CODE_END(\asmsym)
533539
.macro idtentry_df vector asmsym cfunc
534540
SYM_CODE_START(\asmsym)
535541
UNWIND_HINT_IRET_REGS offset=8
542+
ENDBR
536543
ASM_CLAC
537544

538545
/* paranoid_entry returns GS information for paranoid_exit in EBX. */
@@ -544,6 +551,9 @@ SYM_CODE_START(\asmsym)
544551
movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
545552
call \cfunc
546553

554+
/* For some configurations \cfunc ends up being a noreturn. */
555+
REACHABLE
556+
547557
jmp paranoid_exit
548558

549559
_ASM_NOKPROBE(\asmsym)
@@ -564,6 +574,7 @@ __irqentry_text_start:
564574
.align 16
565575
.globl __irqentry_text_end
566576
__irqentry_text_end:
577+
ANNOTATE_NOENDBR
567578

568579
SYM_CODE_START_LOCAL(common_interrupt_return)
569580
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
@@ -608,8 +619,8 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
608619

609620
/* Restore RDI. */
610621
popq %rdi
611-
SWAPGS
612-
INTERRUPT_RETURN
622+
swapgs
623+
jmp .Lnative_iret
613624

614625

615626
SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
@@ -626,9 +637,14 @@ SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
626637
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
627638
* when returning from IPI handler.
628639
*/
629-
INTERRUPT_RETURN
640+
#ifdef CONFIG_XEN_PV
641+
SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
642+
ANNOTATE_NOENDBR
643+
.byte 0xe9
644+
.long .Lnative_iret - (. + 4)
645+
#endif
630646

631-
SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
647+
.Lnative_iret:
632648
UNWIND_HINT_IRET_REGS
633649
/*
634650
* Are we returning to a stack segment from the LDT? Note: in
@@ -640,6 +656,7 @@ SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
640656
#endif
641657

642658
SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
659+
ANNOTATE_NOENDBR // exc_double_fault
643660
/*
644661
* This may fault. Non-paranoid faults on return to userspace are
645662
* handled by fixup_bad_iret. These include #SS, #GP, and #NP.
@@ -734,6 +751,7 @@ SYM_FUNC_START(asm_load_gs_index)
734751
FRAME_BEGIN
735752
swapgs
736753
.Lgs_change:
754+
ANNOTATE_NOENDBR // error_entry
737755
movl %edi, %gs
738756
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
739757
swapgs
@@ -804,6 +822,7 @@ SYM_CODE_END(exc_xen_hypervisor_callback)
804822
*/
805823
SYM_CODE_START(xen_failsafe_callback)
806824
UNWIND_HINT_EMPTY
825+
ENDBR
807826
movl %ds, %ecx
808827
cmpw %cx, 0x10(%rsp)
809828
jne 1f
@@ -1063,6 +1082,7 @@ SYM_CODE_END(error_return)
10631082
*/
10641083
SYM_CODE_START(asm_exc_nmi)
10651084
UNWIND_HINT_IRET_REGS
1085+
ENDBR
10661086

10671087
/*
10681088
* We allow breakpoints in NMIs. If a breakpoint occurs, then
@@ -1310,6 +1330,7 @@ first_nmi:
13101330
#endif
13111331

13121332
repeat_nmi:
1333+
ANNOTATE_NOENDBR // this code
13131334
/*
13141335
* If there was a nested NMI, the first NMI's iret will return
13151336
* here. But NMIs are still enabled and we can take another
@@ -1338,6 +1359,7 @@ repeat_nmi:
13381359
.endr
13391360
subq $(5*8), %rsp
13401361
end_repeat_nmi:
1362+
ANNOTATE_NOENDBR // this code
13411363

13421364
/*
13431365
* Everything below this point can be preempted by a nested NMI.
@@ -1421,6 +1443,7 @@ SYM_CODE_END(asm_exc_nmi)
14211443
*/
14221444
SYM_CODE_START(ignore_sysret)
14231445
UNWIND_HINT_EMPTY
1446+
ENDBR
14241447
mov $-ENOSYS, %eax
14251448
sysretl
14261449
SYM_CODE_END(ignore_sysret)

0 commit comments

Comments
 (0)