Commit f6ffa4c

Merge branch 'for-next/dynamic-scs' into for-next/core
* for-next/dynamic-scs:
  arm64: implement dynamic shadow call stack for Clang
  scs: add support for dynamic shadow call stacks
  arm64: unwind: add asynchronous unwind tables to kernel and modules
2 parents 9f93047 + 3b619e2 commit f6ffa4c
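
Taken together, the three patches build the kernel with PAC return-address protection and asynchronous unwind tables, and then, on CPUs that implement neither PAC nor BTI, use the unwind tables to locate the PACIASP/AUTIASP instructions and rewrite them into shadow call stack pushes and pops at boot (or at module load). As a rough illustration of what the rewritten instructions do, here is a minimal C sketch; it is not part of this commit, and the concrete encodings named in the comments (str x30, [x18], #8 and ldr x30, [x18, #-8]!) are an assumption based on the usual arm64 SCS convention of reserving x18 as the shadow stack pointer.

/*
 * Illustrative only, not from this commit: what the patched
 * prologue/epilogue instructions amount to, modelled in C.
 * x18 holds the shadow call stack pointer; the link register
 * (x30) is pushed on function entry and popped before returning.
 */
#include <stdint.h>

static uintptr_t *scs_sp;	/* stand-in for register x18 */

/* assumed rewrite: paciasp -> str x30, [x18], #8 */
static inline void scs_push(uintptr_t lr)
{
	*scs_sp++ = lr;
}

/* assumed rewrite: autiasp -> ldr x30, [x18, #-8]! */
static inline uintptr_t scs_pop(void)
{
	return *--scs_sp;
}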

File tree: 21 files changed, +426 −8 lines

Makefile

Lines changed: 2 additions & 0 deletions
@@ -966,8 +966,10 @@ LDFLAGS_vmlinux += --gc-sections
 endif
 
 ifdef CONFIG_SHADOW_CALL_STACK
+ifndef CONFIG_DYNAMIC_SCS
 CC_FLAGS_SCS := -fsanitize=shadow-call-stack
 KBUILD_CFLAGS += $(CC_FLAGS_SCS)
+endif
 export CC_FLAGS_SCS
 endif
 

arch/Kconfig

Lines changed: 7 additions & 0 deletions
@@ -651,6 +651,13 @@ config SHADOW_CALL_STACK
 	  reading and writing arbitrary memory may be able to locate them
 	  and hijack control flow by modifying the stacks.
 
+config DYNAMIC_SCS
+	bool
+	help
+	  Set by the arch code if it relies on code patching to insert the
+	  shadow call stack push and pop instructions rather than on the
+	  compiler.
+
 config LTO
 	bool
 	help

arch/arm64/Kconfig

Lines changed: 12 additions & 0 deletions
@@ -371,6 +371,9 @@ config KASAN_SHADOW_OFFSET
 	default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
 	default 0xffffffffffffffff
 
+config UNWIND_TABLES
+	bool
+
 source "arch/arm64/Kconfig.platforms"
 
 menu "Kernel Features"
@@ -2158,6 +2161,15 @@ config ARCH_NR_GPIO
 
 	  If unsure, leave the default value.
 
+config UNWIND_PATCH_PAC_INTO_SCS
+	bool "Enable shadow call stack dynamically using code patching"
+	# needs Clang with https://reviews.llvm.org/D111780 incorporated
+	depends on CC_IS_CLANG && CLANG_VERSION >= 150000
+	depends on ARM64_PTR_AUTH_KERNEL && CC_HAS_BRANCH_PROT_PAC_RET
+	depends on SHADOW_CALL_STACK
+	select UNWIND_TABLES
+	select DYNAMIC_SCS
+
 endmenu # "Kernel Features"
 
 menu "Boot options"

arch/arm64/Makefile

Lines changed: 13 additions & 2 deletions
@@ -45,8 +45,13 @@ KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
 
 # Avoid generating .eh_frame* sections.
+ifneq ($(CONFIG_UNWIND_TABLES),y)
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
 KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+else
+KBUILD_CFLAGS += -fasynchronous-unwind-tables
+KBUILD_AFLAGS += -fasynchronous-unwind-tables
+endif
 
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
@@ -72,10 +77,16 @@ branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=
 # We enable additional protection for leaf functions as there is some
 # narrow potential for ROP protection benefits and no substantial
 # performance impact has been observed.
+PACRET-y := pac-ret+leaf
+
+# Using a shadow call stack in leaf functions is too costly, so avoid PAC there
+# as well when we may be patching PAC into SCS
+PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret
+
 ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti
 else
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
+branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y)
 endif
 # -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
 # compiler to generate them and consequently to break the single image contract

arch/arm64/include/asm/module.lds.h

Lines changed: 8 additions & 0 deletions
@@ -17,4 +17,12 @@ SECTIONS {
 	 */
 	.text.hot : { *(.text.hot) }
 #endif
+
+#ifdef CONFIG_UNWIND_TABLES
+	/*
+	 * Currently, we only use unwind info at module load time, so we can
+	 * put it into the .init allocation.
+	 */
+	.init.eh_frame : { *(.eh_frame) }
+#endif
 }

arch/arm64/include/asm/scs.h

Lines changed: 49 additions & 0 deletions
@@ -5,6 +5,7 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm-offsets.h>
+#include <asm/sysreg.h>
 
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18
@@ -24,6 +25,54 @@
 	.endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
+
+#else
+
+#include <linux/scs.h>
+#include <asm/cpufeature.h>
+
+#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
+static inline bool should_patch_pac_into_scs(void)
+{
+	u64 reg;
+
+	/*
+	 * We only enable the shadow call stack dynamically if we are running
+	 * on a system that does not implement PAC or BTI. PAC and SCS provide
+	 * roughly the same level of protection, and BTI relies on the PACIASP
+	 * instructions serving as landing pads, preventing us from patching
+	 * those instructions into something else.
+	 */
+	reg = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);
+	if (SYS_FIELD_GET(ID_AA64ISAR1_EL1, APA, reg) |
+	    SYS_FIELD_GET(ID_AA64ISAR1_EL1, API, reg))
+		return false;
+
+	reg = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+	if (SYS_FIELD_GET(ID_AA64ISAR2_EL1, APA3, reg))
+		return false;
+
+	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
+		reg = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
+		if (reg & (0xf << ID_AA64PFR1_EL1_BT_SHIFT))
+			return false;
+	}
+	return true;
+}
+
+static inline void dynamic_scs_init(void)
+{
+	if (should_patch_pac_into_scs()) {
+		pr_info("Enabling dynamic shadow call stack\n");
+		static_branch_enable(&dynamic_scs_enabled);
+	}
+}
+#else
+static inline void dynamic_scs_init(void) {}
+#endif
+
+int scs_patch(const u8 eh_frame[], int size);
+
 #endif /* __ASSEMBLY __ */
 
 #endif /* _ASM_SCS_H */

arch/arm64/kernel/Makefile

Lines changed: 2 additions & 0 deletions
@@ -80,6 +80,8 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
 obj-$(CONFIG_ARM64_MTE) += mte.o
 obj-y += vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
+obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
+CFLAGS_patch-scs.o += -mbranch-protection=none
 
 # Force dependency (vdso*-wrap.S includes vdso.so through incbin)
 $(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so

arch/arm64/kernel/head.S

Lines changed: 3 additions & 0 deletions
@@ -462,6 +462,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	early_fdt_map		// Try mapping the FDT early
 	mov	x0, x20			// pass the full boot status
 	bl	init_feature_override	// Parse cpu feature overrides
+#ifdef CONFIG_UNWIND_PATCH_PAC_INTO_SCS
+	bl	scs_patch_vmlinux
+#endif
 	mov	x0, x20
 	bl	finalise_el2		// Prefer VHE if possible
 	ldp	x29, x30, [sp], #16

arch/arm64/kernel/irq.c

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ static void init_irq_scs(void)
 {
 	int cpu;
 
-	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+	if (!scs_is_enabled())
 		return;
 
 	for_each_possible_cpu(cpu)
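
The scs_is_enabled() test above, and scs_is_dynamic() in the module.c hunk below, come from the generic "scs: add support for dynamic shadow call stacks" patch, whose diff is not shown on this page. A hedged sketch of what those helpers presumably look like, assuming they are thin wrappers around the dynamic_scs_enabled static key that dynamic_scs_init() flips in the scs.h hunk above; the exact definitions belong to that other patch:

/* Assumed shape of the generic <linux/scs.h> helpers; not taken from this page. */
DECLARE_STATIC_KEY_FALSE(dynamic_scs_enabled);

static inline bool scs_is_dynamic(void)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
		return false;
	/* turned on by dynamic_scs_init() when PAC/BTI are absent */
	return static_branch_likely(&dynamic_scs_enabled);
}

static inline bool scs_is_enabled(void)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
		return IS_ENABLED(CONFIG_SHADOW_CALL_STACK);
	return scs_is_dynamic();
}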

arch/arm64/kernel/module.c

Lines changed: 8 additions & 0 deletions
@@ -15,9 +15,11 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
+#include <linux/scs.h>
 #include <linux/vmalloc.h>
 #include <asm/alternative.h>
 #include <asm/insn.h>
+#include <asm/scs.h>
 #include <asm/sections.h>
 
 void *module_alloc(unsigned long size)
@@ -514,5 +516,11 @@ int module_finalize(const Elf_Ehdr *hdr,
 	if (s)
 		apply_alternatives_module((void *)s->sh_addr, s->sh_size);
 
+	if (scs_is_dynamic()) {
+		s = find_section(hdr, sechdrs, ".init.eh_frame");
+		if (s)
+			scs_patch((void *)s->sh_addr, s->sh_size);
+	}
+
 	return module_init_ftrace_plt(hdr, sechdrs, me);
 }
