Skip to content

Commit 44ca0e0

Browse files
committed
Merge branch 'for-next/kernel-ptrauth' into for-next/core
* for-next/kernel-ptrauth: Return address signing - in-kernel support arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH lkdtm: arm64: test kernel pointer authentication arm64: compile the kernel with ptrauth return address signing kconfig: Add support for 'as-option' arm64: suspend: restore the kernel ptrauth keys arm64: __show_regs: strip PAC from lr in printk arm64: unwind: strip PAC from kernel addresses arm64: mask PAC bits of __builtin_return_address arm64: initialize ptrauth keys for kernel booting task arm64: initialize and switch ptrauth kernel keys arm64: enable ptrauth earlier arm64: cpufeature: handle conflicts based on capability arm64: cpufeature: Move cpu capability helpers inside C file arm64: ptrauth: Add bootup/runtime flags for __cpu_setup arm64: install user ptrauth keys at kernel exit time arm64: rename ptrauth key structures to be user-specific arm64: cpufeature: add pointer auth meta-capabilities arm64: cpufeature: Fix meta-capability cpufeature check
2 parents 806dc82 + 3b446c7 commit 44ca0e0

26 files changed

+427
-100
lines changed

arch/arm64/Kconfig

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,7 @@ config ARM64
118118
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
119119
select HAVE_ARCH_AUDITSYSCALL
120120
select HAVE_ARCH_BITREVERSE
121+
select HAVE_ARCH_COMPILER_H
121122
select HAVE_ARCH_HUGE_VMAP
122123
select HAVE_ARCH_JUMP_LABEL
123124
select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -1501,23 +1502,55 @@ config ARM64_PTR_AUTH
15011502
bool "Enable support for pointer authentication"
15021503
default y
15031504
depends on !KVM || ARM64_VHE
1505+
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
1506+
depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
1507+
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
15041508
help
15051509
Pointer authentication (part of the ARMv8.3 Extensions) provides
15061510
instructions for signing and authenticating pointers against secret
15071511
keys, which can be used to mitigate Return Oriented Programming (ROP)
15081512
and other attacks.
15091513

15101514
This option enables these instructions at EL0 (i.e. for userspace).
1511-
15121515
Choosing this option will cause the kernel to initialise secret keys
15131516
for each process at exec() time, with these keys being
15141517
context-switched along with the process.
15151518

1519+
If the compiler supports the -mbranch-protection or
1520+
-msign-return-address flag (e.g. GCC 7 or later), then this option
1521+
will also cause the kernel itself to be compiled with return address
1522+
protection. In this case, and if the target hardware is known to
1523+
support pointer authentication, then CONFIG_STACKPROTECTOR can be
1524+
disabled with minimal loss of protection.
1525+
15161526
The feature is detected at runtime. If the feature is not present in
15171527
hardware it will not be advertised to userspace/KVM guest nor will it
15181528
be enabled. However, KVM guest also require VHE mode and hence
15191529
CONFIG_ARM64_VHE=y option to use this feature.
15201530

1531+
If the feature is present on the boot CPU but not on a late CPU, then
1532+
the late CPU will be parked. Also, if the boot CPU does not have
1533+
address auth and the late CPU has then the late CPU will still boot
1534+
but with the feature disabled. On such a system, this option should
1535+
not be selected.
1536+
1537+
This feature works with FUNCTION_GRAPH_TRACER option only if
1538+
DYNAMIC_FTRACE_WITH_REGS is enabled.
1539+
1540+
config CC_HAS_BRANCH_PROT_PAC_RET
1541+
# GCC 9 or later, clang 8 or later
1542+
def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
1543+
1544+
config CC_HAS_SIGN_RETURN_ADDRESS
1545+
# GCC 7, 8
1546+
def_bool $(cc-option,-msign-return-address=all)
1547+
1548+
config AS_HAS_PAC
1549+
def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
1550+
1551+
config AS_HAS_CFI_NEGATE_RA_STATE
1552+
def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
1553+
15211554
endmenu
15221555

15231556
menu "ARMv8.4 architectural features"

arch/arm64/Makefile

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,17 @@ stack_protector_prepare: prepare0
6565
include/generated/asm-offsets.h))
6666
endif
6767

68+
ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
69+
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
70+
branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
71+
# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
72+
# compiler to generate them and consequently to break the single image contract
73+
# we pass it only to the assembler. This option is utilized only in case of non
74+
# integrated assemblers.
75+
branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
76+
KBUILD_CFLAGS += $(branch-prot-flags-y)
77+
endif
78+
6879
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
6980
KBUILD_CPPFLAGS += -mbig-endian
7081
CHECKFLAGS += -D__AARCH64EB__
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef __ASM_ASM_POINTER_AUTH_H
3+
#define __ASM_ASM_POINTER_AUTH_H
4+
5+
#include <asm/alternative.h>
6+
#include <asm/asm-offsets.h>
7+
#include <asm/cpufeature.h>
8+
#include <asm/sysreg.h>
9+
10+
#ifdef CONFIG_ARM64_PTR_AUTH
11+
/*
12+
* thread.keys_user.ap* as offset exceeds the #imm offset range
13+
* so use the base value of ldp as thread.keys_user and offset as
14+
* thread.keys_user.ap*.
15+
*/
16+
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
17+
mov \tmp1, #THREAD_KEYS_USER
18+
add \tmp1, \tsk, \tmp1
19+
alternative_if_not ARM64_HAS_ADDRESS_AUTH
20+
b .Laddr_auth_skip_\@
21+
alternative_else_nop_endif
22+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
23+
msr_s SYS_APIAKEYLO_EL1, \tmp2
24+
msr_s SYS_APIAKEYHI_EL1, \tmp3
25+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
26+
msr_s SYS_APIBKEYLO_EL1, \tmp2
27+
msr_s SYS_APIBKEYHI_EL1, \tmp3
28+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
29+
msr_s SYS_APDAKEYLO_EL1, \tmp2
30+
msr_s SYS_APDAKEYHI_EL1, \tmp3
31+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
32+
msr_s SYS_APDBKEYLO_EL1, \tmp2
33+
msr_s SYS_APDBKEYHI_EL1, \tmp3
34+
.Laddr_auth_skip_\@:
35+
alternative_if ARM64_HAS_GENERIC_AUTH
36+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
37+
msr_s SYS_APGAKEYLO_EL1, \tmp2
38+
msr_s SYS_APGAKEYHI_EL1, \tmp3
39+
alternative_else_nop_endif
40+
.endm
41+
42+
.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
43+
alternative_if ARM64_HAS_ADDRESS_AUTH
44+
mov \tmp1, #THREAD_KEYS_KERNEL
45+
add \tmp1, \tsk, \tmp1
46+
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
47+
msr_s SYS_APIAKEYLO_EL1, \tmp2
48+
msr_s SYS_APIAKEYHI_EL1, \tmp3
49+
.if \sync == 1
50+
isb
51+
.endif
52+
alternative_else_nop_endif
53+
.endm
54+
55+
#else /* CONFIG_ARM64_PTR_AUTH */
56+
57+
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
58+
.endm
59+
60+
.macro ptrauth_keys_install_kernel tsk, sync, tmp1, tmp2, tmp3
61+
.endm
62+
63+
#endif /* CONFIG_ARM64_PTR_AUTH */
64+
65+
#endif /* __ASM_ASM_POINTER_AUTH_H */

arch/arm64/include/asm/compiler.h

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef __ASM_COMPILER_H
3+
#define __ASM_COMPILER_H
4+
5+
#if defined(CONFIG_ARM64_PTR_AUTH)
6+
7+
/*
8+
* The EL0/EL1 pointer bits used by a pointer authentication code.
9+
* This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
10+
*/
11+
#define ptrauth_user_pac_mask() GENMASK_ULL(54, vabits_actual)
12+
#define ptrauth_kernel_pac_mask() GENMASK_ULL(63, vabits_actual)
13+
14+
/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
15+
#define ptrauth_clear_pac(ptr) \
16+
((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) : \
17+
(ptr & ~ptrauth_user_pac_mask()))
18+
19+
#define __builtin_return_address(val) \
20+
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
21+
22+
#endif /* CONFIG_ARM64_PTR_AUTH */
23+
24+
#endif /* __ASM_COMPILER_H */

arch/arm64/include/asm/cpucaps.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,9 @@
5959
#define ARM64_HAS_E0PD 49
6060
#define ARM64_HAS_RNG 50
6161
#define ARM64_HAS_AMU_EXTN 51
62+
#define ARM64_HAS_ADDRESS_AUTH 52
63+
#define ARM64_HAS_GENERIC_AUTH 53
6264

63-
#define ARM64_NCAPS 52
65+
#define ARM64_NCAPS 54
6466

6567
#endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/cpufeature.h

Lines changed: 21 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -208,6 +208,10 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
208208
* In some non-typical cases either both (a) and (b), or neither,
209209
* should be permitted. This can be described by including neither
210210
* or both flags in the capability's type field.
211+
*
212+
* In case of a conflict, the CPU is prevented from booting. If the
213+
* ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
214+
* then a kernel panic is triggered.
211215
*/
212216

213217

@@ -240,6 +244,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
240244
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
241245
/* Is it safe for a late CPU to miss this capability when system has it */
242246
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
247+
/* Panic when a conflict is detected */
248+
#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))
243249

244250
/*
245251
* CPU errata workarounds that need to be enabled at boot time if one or
@@ -279,9 +285,20 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
279285

280286
/*
281287
* CPU feature used early in the boot based on the boot CPU. All secondary
282-
* CPUs must match the state of the capability as detected by the boot CPU.
288+
* CPUs must match the state of the capability as detected by the boot CPU. In
289+
* case of a conflict, a kernel panic is triggered.
290+
*/
291+
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \
292+
(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
293+
294+
/*
295+
* CPU feature used early in the boot based on the boot CPU. It is safe for a
296+
* late CPU to have this feature even though the boot CPU hasn't enabled it,
297+
* although the feature will not be used by Linux in this case. If the boot CPU
298+
* has enabled this feature already, then every late CPU must have it.
283299
*/
284-
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
300+
#define ARM64_CPUCAP_BOOT_CPU_FEATURE \
301+
(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
285302

286303
struct arm64_cpu_capabilities {
287304
const char *desc;
@@ -340,18 +357,6 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
340357
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
341358
}
342359

343-
static inline bool
344-
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
345-
{
346-
return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
347-
}
348-
349-
static inline bool
350-
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
351-
{
352-
return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
353-
}
354-
355360
/*
356361
* Generic helper for handling capabilties with multiple (match,enable) pairs
357362
* of call backs, sharing the same capability bit.
@@ -654,15 +659,13 @@ static inline bool system_supports_cnp(void)
654659
static inline bool system_supports_address_auth(void)
655660
{
656661
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
657-
(cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
658-
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
662+
cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
659663
}
660664

661665
static inline bool system_supports_generic_auth(void)
662666
{
663667
return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
664-
(cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
665-
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
668+
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
666669
}
667670

668671
static inline bool system_uses_irq_prio_masking(void)

arch/arm64/include/asm/pointer_auth.h

Lines changed: 22 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -22,15 +22,19 @@ struct ptrauth_key {
2222
* We give each process its own keys, which are shared by all threads. The keys
2323
* are inherited upon fork(), and reinitialised upon exec*().
2424
*/
25-
struct ptrauth_keys {
25+
struct ptrauth_keys_user {
2626
struct ptrauth_key apia;
2727
struct ptrauth_key apib;
2828
struct ptrauth_key apda;
2929
struct ptrauth_key apdb;
3030
struct ptrauth_key apga;
3131
};
3232

33-
static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
33+
struct ptrauth_keys_kernel {
34+
struct ptrauth_key apia;
35+
};
36+
37+
static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
3438
{
3539
if (system_supports_address_auth()) {
3640
get_random_bytes(&keys->apia, sizeof(keys->apia));
@@ -50,48 +54,38 @@ do { \
5054
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
5155
} while (0)
5256

53-
static inline void ptrauth_keys_switch(struct ptrauth_keys *keys)
57+
static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
5458
{
55-
if (system_supports_address_auth()) {
56-
__ptrauth_key_install(APIA, keys->apia);
57-
__ptrauth_key_install(APIB, keys->apib);
58-
__ptrauth_key_install(APDA, keys->apda);
59-
__ptrauth_key_install(APDB, keys->apdb);
60-
}
59+
if (system_supports_address_auth())
60+
get_random_bytes(&keys->apia, sizeof(keys->apia));
61+
}
6162

62-
if (system_supports_generic_auth())
63-
__ptrauth_key_install(APGA, keys->apga);
63+
static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys)
64+
{
65+
if (system_supports_address_auth())
66+
__ptrauth_key_install(APIA, keys->apia);
6467
}
6568

6669
extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
6770

68-
/*
69-
* The EL0 pointer bits used by a pointer authentication code.
70-
* This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
71-
*/
72-
#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual)
73-
74-
/* Only valid for EL0 TTBR0 instruction pointers */
7571
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
7672
{
77-
return ptr & ~ptrauth_user_pac_mask();
73+
return ptrauth_clear_pac(ptr);
7874
}
7975

8076
#define ptrauth_thread_init_user(tsk) \
81-
do { \
82-
struct task_struct *__ptiu_tsk = (tsk); \
83-
ptrauth_keys_init(&__ptiu_tsk->thread.keys_user); \
84-
ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user); \
85-
} while (0)
86-
87-
#define ptrauth_thread_switch(tsk) \
88-
ptrauth_keys_switch(&(tsk)->thread.keys_user)
77+
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
78+
#define ptrauth_thread_init_kernel(tsk) \
79+
ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
80+
#define ptrauth_thread_switch_kernel(tsk) \
81+
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
8982

9083
#else /* CONFIG_ARM64_PTR_AUTH */
9184
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
9285
#define ptrauth_strip_insn_pac(lr) (lr)
9386
#define ptrauth_thread_init_user(tsk)
94-
#define ptrauth_thread_switch(tsk)
87+
#define ptrauth_thread_init_kernel(tsk)
88+
#define ptrauth_thread_switch_kernel(tsk)
9589
#endif /* CONFIG_ARM64_PTR_AUTH */
9690

9791
#endif /* __ASM_POINTER_AUTH_H */

arch/arm64/include/asm/processor.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,8 @@ struct thread_struct {
146146
unsigned long fault_code; /* ESR_EL1 value */
147147
struct debug_info debug; /* debugging */
148148
#ifdef CONFIG_ARM64_PTR_AUTH
149-
struct ptrauth_keys keys_user;
149+
struct ptrauth_keys_user keys_user;
150+
struct ptrauth_keys_kernel keys_kernel;
150151
#endif
151152
};
152153

arch/arm64/include/asm/smp.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,13 +23,22 @@
2323
#define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT)
2424
#define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT)
2525

26+
/* Possible options for __cpu_setup */
27+
/* Option to setup primary cpu */
28+
#define ARM64_CPU_BOOT_PRIMARY (1)
29+
/* Option to setup secondary cpus */
30+
#define ARM64_CPU_BOOT_SECONDARY (2)
31+
/* Option to setup cpus for different cpu run time services */
32+
#define ARM64_CPU_RUNTIME (3)
33+
2634
#ifndef __ASSEMBLY__
2735

2836
#include <asm/percpu.h>
2937

3038
#include <linux/threads.h>
3139
#include <linux/cpumask.h>
3240
#include <linux/thread_info.h>
41+
#include <asm/pointer_auth.h>
3342

3443
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
3544

@@ -87,6 +96,9 @@ asmlinkage void secondary_start_kernel(void);
8796
struct secondary_data {
8897
void *stack;
8998
struct task_struct *task;
99+
#ifdef CONFIG_ARM64_PTR_AUTH
100+
struct ptrauth_keys_kernel ptrauth_key;
101+
#endif
90102
long status;
91103
};
92104

0 commit comments

Comments
 (0)