Skip to content

Commit 50e3ed0

Browse files
ardbiesheuvel authored and ctmarinas committed
arm64: mm: add support for WXN memory translation attribute
The AArch64 virtual memory system supports a global WXN control, which can be enabled to make all writable mappings implicitly no-exec. This is a useful hardening feature, as it prevents mistakes in managing page table permissions from being exploited to attack the system. When enabled at EL1, the restrictions apply to both EL1 and EL0. EL1 is completely under our control, and has been cleaned up to allow WXN to be enabled from boot onwards. EL0 is not under our control, but given that widely deployed security features such as selinux or PaX already limit the ability of user space to create mappings that are writable and executable at the same time, the impact of enabling this for EL0 is expected to be limited. (For this reason, common user space libraries that have a legitimate need for manipulating executable code already carry fallbacks such as [0].) If enabled at compile time, the feature can still be disabled at boot if needed, by passing arm64.nowxn on the kernel command line. [0] https://github.com/libffi/libffi/blob/master/src/closures.c#L440 Signed-off-by: Ard Biesheuvel <[email protected]> Reviewed-by: Kees Cook <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Catalin Marinas <[email protected]>
1 parent cb1a393 commit 50e3ed0

File tree

7 files changed

+116
-2
lines changed

7 files changed

+116
-2
lines changed

arch/arm64/Kconfig

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1608,6 +1608,17 @@ config RODATA_FULL_DEFAULT_ENABLED
16081608
This requires the linear region to be mapped down to pages,
16091609
which may adversely affect performance in some cases.
16101610

1611+
config ARM64_WXN
1612+
bool "Enable WXN attribute so all writable mappings are non-exec"
1613+
help
1614+
Set the WXN bit in the SCTLR system register so that all writable
1615+
mappings are treated as if the PXN/UXN bit is set as well.
1616+
If this is set to Y, it can still be disabled at runtime by
1617+
passing 'arm64.nowxn' on the kernel command line.
1618+
1619+
This should only be set if no software needs to be supported that
1620+
relies on being able to execute from writable mappings.
1621+
16111622
config ARM64_SW_TTBR0_PAN
16121623
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
16131624
help

arch/arm64/include/asm/cpufeature.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0
1919
#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4
2020
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF 8
21+
#define ARM64_SW_FEATURE_OVERRIDE_NOWXN 12
2122

2223
#ifndef __ASSEMBLY__
2324

@@ -962,6 +963,13 @@ static inline bool kaslr_disabled_cmdline(void)
962963
return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
963964
}
964965

966+
/*
 * arm64_wxn_enabled - report whether the SCTLR WXN hardening is in effect.
 *
 * Returns false outright when CONFIG_ARM64_WXN is compiled out; otherwise
 * WXN is considered enabled unless the 'arm64_sw.nowxn' software feature
 * override was set (e.g., via 'arm64.nowxn' on the kernel command line).
 */
static inline bool arm64_wxn_enabled(void)
967+
{
968+
if (!IS_ENABLED(CONFIG_ARM64_WXN))
969+
return false;
970+
return !arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN);
971+
}
972+
965973
u32 get_kvm_ipa_limit(void);
966974
void dump_cpu_features(void);
967975

arch/arm64/include/asm/mman.h

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,11 +35,40 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
3535
}
3636
#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
3737

38+
/*
 * arm64_check_wx_prot - vet requested protection bits against the WXN policy.
 * @prot: PROT_* flags requested via mmap() or mprotect()
 * @tsk:  task making the request; used only for the rate-limited diagnostic
 *
 * Returns false (reject the mapping) when WXN is live and @prot asks for a
 * mapping that is both writable and executable at once; true otherwise.
 */
static inline bool arm64_check_wx_prot(unsigned long prot,
39+
struct task_struct *tsk)
40+
{
41+
/*
42+
* When we are running with SCTLR_ELx.WXN==1, writable mappings are
43+
* implicitly non-executable. This means we should reject such mappings
44+
* when user space attempts to create them using mmap() or mprotect().
45+
*/
46+
if (arm64_wxn_enabled() &&
47+
((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC))) {
48+
/*
49+
* User space libraries such as libffi carry elaborate
50+
* heuristics to decide whether it is worth it to even attempt
51+
* to create writable executable mappings, as PaX or selinux
52+
* enabled systems will outright reject it. They will usually
53+
* fall back to something else (e.g., two separate shared
54+
* mmap()s of a temporary file) on failure.
55+
*/
56+
pr_info_ratelimited(
57+
"process %s (%d) attempted to create PROT_WRITE+PROT_EXEC mapping\n",
58+
tsk->comm, tsk->pid);
59+
return false;
60+
}
61+
return true;
62+
}
63+
3864
static inline bool arch_validate_prot(unsigned long prot,
3965
unsigned long addr __always_unused)
4066
{
4167
unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;
4268

69+
if (!arm64_check_wx_prot(prot, current))
70+
return false;
71+
4372
if (system_supports_bti())
4473
supported |= PROT_BTI;
4574

@@ -50,6 +79,13 @@ static inline bool arch_validate_prot(unsigned long prot,
5079
}
5180
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
5281

82+
/*
 * arch_validate_mmap_prot - mmap()-time hook; rejects PROT_WRITE+PROT_EXEC
 * requests for the current task when WXN is enabled. @addr is unused here.
 */
static inline bool arch_validate_mmap_prot(unsigned long prot,
83+
unsigned long addr)
84+
{
85+
return arm64_check_wx_prot(prot, current);
86+
}
87+
#define arch_validate_mmap_prot arch_validate_mmap_prot
88+
5389
static inline bool arch_validate_flags(unsigned long vm_flags)
5490
{
5591
if (!system_supports_mte())

arch/arm64/include/asm/mmu_context.h

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,41 @@
2020
#include <asm/cpufeature.h>
2121
#include <asm/daifflags.h>
2222
#include <asm/proc-fns.h>
23-
#include <asm-generic/mm_hooks.h>
2423
#include <asm/cputype.h>
2524
#include <asm/sysreg.h>
2625
#include <asm/tlbflush.h>
2726

2827
extern bool rodata_full;
2928

29+
/*
 * No arch-specific work is needed when duplicating an mm on arm64; this
 * stub (replacing the asm-generic/mm_hooks.h default) always succeeds.
 */
static inline int arch_dup_mmap(struct mm_struct *oldmm,
30+
struct mm_struct *mm)
31+
{
32+
return 0;
33+
}
34+
35+
/* Nothing to do on mm teardown for arm64; stub replaces the generic hook. */
static inline void arch_exit_mmap(struct mm_struct *mm)
36+
{
37+
}
38+
39+
/* Nothing to do when [start, end) is unmapped; stub replaces the generic hook. */
static inline void arch_unmap(struct mm_struct *mm,
40+
unsigned long start, unsigned long end)
41+
{
42+
}
43+
44+
/*
 * arch_vma_access_permitted - GUP-time access check against the WXN policy.
 *
 * Warn (rate-limited) about any attempt to execute from a VMA that is both
 * writable and executable when CONFIG_ARM64_WXN is built in, and deny the
 * access unless WXN was disabled at runtime via the nowxn override.
 * All other accesses are permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
45+
bool write, bool execute, bool foreign)
46+
{
47+
if (IS_ENABLED(CONFIG_ARM64_WXN) && execute &&
48+
(vma->vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
49+
pr_warn_ratelimited(
50+
"process %s (%d) attempted to execute from writable memory\n",
51+
current->comm, current->pid);
52+
/* disallow unless the nowxn override is set */
53+
return !arm64_wxn_enabled();
54+
}
55+
return true;
56+
}
57+
3058
static inline void contextidr_thread_switch(struct task_struct *next)
3159
{
3260
if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))

arch/arm64/kernel/pi/idreg-override.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
189189
FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
190190
FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
191191
FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
192+
FIELD("nowxn", ARM64_SW_FEATURE_OVERRIDE_NOWXN, NULL),
192193
{}
193194
},
194195
};
@@ -221,8 +222,9 @@ static const struct {
221222
{ "arm64.nomops", "id_aa64isar2.mops=0" },
222223
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
223224
{ "nokaslr", "arm64_sw.nokaslr=1" },
224-
{ "rodata=off", "arm64_sw.rodataoff=1" },
225+
{ "rodata=off", "arm64_sw.rodataoff=1 arm64_sw.nowxn=1" },
225226
{ "arm64.nolva", "id_aa64mmfr2.varange=0" },
227+
{ "arm64.nowxn", "arm64_sw.nowxn=1" },
226228
};
227229

228230
static int __init parse_hexdigit(const char *p, u64 *v)

arch/arm64/kernel/pi/map_kernel.c

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,25 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
132132
idmap_cpu_replace_ttbr1(swapper_pg_dir);
133133
}
134134

135+
/*
 * disable_wxn - clear SCTLR_EL1.WXN early during boot.
 *
 * Placed in .idmap.text because the sequence below momentarily disables the
 * MMU: %0 is the SCTLR value with both WXN and the M (MMU enable) bit
 * cleared, %1 the same value with the MMU bit kept. The code must therefore
 * execute from the identity mapping. noinline keeps the whole sequence
 * inside this section.
 */
static void noinline __section(".idmap.text") disable_wxn(void)
136+
{
137+
u64 sctlr = read_sysreg(sctlr_el1) & ~SCTLR_ELx_WXN;
138+
139+
/*
140+
* We cannot safely clear the WXN bit while the MMU and caches are on,
141+
* so turn the MMU off, flush the TLBs and turn it on again but with
142+
* the WXN bit cleared this time.
143+
*/
144+
asm(" msr sctlr_el1, %0 ;"
145+
" isb ;"
146+
" tlbi vmalle1 ;"
147+
" dsb nsh ;"
148+
" isb ;"
149+
" msr sctlr_el1, %1 ;"
150+
" isb ;"
151+
:: "r"(sctlr & ~SCTLR_ELx_M), "r"(sctlr));
152+
}
153+
135154
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
136155
{
137156
u64 sctlr = read_sysreg(sctlr_el1);
@@ -229,6 +248,10 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
229248
if (va_bits > VA_BITS_MIN)
230249
sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));
231250

251+
if (IS_ENABLED(CONFIG_ARM64_WXN) &&
252+
arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN))
253+
disable_wxn();
254+
232255
/*
233256
* The virtual KASLR displacement modulo 2MiB is decided by the
234257
* physical placement of the image, as otherwise, we might not be able

arch/arm64/mm/proc.S

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -546,6 +546,12 @@ alternative_else_nop_endif
546546
* Prepare SCTLR
547547
*/
548548
mov_q x0, INIT_SCTLR_EL1_MMU_ON
549+
#ifdef CONFIG_ARM64_WXN
550+
ldr_l x1, arm64_sw_feature_override + FTR_OVR_VAL_OFFSET
551+
tst x1, #0xf << ARM64_SW_FEATURE_OVERRIDE_NOWXN
552+
orr x1, x0, #SCTLR_ELx_WXN
553+
csel x0, x0, x1, ne
554+
#endif
549555
ret // return to head.S
550556

551557
.unreq mair

0 commit comments

Comments (0)