Skip to content

Commit 6b0ef27

Browse files
ardbiesheuvel (Ard Biesheuvel) authored and Russell King (Oracle)
committed
ARM: 9384/2: mm: Make tlbflush routines CFI safe
Instead of avoiding CFI entirely on the TLB flush helpers, reorganize the code so that the CFI machinery can deal with it. The important things to take into account are: - functions in asm called indirectly from C need to be defined using SYM_TYPED_FUNC_START() - a reference to the asm function needs to be visible to the compiler, in order to get it to emit the typeid symbol. The latter means that defining the cpu_tlb_fns structs is best done from C code, so that the references in the static initializers will be visible to the compiler. Signed-off-by: Ard Biesheuvel <[email protected]> Tested-by: Kees Cook <[email protected]> Reviewed-by: Sami Tolvanen <[email protected]> Signed-off-by: Linus Walleij <[email protected]> Signed-off-by: Russell King (Oracle) <[email protected]>
1 parent 4cece76 commit 6b0ef27

File tree

9 files changed

+119
-58
lines changed

9 files changed

+119
-58
lines changed

arch/arm/mm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@ obj-$(CONFIG_CPU_TLB_FEROCEON) += tlb-v4wbi.o # reuse v4wbi TLB functions
6262
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
6363
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
6464
obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
65+
obj-y += tlb.o
6566

6667
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
6768
obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o

arch/arm/mm/proc-macros.S

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -338,21 +338,6 @@ ENTRY(\name\()_cache_fns)
338338
.size \name\()_cache_fns, . - \name\()_cache_fns
339339
.endm
340340

341-
.macro define_tlb_functions name:req, flags_up:req, flags_smp
342-
.type \name\()_tlb_fns, #object
343-
.align 2
344-
ENTRY(\name\()_tlb_fns)
345-
.long \name\()_flush_user_tlb_range
346-
.long \name\()_flush_kern_tlb_range
347-
.ifnb \flags_smp
348-
ALT_SMP(.long \flags_smp )
349-
ALT_UP(.long \flags_up )
350-
.else
351-
.long \flags_up
352-
.endif
353-
.size \name\()_tlb_fns, . - \name\()_tlb_fns
354-
.endm
355-
356341
.macro globl_equ x, y
357342
.globl \x
358343
.equ \x, \y

arch/arm/mm/tlb-fa.S

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
*/
1616
#include <linux/linkage.h>
1717
#include <linux/init.h>
18+
#include <linux/cfi_types.h>
1819
#include <asm/assembler.h>
1920
#include <asm/asm-offsets.h>
2021
#include <asm/tlbflush.h>
@@ -31,7 +32,7 @@
3132
* - mm - mm_struct describing address space
3233
*/
3334
.align 4
34-
ENTRY(fa_flush_user_tlb_range)
35+
SYM_TYPED_FUNC_START(fa_flush_user_tlb_range)
3536
vma_vm_mm ip, r2
3637
act_mm r3 @ get current->active_mm
3738
eors r3, ip, r3 @ == mm ?
@@ -46,9 +47,10 @@ ENTRY(fa_flush_user_tlb_range)
4647
blo 1b
4748
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
4849
ret lr
50+
SYM_FUNC_END(fa_flush_user_tlb_range)
4951

5052

51-
ENTRY(fa_flush_kern_tlb_range)
53+
SYM_TYPED_FUNC_START(fa_flush_kern_tlb_range)
5254
mov r3, #0
5355
mcr p15, 0, r3, c7, c10, 4 @ drain WB
5456
bic r0, r0, #0x0ff
@@ -60,8 +62,4 @@ ENTRY(fa_flush_kern_tlb_range)
6062
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
6163
mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
6264
ret lr
63-
64-
__INITDATA
65-
66-
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
67-
define_tlb_functions fa, fa_tlb_flags
65+
SYM_FUNC_END(fa_flush_kern_tlb_range)

arch/arm/mm/tlb-v4.S

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
*/
1212
#include <linux/linkage.h>
1313
#include <linux/init.h>
14+
#include <linux/cfi_types.h>
1415
#include <asm/assembler.h>
1516
#include <asm/asm-offsets.h>
1617
#include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
2728
* - mm - mm_struct describing address space
2829
*/
2930
.align 5
30-
ENTRY(v4_flush_user_tlb_range)
31+
SYM_TYPED_FUNC_START(v4_flush_user_tlb_range)
3132
vma_vm_mm ip, r2
3233
act_mm r3 @ get current->active_mm
3334
eors r3, ip, r3 @ == mm ?
@@ -40,6 +41,7 @@ ENTRY(v4_flush_user_tlb_range)
4041
cmp r0, r1
4142
blo 1b
4243
ret lr
44+
SYM_FUNC_END(v4_flush_user_tlb_range)
4345

4446
/*
4547
* v4_flush_kern_tlb_range(start, end)
@@ -50,10 +52,11 @@ ENTRY(v4_flush_user_tlb_range)
5052
* - start - virtual address (may not be aligned)
5153
* - end - virtual address (may not be aligned)
5254
*/
55+
#ifdef CONFIG_CFI_CLANG
56+
SYM_TYPED_FUNC_START(v4_flush_kern_tlb_range)
57+
b .v4_flush_kern_tlb_range
58+
SYM_FUNC_END(v4_flush_kern_tlb_range)
59+
#else
5360
.globl v4_flush_kern_tlb_range
5461
.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
55-
56-
__INITDATA
57-
58-
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
59-
define_tlb_functions v4, v4_tlb_flags
62+
#endif

arch/arm/mm/tlb-v4wb.S

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
*/
1212
#include <linux/linkage.h>
1313
#include <linux/init.h>
14+
#include <linux/cfi_types.h>
1415
#include <asm/assembler.h>
1516
#include <asm/asm-offsets.h>
1617
#include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
2728
* - mm - mm_struct describing address space
2829
*/
2930
.align 5
30-
ENTRY(v4wb_flush_user_tlb_range)
31+
SYM_TYPED_FUNC_START(v4wb_flush_user_tlb_range)
3132
vma_vm_mm ip, r2
3233
act_mm r3 @ get current->active_mm
3334
eors r3, ip, r3 @ == mm ?
@@ -43,6 +44,7 @@ ENTRY(v4wb_flush_user_tlb_range)
4344
cmp r0, r1
4445
blo 1b
4546
ret lr
47+
SYM_FUNC_END(v4wb_flush_user_tlb_range)
4648

4749
/*
4850
* v4_flush_kern_tlb_range(start, end)
@@ -53,7 +55,7 @@ ENTRY(v4wb_flush_user_tlb_range)
5355
* - start - virtual address (may not be aligned)
5456
* - end - virtual address (may not be aligned)
5557
*/
56-
ENTRY(v4wb_flush_kern_tlb_range)
58+
SYM_TYPED_FUNC_START(v4wb_flush_kern_tlb_range)
5759
mov r3, #0
5860
mcr p15, 0, r3, c7, c10, 4 @ drain WB
5961
bic r0, r0, #0x0ff
@@ -64,8 +66,4 @@ ENTRY(v4wb_flush_kern_tlb_range)
6466
cmp r0, r1
6567
blo 1b
6668
ret lr
67-
68-
__INITDATA
69-
70-
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
71-
define_tlb_functions v4wb, v4wb_tlb_flags
69+
SYM_FUNC_END(v4wb_flush_kern_tlb_range)

arch/arm/mm/tlb-v4wbi.S

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
*/
1212
#include <linux/linkage.h>
1313
#include <linux/init.h>
14+
#include <linux/cfi_types.h>
1415
#include <asm/assembler.h>
1516
#include <asm/asm-offsets.h>
1617
#include <asm/tlbflush.h>
@@ -26,7 +27,7 @@
2627
* - mm - mm_struct describing address space
2728
*/
2829
.align 5
29-
ENTRY(v4wbi_flush_user_tlb_range)
30+
SYM_TYPED_FUNC_START(v4wbi_flush_user_tlb_range)
3031
vma_vm_mm ip, r2
3132
act_mm r3 @ get current->active_mm
3233
eors r3, ip, r3 @ == mm ?
@@ -43,8 +44,9 @@ ENTRY(v4wbi_flush_user_tlb_range)
4344
cmp r0, r1
4445
blo 1b
4546
ret lr
47+
SYM_FUNC_END(v4wbi_flush_user_tlb_range)
4648

47-
ENTRY(v4wbi_flush_kern_tlb_range)
49+
SYM_TYPED_FUNC_START(v4wbi_flush_kern_tlb_range)
4850
mov r3, #0
4951
mcr p15, 0, r3, c7, c10, 4 @ drain WB
5052
bic r0, r0, #0x0ff
@@ -55,8 +57,4 @@ ENTRY(v4wbi_flush_kern_tlb_range)
5557
cmp r0, r1
5658
blo 1b
5759
ret lr
58-
59-
__INITDATA
60-
61-
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
62-
define_tlb_functions v4wbi, v4wbi_tlb_flags
60+
SYM_FUNC_END(v4wbi_flush_kern_tlb_range)

arch/arm/mm/tlb-v6.S

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
*/
1010
#include <linux/init.h>
1111
#include <linux/linkage.h>
12+
#include <linux/cfi_types.h>
1213
#include <asm/asm-offsets.h>
1314
#include <asm/assembler.h>
1415
#include <asm/page.h>
@@ -32,7 +33,7 @@
3233
* - the "Invalidate single entry" instruction will invalidate
3334
* both the I and the D TLBs on Harvard-style TLBs
3435
*/
35-
ENTRY(v6wbi_flush_user_tlb_range)
36+
SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range)
3637
vma_vm_mm r3, r2 @ get vma->vm_mm
3738
mov ip, #0
3839
mmid r3, r3 @ get vm_mm->context.id
@@ -56,6 +57,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
5657
blo 1b
5758
mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
5859
ret lr
60+
SYM_FUNC_END(v6wbi_flush_user_tlb_range)
5961

6062
/*
6163
* v6wbi_flush_kern_tlb_range(start,end)
@@ -65,7 +67,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
6567
* - start - start address (may not be aligned)
6668
* - end - end address (exclusive, may not be aligned)
6769
*/
68-
ENTRY(v6wbi_flush_kern_tlb_range)
70+
SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range)
6971
mov r2, #0
7072
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
7173
mov r0, r0, lsr #PAGE_SHIFT @ align address
@@ -85,8 +87,4 @@ ENTRY(v6wbi_flush_kern_tlb_range)
8587
mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
8688
mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
8789
ret lr
88-
89-
__INIT
90-
91-
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
92-
define_tlb_functions v6wbi, v6wbi_tlb_flags
90+
SYM_FUNC_END(v6wbi_flush_kern_tlb_range)

arch/arm/mm/tlb-v7.S

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
*/
1111
#include <linux/init.h>
1212
#include <linux/linkage.h>
13+
#include <linux/cfi_types.h>
1314
#include <asm/assembler.h>
1415
#include <asm/asm-offsets.h>
1516
#include <asm/page.h>
@@ -31,7 +32,7 @@
3132
* - the "Invalidate single entry" instruction will invalidate
3233
* both the I and the D TLBs on Harvard-style TLBs
3334
*/
34-
ENTRY(v7wbi_flush_user_tlb_range)
35+
SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range)
3536
vma_vm_mm r3, r2 @ get vma->vm_mm
3637
mmid r3, r3 @ get vm_mm->context.id
3738
dsb ish
@@ -57,7 +58,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
5758
blo 1b
5859
dsb ish
5960
ret lr
60-
ENDPROC(v7wbi_flush_user_tlb_range)
61+
SYM_FUNC_END(v7wbi_flush_user_tlb_range)
6162

6263
/*
6364
* v7wbi_flush_kern_tlb_range(start,end)
@@ -67,7 +68,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
6768
* - start - start address (may not be aligned)
6869
* - end - end address (exclusive, may not be aligned)
6970
*/
70-
ENTRY(v7wbi_flush_kern_tlb_range)
71+
SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range)
7172
dsb ish
7273
mov r0, r0, lsr #PAGE_SHIFT @ align address
7374
mov r1, r1, lsr #PAGE_SHIFT
@@ -86,9 +87,4 @@ ENTRY(v7wbi_flush_kern_tlb_range)
8687
dsb ish
8788
isb
8889
ret lr
89-
ENDPROC(v7wbi_flush_kern_tlb_range)
90-
91-
__INIT
92-
93-
/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
94-
define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
90+
SYM_FUNC_END(v7wbi_flush_kern_tlb_range)

arch/arm/mm/tlb.c

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
// Copyright 2024 Google LLC
3+
// Author: Ard Biesheuvel <[email protected]>
4+
5+
#include <linux/types.h>
6+
#include <asm/tlbflush.h>
7+
8+
#ifdef CONFIG_CPU_TLB_V4WT
9+
void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
10+
void v4_flush_kern_tlb_range(unsigned long, unsigned long);
11+
12+
struct cpu_tlb_fns v4_tlb_fns __initconst = {
13+
.flush_user_range = v4_flush_user_tlb_range,
14+
.flush_kern_range = v4_flush_kern_tlb_range,
15+
.tlb_flags = v4_tlb_flags,
16+
};
17+
#endif
18+
19+
#ifdef CONFIG_CPU_TLB_V4WB
20+
void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
21+
void v4wb_flush_kern_tlb_range(unsigned long, unsigned long);
22+
23+
struct cpu_tlb_fns v4wb_tlb_fns __initconst = {
24+
.flush_user_range = v4wb_flush_user_tlb_range,
25+
.flush_kern_range = v4wb_flush_kern_tlb_range,
26+
.tlb_flags = v4wb_tlb_flags,
27+
};
28+
#endif
29+
30+
#if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON)
31+
void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
32+
void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long);
33+
34+
struct cpu_tlb_fns v4wbi_tlb_fns __initconst = {
35+
.flush_user_range = v4wbi_flush_user_tlb_range,
36+
.flush_kern_range = v4wbi_flush_kern_tlb_range,
37+
.tlb_flags = v4wbi_tlb_flags,
38+
};
39+
#endif
40+
41+
#ifdef CONFIG_CPU_TLB_V6
42+
void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
43+
void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);
44+
45+
struct cpu_tlb_fns v6wbi_tlb_fns __initconst = {
46+
.flush_user_range = v6wbi_flush_user_tlb_range,
47+
.flush_kern_range = v6wbi_flush_kern_tlb_range,
48+
.tlb_flags = v6wbi_tlb_flags,
49+
};
50+
#endif
51+
52+
#ifdef CONFIG_CPU_TLB_V7
53+
void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
54+
void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long);
55+
56+
struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {
57+
.flush_user_range = v7wbi_flush_user_tlb_range,
58+
.flush_kern_range = v7wbi_flush_kern_tlb_range,
59+
.tlb_flags = IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp
60+
: v7wbi_tlb_flags_up,
61+
};
62+
63+
#ifdef CONFIG_SMP_ON_UP
64+
/* This will be run-time patched so the offset better be right */
65+
static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8);
66+
67+
asm(" .pushsection \".alt.smp.init\", \"a\" \n" \
68+
" .align 2 \n" \
69+
" .long v7wbi_tlb_fns + 8 - . \n" \
70+
" .long " __stringify(v7wbi_tlb_flags_up) " \n" \
71+
" .popsection \n");
72+
#endif
73+
#endif
74+
75+
#ifdef CONFIG_CPU_TLB_FA
76+
void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
77+
void fa_flush_kern_tlb_range(unsigned long, unsigned long);
78+
79+
struct cpu_tlb_fns fa_tlb_fns __initconst = {
80+
.flush_user_range = fa_flush_user_tlb_range,
81+
.flush_kern_range = fa_flush_kern_tlb_range,
82+
.tlb_flags = fa_tlb_flags,
83+
};
84+
#endif

0 commit comments

Comments
 (0)