 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCCNT_PTR (MAX_BPF_JIT_REG + 2)
 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define PRIVATE_SP (MAX_BPF_JIT_REG + 4)
 #define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
 
 #define check_imm(bits, imm) do { \
@@ -68,6 +69,8 @@ static const int bpf2a64[] = {
 	[TCCNT_PTR] = A64_R(26),
 	/* temporary register for blinding constants */
 	[BPF_REG_AX] = A64_R(9),
+	/* callee saved register for private stack pointer */
+	[PRIVATE_SP] = A64_R(27),
 	/* callee saved register for kern_vm_start address */
 	[ARENA_VM_START] = A64_R(28),
 };
@@ -86,6 +89,7 @@ struct jit_ctx {
 	u64 user_vm_start;
 	u64 arena_vm_start;
 	bool fp_used;
+	bool priv_sp_used;
 	bool write;
 };
 
@@ -98,6 +102,10 @@ struct bpf_plt {
 #define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target)
 #define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target)
 
+/* Memory size/value to protect private stack overflow/underflow */
+#define PRIV_STACK_GUARD_SZ	16
+#define PRIV_STACK_GUARD_VAL	0xEB9F12345678eb9fULL
+
 static inline void emit(const u32 insn, struct jit_ctx *ctx)
 {
 	if (ctx->image != NULL && ctx->write)
@@ -387,8 +395,11 @@ static void find_used_callee_regs(struct jit_ctx *ctx)
 	if (reg_used & 8)
 		ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_9];
 
-	if (reg_used & 16)
+	if (reg_used & 16) {
 		ctx->used_callee_reg[i++] = bpf2a64[BPF_REG_FP];
+		if (ctx->priv_sp_used)
+			ctx->used_callee_reg[i++] = bpf2a64[PRIVATE_SP];
+	}
 
 	if (ctx->arena_vm_start)
 		ctx->used_callee_reg[i++] = bpf2a64[ARENA_VM_START];
@@ -462,6 +473,19 @@ static void pop_callee_regs(struct jit_ctx *ctx)
 	}
 }
 
+static void emit_percpu_ptr(const u8 dst_reg, void __percpu *ptr,
+			    struct jit_ctx *ctx)
+{
+	const u8 tmp = bpf2a64[TMP_REG_1];
+
+	emit_a64_mov_i64(dst_reg, (__force const u64)ptr, ctx);
+	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+		emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+	else
+		emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+	emit(A64_ADD(1, dst_reg, dst_reg, tmp), ctx);
+}
+
 #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
 #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
 
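A note on the helper just added: emit_percpu_ptr() JITs the same address arithmetic this_cpu_ptr() performs in C. It loads the __percpu cookie as a 64-bit immediate, reads this CPU's offset from TPIDR_EL2 (VHE kernels run at EL2) or TPIDR_EL1, and adds the two. A minimal userspace model of that computation, with invented base and offsets:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake per-CPU offsets standing in for TPIDR_EL1/EL2 values. */
    static uint64_t fake_tpidr[2] = { 0x1000, 0x2000 };

    int main(void)
    {
            uint64_t base = 0x400000;       /* fake __percpu cookie */

            /* JITed sequence: mov dst, base; mrs tmp, tpidr; add dst, dst, tmp */
            for (int cpu = 0; cpu < 2; cpu++)
                    printf("cpu%d -> %#llx\n", cpu,
                           (unsigned long long)(base + fake_tpidr[cpu]));
            return 0;
    }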
@@ -477,6 +501,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	const bool is_main_prog = !bpf_is_subprog(prog);
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 priv_sp = bpf2a64[PRIVATE_SP];
+	void __percpu *priv_stack_ptr;
 	const int idx0 = ctx->idx;
 	int cur_offset;
@@ -552,15 +578,23 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 		emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx);
 	}
 
-	if (ctx->fp_used)
-		/* Set up BPF prog stack base register */
-		emit(A64_MOV(1, fp, A64_SP), ctx);
-
 	/* Stack must be multiples of 16B */
 	ctx->stack_size = round_up(prog->aux->stack_depth, 16);
 
+	if (ctx->fp_used) {
+		if (ctx->priv_sp_used) {
+			/* Set up private stack pointer */
+			priv_stack_ptr = prog->aux->priv_stack_ptr + PRIV_STACK_GUARD_SZ;
+			emit_percpu_ptr(priv_sp, priv_stack_ptr, ctx);
+			emit(A64_ADD_I(1, fp, priv_sp, ctx->stack_size), ctx);
+		} else {
+			/* Set up BPF prog stack base register */
+			emit(A64_MOV(1, fp, A64_SP), ctx);
+		}
+	}
+
 	/* Set up function call stack */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	if (ctx->arena_vm_start)
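With the private stack, the prologue leaves SP untouched and instead points FP at priv_sp + stack_size, the top of this CPU's usable region; the matching SP adjustments in the tail-call path and the epilogue below are skipped for the same reason. Under the guard layout defined earlier, each CPU's allocation looks roughly like this (a sketch inferred from this patch, not authoritative):

    base             base + 16                       base + 16 + stack_size
    | low guard 16B  | usable BPF stack (grows down) | high guard 16B |
                     ^ priv_sp                       ^ fp

BPF stack slots sit at negative offsets from fp, so writing too deep lands in the low (overflow) guard, while a stray positive offset lands in the high (underflow) guard.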
@@ -624,7 +658,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	emit(A64_STR64I(tcc, ptr, 0), ctx);
 
 	/* restore SP */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	pop_callee_regs(ctx);
@@ -992,7 +1026,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 	const u8 ptr = bpf2a64[TCCNT_PTR];
 
 	/* We're done with BPF stack */
-	if (ctx->stack_size)
+	if (ctx->stack_size && !ctx->priv_sp_used)
 		emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 
 	pop_callee_regs(ctx);
@@ -1121,6 +1155,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 priv_sp = bpf2a64[PRIVATE_SP];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -1565,7 +1600,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		src = tmp2;
 	}
 	if (src == fp) {
-		src_adj = A64_SP;
+		src_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 		off_adj = off + ctx->stack_size;
 	} else {
 		src_adj = src;
@@ -1655,7 +1690,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		dst = tmp2;
 	}
 	if (dst == fp) {
-		dst_adj = A64_SP;
+		dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 		off_adj = off + ctx->stack_size;
 	} else {
 		dst_adj = dst;
@@ -1717,7 +1752,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		dst = tmp2;
 	}
 	if (dst == fp) {
-		dst_adj = A64_SP;
+		dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
 		off_adj = off + ctx->stack_size;
 	} else {
 		dst_adj = dst;
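The three hunks above apply one rebase rule: an access at fp + off (off negative) is normally re-encoded against A64_SP with the offset shifted up by stack_size, which is valid because fp == SP + stack_size after the standard prologue; with a private stack the identity becomes fp == priv_sp + stack_size, so the same shifted offset is applied against priv_sp instead. As a one-line model (hypothetical helper name, not in the patch):

    /* off is negative for BPF stack slots; result applies to A64_SP or priv_sp */
    static inline int rebase_fp_offset(int off, int stack_size)
    {
            return off + stack_size;
    }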
@@ -1860,6 +1895,39 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
+static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
+{
+	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+	u64 *stack_ptr;
+
+	for_each_possible_cpu(cpu) {
+		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+		stack_ptr[0] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[1] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
+		stack_ptr[underflow_idx + 1] = PRIV_STACK_GUARD_VAL;
+	}
+}
+
+static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
+				   struct bpf_prog *prog)
+{
+	int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
+	u64 *stack_ptr;
+
+	for_each_possible_cpu(cpu) {
+		stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
+		if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[1] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL ||
+		    stack_ptr[underflow_idx + 1] != PRIV_STACK_GUARD_VAL) {
+			pr_err("BPF private stack overflow/underflow detected for prog %s\n",
+			       bpf_jit_get_prog_name(prog));
+			break;
+		}
+	}
+}
+
 struct arm64_jit_data {
 	struct bpf_binary_header *header;
 	u8 *ro_image;
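The index math in both guard helpers is a byte-to-word conversion: the guards are the first and last PRIV_STACK_GUARD_SZ bytes of the allocation, i.e. u64 words 0..1 and underflow_idx..underflow_idx+1. Worked through for an assumed stack_depth of 200:

    round_up(200, 16)               = 208
    alloc_size = 208 + 2 * 16       = 240 bytes = 30 u64 words
    underflow_idx = (240 - 16) >> 3 = 28

so words 0, 1, 28 and 29 carry PRIV_STACK_GUARD_VAL, and the program's stack occupies words 2..27.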
@@ -1872,9 +1940,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	int image_size, prog_size, extable_size, extable_align, extable_offset;
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
-	struct bpf_binary_header *ro_header;
+	struct bpf_binary_header *ro_header = NULL;
 	struct arm64_jit_data *jit_data;
+	void __percpu *priv_stack_ptr = NULL;
 	bool was_classic = bpf_prog_was_classic(prog);
+	int priv_stack_alloc_sz;
 	bool tmp_blinded = false;
 	bool extra_pass = false;
 	struct jit_ctx ctx;
@@ -1906,6 +1976,23 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		}
 		prog->aux->jit_data = jit_data;
 	}
+	priv_stack_ptr = prog->aux->priv_stack_ptr;
+	if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
+		/* Allocate actual private stack size with verifier-calculated
+		 * stack size plus two memory guards to protect overflow and
+		 * underflow.
+		 */
+		priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+				      2 * PRIV_STACK_GUARD_SZ;
+		priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 16, GFP_KERNEL);
+		if (!priv_stack_ptr) {
+			prog = orig_prog;
+			goto out_priv_stack;
+		}
+
+		priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
+		prog->aux->priv_stack_ptr = priv_stack_ptr;
+	}
 	if (jit_data->ctx.offset) {
 		ctx = jit_data->ctx;
 		ro_image_ptr = jit_data->ro_image;
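The per-CPU region is allocated once, on the first JIT pass, and cached in prog->aux->priv_stack_ptr so the extra pass reuses it; it is freed either on JIT failure (the !ro_header path under out_off below) or in bpf_jit_free(). A compact userspace model of the guard lifecycle, init after allocation and verify before free, using the same constants (illustrative only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define GUARD_SZ  16
    #define GUARD_VAL 0xEB9F12345678eb9fULL

    int main(void)
    {
            int depth = 200;                                 /* assumed stack_depth */
            int alloc = ((depth + 15) & ~15) + 2 * GUARD_SZ; /* round_up + guards = 240 */
            int uf = (alloc - GUARD_SZ) >> 3;                /* underflow_idx = 28 */
            uint64_t *stk = calloc(alloc / 8, sizeof(*stk));

            if (!stk)
                    return 1;

            /* what priv_stack_init_guard() does for each CPU */
            stk[0] = stk[1] = stk[uf] = stk[uf + 1] = GUARD_VAL;

            /* ... a well-behaved program only touches words 2 .. uf-1 ... */

            /* what priv_stack_check_guard() verifies on free */
            assert(stk[0] == GUARD_VAL && stk[1] == GUARD_VAL &&
                   stk[uf] == GUARD_VAL && stk[uf + 1] == GUARD_VAL);
            free(stk);
            return 0;
    }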
@@ -1929,6 +2016,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
 	ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
 
+	if (priv_stack_ptr)
+		ctx.priv_sp_used = true;
+
 	/* Pass 1: Estimate the maximum image size.
 	 *
 	 * BPF line info needs ctx->offset[i] to be the offset of
@@ -2068,7 +2158,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			ctx.offset[i] *= AARCH64_INSN_SIZE;
 		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
 out_off:
+		if (!ro_header && priv_stack_ptr) {
+			free_percpu(priv_stack_ptr);
+			prog->aux->priv_stack_ptr = NULL;
+		}
 		kvfree(ctx.offset);
+out_priv_stack:
 		kfree(jit_data);
 		prog->aux->jit_data = NULL;
 	}
@@ -2087,6 +2182,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	goto out_off;
 }
 
+bool bpf_jit_supports_private_stack(void)
+{
+	return true;
+}
+
 bool bpf_jit_supports_kfunc_call(void)
 {
 	return true;
@@ -2932,6 +3032,8 @@ void bpf_jit_free(struct bpf_prog *prog)
 	if (prog->jited) {
 		struct arm64_jit_data *jit_data = prog->aux->jit_data;
 		struct bpf_binary_header *hdr;
+		void __percpu *priv_stack_ptr;
+		int priv_stack_alloc_sz;
 
 		/*
 		 * If we fail the final pass of JIT (from jit_subprogs),
@@ -2945,6 +3047,13 @@ void bpf_jit_free(struct bpf_prog *prog)
 		}
 		hdr = bpf_jit_binary_pack_hdr(prog);
 		bpf_jit_binary_pack_free(hdr, NULL);
+		priv_stack_ptr = prog->aux->priv_stack_ptr;
+		if (priv_stack_ptr) {
+			priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
+					      2 * PRIV_STACK_GUARD_SZ;
+			priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
+			free_percpu(prog->aux->priv_stack_ptr);
+		}
 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
 	}
 