Skip to content

Commit 1c28618

Browse files
puranjaymohan authored and Kernel Patches Daemon committed
bpf: Optimize recursion detection for arm64
BPF programs detect recursion by a per-cpu active flag in struct bpf_prog. This flag is set/unset in the trampoline using atomic operations to prevent inter-context recursion. Some arm64 platforms have slow per-CPU atomic operations, for example, the Neoverse V2. This commit therefore changes the recursion detection mechanism to allow four levels of recursion (normal -> softirq -> hardirq -> NMI). With allowing limited recursion, we can now stop using atomic operations. This approach is similar to get_recursion_context() in perf. Change active to a per-cpu array of four u8 values, one for each context and use non-atomic increment/decrement on them. This improves the performance on ARM64 (64-CPU Neoverse-N1): +----------------+-------------------+-------------------+---------+ | Benchmark | Base run | Patched run | Δ (%) | +----------------+-------------------+-------------------+---------+ | fentry | 3.694 ± 0.003M/s | 3.828 ± 0.007M/s | +3.63% | | fexit | 1.389 ± 0.006M/s | 1.406 ± 0.003M/s | +1.22% | | fmodret | 1.366 ± 0.011M/s | 1.398 ± 0.002M/s | +2.34% | | rawtp | 3.453 ± 0.026M/s | 3.714 ± 0.003M/s | +7.56% | | tp | 2.596 ± 0.005M/s | 2.699 ± 0.006M/s | +3.97% | +----------------+-------------------+-------------------+---------+ Benchmarked using: tools/testing/selftests/bpf/benchs/run_bench_trigger.sh Signed-off-by: Puranjay Mohan <[email protected]>
1 parent 634398c commit 1c28618

File tree

4 files changed

+30
-10
lines changed

4 files changed

+30
-10
lines changed

include/linux/bpf.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1728,6 +1728,8 @@ struct bpf_prog_aux {
17281728
struct bpf_stream stream[2];
17291729
};
17301730

1731+
#define BPF_NR_CONTEXTS 4
1732+
17311733
struct bpf_prog {
17321734
u16 pages; /* Number of allocated pages */
17331735
u16 jited:1, /* Is our filter JIT'ed? */
@@ -1754,7 +1756,7 @@ struct bpf_prog {
17541756
u8 tag[BPF_TAG_SIZE];
17551757
};
17561758
struct bpf_prog_stats __percpu *stats;
1757-
int __percpu *active;
1759+
u8 __percpu *active; /* u8[BPF_NR_CONTEXTS] for recursion protection */
17581760
unsigned int (*bpf_func)(const void *ctx,
17591761
const struct bpf_insn *insn);
17601762
struct bpf_prog_aux *aux; /* Auxiliary fields */

kernel/bpf/core.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
112112
vfree(fp);
113113
return NULL;
114114
}
115-
fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
115+
fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 8,
116+
bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
116117
if (!fp->active) {
117118
vfree(fp);
118119
kfree(aux);

kernel/bpf/trampoline.c

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -899,11 +899,15 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
899899
static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
900900
__acquires(RCU)
901901
{
902+
u8 rctx = interrupt_context_level();
903+
u8 *active;
904+
902905
rcu_read_lock_dont_migrate();
903906

904907
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
905908

906-
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
909+
active = this_cpu_ptr(prog->active);
910+
if (unlikely(++active[rctx] != 1)) {
907911
bpf_prog_inc_misses_counter(prog);
908912
if (prog->aux->recursion_detected)
909913
prog->aux->recursion_detected(prog);
@@ -944,10 +948,13 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
944948
struct bpf_tramp_run_ctx *run_ctx)
945949
__releases(RCU)
946950
{
951+
u8 rctx = interrupt_context_level();
952+
u8 *active = this_cpu_ptr(prog->active);
953+
947954
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
948955

949956
update_prog_stats(prog, start);
950-
this_cpu_dec(*(prog->active));
957+
active[rctx]--;
951958
rcu_read_unlock_migrate();
952959
}
953960

@@ -977,13 +984,17 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
977984
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
978985
struct bpf_tramp_run_ctx *run_ctx)
979986
{
987+
u8 rctx = interrupt_context_level();
988+
u8 *active;
989+
980990
rcu_read_lock_trace();
981991
migrate_disable();
982992
might_fault();
983993

984994
run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
985995

986-
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
996+
active = this_cpu_ptr(prog->active);
997+
if (unlikely(++active[rctx] != 1)) {
987998
bpf_prog_inc_misses_counter(prog);
988999
if (prog->aux->recursion_detected)
9891000
prog->aux->recursion_detected(prog);
@@ -995,10 +1006,13 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
9951006
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
9961007
struct bpf_tramp_run_ctx *run_ctx)
9971008
{
1009+
u8 rctx = interrupt_context_level();
1010+
u8 *active = this_cpu_ptr(prog->active);
1011+
9981012
bpf_reset_run_ctx(run_ctx->saved_run_ctx);
9991013

10001014
update_prog_stats(prog, start);
1001-
this_cpu_dec(*(prog->active));
1015+
active[rctx]--;
10021016
migrate_enable();
10031017
rcu_read_unlock_trace();
10041018
}

kernel/trace/bpf_trace.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2059,14 +2059,18 @@ static __always_inline
20592059
void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
20602060
{
20612061
struct bpf_prog *prog = link->link.prog;
2062+
u8 rctx = interrupt_context_level();
20622063
struct bpf_run_ctx *old_run_ctx;
20632064
struct bpf_trace_run_ctx run_ctx;
2065+
u8 *active;
20642066

20652067
cant_sleep();
2066-
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2068+
active = this_cpu_ptr(prog->active);
2069+
if (unlikely(active[rctx])) {
20672070
bpf_prog_inc_misses_counter(prog);
2068-
goto out;
2071+
return;
20692072
}
2073+
active[rctx]++;
20702074

20712075
run_ctx.bpf_cookie = link->cookie;
20722076
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
@@ -2076,8 +2080,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
20762080
rcu_read_unlock();
20772081

20782082
bpf_reset_run_ctx(old_run_ctx);
2079-
out:
2080-
this_cpu_dec(*(prog->active));
2083+
active[rctx]--;
20812084
}
20822085

20832086
#define UNPACK(...) __VA_ARGS__

0 commit comments

Comments (0)