include/linux/bpf.h (4 changes: 3 additions & 1 deletion)

@@ -1728,6 +1728,8 @@ struct bpf_prog_aux {
 	struct bpf_stream stream[2];
 };
 
+#define BPF_NR_CONTEXTS 4
+
 struct bpf_prog {
 	u16 pages;	/* Number of allocated pages */
 	u16 jited:1,	/* Is our filter JIT'ed? */
@@ -1754,7 +1756,7 @@ struct bpf_prog {
 	u8 tag[BPF_TAG_SIZE];
 	};
 	struct bpf_prog_stats __percpu *stats;
-	int __percpu *active;
+	u8 __percpu *active;	/* u8[BPF_NR_CONTEXTS] for recursion protection */
 	unsigned int (*bpf_func)(const void *ctx,
				 const struct bpf_insn *insn);
 	struct bpf_prog_aux *aux;	/* Auxiliary fields */
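For context: BPF_NR_CONTEXTS is 4 because interrupt_context_level() distinguishes four execution contexts (task, softirq, hardirq, NMI), so each context level gets its own recursion counter. A sketch of that helper as it appears in recent kernels (include/linux/preempt.h; reproduced from memory, not from this series):

/* Map preempt_count() to the current execution context:
 * 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI.
 * This is why the per-CPU counter array has BPF_NR_CONTEXTS == 4 slots.
 */
static __always_inline unsigned char interrupt_context_level(void)
{
	unsigned long pc = preempt_count();
	unsigned char level = 0;

	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}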
kernel/bpf/core.c (3 changes: 2 additions & 1 deletion)

@@ -112,7 +112,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 		vfree(fp);
 		return NULL;
 	}
-	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
+	fp->active = __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 8,
+					bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
 	if (!fp->active) {
 		vfree(fp);
 		kfree(aux);
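The allocator change is needed because the per-CPU slot is now a u8[BPF_NR_CONTEXTS] array rather than a single int: the typed alloc_percpu_gfp(type, gfp) macro derives size and alignment from a type, while the raw __alloc_percpu_gfp(size, align, gfp) takes both explicitly. A minimal sketch of that usage (the helper name is hypothetical):

/* Hypothetical helper illustrating the raw per-CPU allocator used above:
 * explicit size (4 bytes) and alignment (8); the result is freed with
 * free_percpu(), same as for the typed macro. */
static u8 __percpu *alloc_recursion_counters(gfp_t gfp)
{
	return __alloc_percpu_gfp(sizeof(u8[BPF_NR_CONTEXTS]), 8, gfp);
}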
kernel/bpf/trampoline.c (22 changes: 18 additions & 4 deletions)

@@ -899,11 +899,15 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
 static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
+	u8 rctx = interrupt_context_level();
+	u8 *active;
+
 	rcu_read_lock_dont_migrate();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+	active = this_cpu_ptr(prog->active);
+	if (unlikely(++active[rctx] != 1)) {
 		bpf_prog_inc_misses_counter(prog);
 		if (prog->aux->recursion_detected)
 			prog->aux->recursion_detected(prog);
@@ -944,10 +948,13 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
 					  struct bpf_tramp_run_ctx *run_ctx)
 	__releases(RCU)
 {
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
 	update_prog_stats(prog, start);
-	this_cpu_dec(*(prog->active));
+	active[rctx]--;
 	rcu_read_unlock_migrate();
 }
 
@@ -977,13 +984,17 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
 					     struct bpf_tramp_run_ctx *run_ctx)
 {
+	u8 rctx = interrupt_context_level();
+	u8 *active;
+
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+	active = this_cpu_ptr(prog->active);
+	if (unlikely(++active[rctx] != 1)) {
 		bpf_prog_inc_misses_counter(prog);
 		if (prog->aux->recursion_detected)
 			prog->aux->recursion_detected(prog);
@@ -995,10 +1006,13 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
 void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
 					     struct bpf_tramp_run_ctx *run_ctx)
 {
+	u8 rctx = interrupt_context_level();
+	u8 *active = this_cpu_ptr(prog->active);
+
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
 	update_prog_stats(prog, start);
-	this_cpu_dec(*(prog->active));
+	active[rctx]--;
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
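Both enter/exit pairs above follow the same protocol: look up the per-CPU counter array, index it by the current context level, treat a counter that does not become 1 on entry as recursion, and decrement on exit (the exit handler still runs when entry reported recursion, which keeps the counter balanced). A minimal user-space model of that protocol, with illustrative names, not kernel code:

#include <stdio.h>

#define BPF_NR_CONTEXTS 4	/* task, softirq, hardirq, NMI */

/* One counter per context level; in the kernel this array is per-CPU and
 * accessed with preemption/migration disabled, so plain ++/-- suffices. */
static unsigned char active[BPF_NR_CONTEXTS];

/* Model of __bpf_prog_enter_recur(): returns 1 if the program may run. */
static int prog_enter(unsigned char rctx)
{
	if (++active[rctx] != 1)
		return 0;	/* already running at this level: recursion */
	return 1;
}

/* Model of __bpf_prog_exit_recur(): always called to balance the enter. */
static void prog_exit(unsigned char rctx)
{
	active[rctx]--;
}

int main(void)
{
	printf("task level entered: %d\n", prog_enter(0));		/* 1 */
	printf("re-entry at task level allowed: %d\n", prog_enter(0));	/* 0 */
	prog_exit(0);		/* balance the failed (recursive) enter */

	printf("NMI level entered: %d\n", prog_enter(3));		/* 1: own slot */
	prog_exit(3);

	prog_exit(0);		/* balance the first enter */
	return 0;
}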
kernel/trace/bpf_trace.c (11 changes: 7 additions & 4 deletions)

@@ -2059,14 +2059,18 @@ static __always_inline
 void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
 {
 	struct bpf_prog *prog = link->link.prog;
+	u8 rctx = interrupt_context_level();
 	struct bpf_run_ctx *old_run_ctx;
 	struct bpf_trace_run_ctx run_ctx;
+	u8 *active;
 
 	cant_sleep();
-	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+	active = this_cpu_ptr(prog->active);
+	if (unlikely(active[rctx])) {
 		bpf_prog_inc_misses_counter(prog);
-		goto out;
+		return;
 	}
+	active[rctx]++;
 
 	run_ctx.bpf_cookie = link->cookie;
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
@@ -2076,8 +2080,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
 	rcu_read_unlock();
 
 	bpf_reset_run_ctx(old_run_ctx);
-out:
-	this_cpu_dec(*(prog->active));
+	active[rctx]--;
 }
 
 #define UNPACK(...) __VA_ARGS__