Skip to content

Commit 4c03fe1

Browse files
khuey authored and Ingo Molnar committed
perf/bpf: Reorder bpf_overflow_handler() ahead of __perf_event_overflow()
This will allow __perf_event_overflow() to call bpf_overflow_handler(). Signed-off-by: Kyle Huey <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent acf68d9 commit 4c03fe1

File tree

1 file changed

+92
-91
lines changed

1 file changed

+92
-91
lines changed

kernel/events/core.c

Lines changed: 92 additions & 91 deletions
Original file line number | Diff line number | Diff line change
@@ -9563,6 +9563,98 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *r
95639563
return true;
95649564
}
95659565

9566+
#ifdef CONFIG_BPF_SYSCALL
/*
 * Overflow handler used when a BPF program is attached to the event.
 *
 * Runs the attached program with a bpf_perf_event_data_kern context and,
 * unless the program returned 0 (sample consumed/filtered), chains to the
 * event's original overflow handler.
 */
static void bpf_overflow_handler(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	struct bpf_perf_event_data_kern ctx = {
		.data = data,
		.event = event,
	};
	struct bpf_prog *prog;
	int ret = 0;		/* 0 == suppress the original handler only if a prog ran and returned 0 */

	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
	/*
	 * Recursion guard: if a BPF program is already active on this CPU,
	 * skip running the program (ret stays 0, so the original handler is
	 * NOT chained in that case either).
	 */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
		goto out;
	rcu_read_lock();
	prog = READ_ONCE(event->prog);
	if (prog) {
		/* Fill in the sample fields the program may read through ctx. */
		perf_prepare_sample(data, event, regs);
		ret = bpf_prog_run(prog, &ctx);
	}
	rcu_read_unlock();
out:
	__this_cpu_dec(bpf_prog_active);
	if (!ret)
		return;

	event->orig_overflow_handler(event, data, regs);
}
9595+
9596+
/*
 * Attach a BPF program as the event's overflow handler.
 *
 * Validates the program/event combination, then swaps the event's
 * overflow_handler for bpf_overflow_handler, saving the previous handler
 * in orig_overflow_handler so it can be chained to and later restored.
 *
 * Returns 0 on success or a negative errno:
 *  -EINVAL  handler context in use (hw breakpoint / kernel counter) or
 *           wrong program type
 *  -EEXIST  a program is already attached
 *  -EPROTO  precise_ip event lacks the full callchain a stack-walking
 *           program needs (see comment below)
 */
static int perf_event_set_bpf_handler(struct perf_event *event,
				      struct bpf_prog *prog,
				      u64 bpf_cookie)
{
	if (event->overflow_handler_context)
		/* hw breakpoint or kernel counter */
		return -EINVAL;

	if (event->prog)
		return -EEXIST;

	if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
		return -EINVAL;

	if (event->attr.precise_ip &&
	    prog->call_get_stack &&
	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
	     event->attr.exclude_callchain_kernel ||
	     event->attr.exclude_callchain_user)) {
		/*
		 * On perf_event with precise_ip, calling bpf_get_stack()
		 * may trigger unwinder warnings and occasional crashes.
		 * bpf_get_[stack|stackid] works around this issue by using
		 * callchain attached to perf_sample_data. If the
		 * perf_event does not have a full (kernel and user) callchain
		 * attached to perf_sample_data, do not allow attaching BPF
		 * program that calls bpf_get_[stack|stackid].
		 */
		return -EPROTO;
	}

	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	/* Save the current handler so bpf_overflow_handler() can chain to it. */
	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
	return 0;
}
9633+
9634+
/*
 * Detach the BPF overflow handler from the event, restoring the handler
 * that was saved by perf_event_set_bpf_handler(), and drop the program
 * reference. No-op if no program is attached.
 */
static void perf_event_free_bpf_handler(struct perf_event *event)
{
	struct bpf_prog *prog = event->prog;

	if (!prog)
		return;

	/* Restore the original handler before clearing event->prog. */
	WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
	event->prog = NULL;
	bpf_prog_put(prog);
}
9645+
#else
/* !CONFIG_BPF_SYSCALL: BPF overflow handlers are unsupported. */
static int perf_event_set_bpf_handler(struct perf_event *event,
				      struct bpf_prog *prog,
				      u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static void perf_event_free_bpf_handler(struct perf_event *event)
{
}
#endif
9657+
95669658
/*
95679659
* Generic event overflow handling, sampling.
95689660
*/
@@ -10441,97 +10533,6 @@ static void perf_event_free_filter(struct perf_event *event)
1044110533
ftrace_profile_free_filter(event);
1044210534
}
1044310535

10444-
#ifdef CONFIG_BPF_SYSCALL
/*
 * Overflow handler used when a BPF program is attached to the event.
 * Runs the attached program; chains to the event's original overflow
 * handler unless the program ran and returned 0.
 */
static void bpf_overflow_handler(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	struct bpf_perf_event_data_kern ctx = {
		.data = data,
		.event = event,
	};
	struct bpf_prog *prog;
	int ret = 0;

	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
	/* Recursion guard: skip if a BPF program is already active on this CPU. */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
		goto out;
	rcu_read_lock();
	prog = READ_ONCE(event->prog);
	if (prog) {
		/* Fill in the sample fields the program may read through ctx. */
		perf_prepare_sample(data, event, regs);
		ret = bpf_prog_run(prog, &ctx);
	}
	rcu_read_unlock();
out:
	__this_cpu_dec(bpf_prog_active);
	if (!ret)
		return;

	event->orig_overflow_handler(event, data, regs);
}
10473-
10474-
/*
 * Attach a BPF program as the event's overflow handler, saving the
 * previous handler in orig_overflow_handler for chaining/restoration.
 * Returns 0 or a negative errno (-EINVAL, -EEXIST, -EPROTO).
 */
static int perf_event_set_bpf_handler(struct perf_event *event,
				      struct bpf_prog *prog,
				      u64 bpf_cookie)
{
	if (event->overflow_handler_context)
		/* hw breakpoint or kernel counter */
		return -EINVAL;

	if (event->prog)
		return -EEXIST;

	if (prog->type != BPF_PROG_TYPE_PERF_EVENT)
		return -EINVAL;

	if (event->attr.precise_ip &&
	    prog->call_get_stack &&
	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
	     event->attr.exclude_callchain_kernel ||
	     event->attr.exclude_callchain_user)) {
		/*
		 * On perf_event with precise_ip, calling bpf_get_stack()
		 * may trigger unwinder warnings and occasional crashes.
		 * bpf_get_[stack|stackid] works around this issue by using
		 * callchain attached to perf_sample_data. If the
		 * perf_event does not have a full (kernel and user) callchain
		 * attached to perf_sample_data, do not allow attaching BPF
		 * program that calls bpf_get_[stack|stackid].
		 */
		return -EPROTO;
	}

	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	/* Save the current handler so bpf_overflow_handler() can chain to it. */
	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
	return 0;
}
10511-
10512-
/*
 * Detach the BPF overflow handler, restore the saved original handler,
 * and drop the program reference. No-op if no program is attached.
 */
static void perf_event_free_bpf_handler(struct perf_event *event)
{
	struct bpf_prog *prog = event->prog;

	if (!prog)
		return;

	/* Restore the original handler before clearing event->prog. */
	WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
	event->prog = NULL;
	bpf_prog_put(prog);
}
10523-
#else
/* !CONFIG_BPF_SYSCALL: BPF overflow handlers are unsupported. */
static int perf_event_set_bpf_handler(struct perf_event *event,
				      struct bpf_prog *prog,
				      u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static void perf_event_free_bpf_handler(struct perf_event *event)
{
}
#endif
10534-
1053510536
/*
1053610537
* returns true if the event is a tracepoint, or a kprobe/upprobe created
1053710538
* with perf_event_open()

0 commit comments

Comments (0)