From 3b5554d657de70fdc197529f019ecd6319112e76 Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Wed, 29 Oct 2025 00:25:01 +0800
Subject: [PATCH 1/2] perf: Refactor get_perf_callchain

From the BPF stack map side, we want to use our own buffers to avoid
unnecessary copying and to ensure that the buffer will not be
overwritten by other preemptive tasks. Peter suggested providing more
flexible stack-sampling APIs that can be used in BPF, so that we can
still use the perf callchain entry with the help of these APIs. The
next patch will modify the BPF part.

Signed-off-by: Peter Zijlstra
Signed-off-by: Tao Chen
---
 include/linux/perf_event.h | 11 +++++-
 kernel/bpf/stackmap.c      |  4 +-
 kernel/events/callchain.c  | 75 ++++++++++++++++++++++++--------------
 kernel/events/core.c       |  2 +-
 4 files changed, 61 insertions(+), 31 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b99b..14a382cad1dd4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -67,6 +67,7 @@ struct perf_callchain_entry_ctx {
 	u32			nr;
 	short			contexts;
 	bool			contexts_maxed;
+	bool			add_mark;
 };
 
 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -1718,9 +1719,17 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+
+extern void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+				      struct perf_callchain_entry *entry,
+				      u32 max_stack, bool add_mark);
+
+extern void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+extern void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark);
+		   u32 max_stack, bool crosstask);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4d53cdd1374cf..e28b35c7e0b6c 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -315,7 +315,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 		max_depth = sysctl_perf_event_max_stack;
 
 	trace = get_perf_callchain(regs, kernel, user, max_depth,
-				   false, false);
+				   false);
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
@@ -452,7 +452,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 		trace = get_callchain_entry_for_task(task, max_depth);
 	else
 		trace = get_perf_callchain(regs, kernel, user, max_depth,
-					   crosstask, false);
+					   crosstask);
 
 	if (unlikely(!trace) || trace->nr < skip) {
 		if (may_fault)
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31faf..2c36e49062520 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -216,13 +216,54 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 #endif
 }
 
+void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+			       struct perf_callchain_entry *entry,
+			       u32 max_stack, bool add_mark)
+
+{
+	ctx->entry		= entry;
+	ctx->max_stack		= max_stack;
+	ctx->nr			= entry->nr = 0;
+	ctx->contexts		= 0;
+	ctx->contexts_maxed	= false;
+	ctx->add_mark		= add_mark;
+}
+
+void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return;
+
+	if (ctx->add_mark)
+		perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL);
+	perf_callchain_kernel(ctx, regs);
+}
+
+void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+	int start_entry_idx;
+
+	if (!user_mode(regs)) {
+		if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
+			return;
+		regs = task_pt_regs(current);
+	}
+
+	if (ctx->add_mark)
+		perf_callchain_store_context(ctx, PERF_CONTEXT_USER);
+
+	start_entry_idx = ctx->nr;
+	perf_callchain_user(ctx, regs);
+	fixup_uretprobe_trampoline_entries(ctx->entry, start_entry_idx);
+}
+
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask)
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
-	int rctx, start_entry_idx;
+	int rctx;
 
 	/* crosstask is not supported for user stacks */
 	if (crosstask && user && !kernel)
@@ -232,34 +273,14 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	if (!entry)
 		return NULL;
 
-	ctx.entry = entry;
-	ctx.max_stack = max_stack;
-	ctx.nr = entry->nr = 0;
-	ctx.contexts = 0;
-	ctx.contexts_maxed = false;
+	__init_perf_callchain_ctx(&ctx, entry, max_stack, true);
 
-	if (kernel && !user_mode(regs)) {
-		if (add_mark)
-			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(&ctx, regs);
-	}
-
-	if (user && !crosstask) {
-		if (!user_mode(regs)) {
-			if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
-				goto exit_put;
-			regs = task_pt_regs(current);
-		}
+	if (kernel)
+		__get_perf_callchain_kernel(&ctx, regs);
 
-		if (add_mark)
-			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
-
-		start_entry_idx = entry->nr;
-		perf_callchain_user(&ctx, regs);
-		fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
-	}
+	if (user && !crosstask)
+		__get_perf_callchain_user(&ctx, regs);
 
-exit_put:
 	put_callchain_entry(rctx);
 
 	return entry;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7541f6f85fcb0..eb0f110593d9a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8218,7 +8218,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 		return &__empty_callchain;
 
 	callchain = get_perf_callchain(regs, kernel, user,
-				       max_stack, crosstask, true);
+				       max_stack, crosstask);
 
 	return callchain ?: &__empty_callchain;
 }
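For illustration only (not part of the series), here is a caller-side
sketch of how the split helpers compose; it mirrors the refactored
get_perf_callchain() body above, and the name sample_callchain is made
up:

/*
 * Illustrative sketch only: compose the split helpers the same way
 * the refactored get_perf_callchain() does.
 */
static struct perf_callchain_entry *
sample_callchain(struct pt_regs *regs, bool kernel, bool user, u32 max_stack)
{
	struct perf_callchain_entry_ctx ctx;
	struct perf_callchain_entry *entry;
	int rctx;

	entry = get_callchain_entry(&rctx);	/* grab a per-CPU entry */
	if (!entry)
		return NULL;

	/* add_mark == true stores PERF_CONTEXT_KERNEL/USER separators */
	__init_perf_callchain_ctx(&ctx, entry, max_stack, true);

	if (kernel)
		__get_perf_callchain_kernel(&ctx, regs); /* no-op for user-mode regs */
	if (user)
		__get_perf_callchain_user(&ctx, regs);

	put_callchain_entry(rctx);	/* entry may be recycled after this */
	return entry;
}
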
From 9b46ee0e02088da5cee45769a37ef5ce0b3711f6 Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Wed, 29 Oct 2025 00:25:02 +0800
Subject: [PATCH 2/2] bpf: Hold the perf callchain entry until used completely

As Alexei noted, the values returned by get_perf_callchain() may be
reused if a task is preempted after the BPF program enters the
migrate-disabled state. The pool of perf callchain entries holds only
a small stack of entries, so hold an entry across its whole use:

1. get the perf callchain entry
2. BPF use...
3. put the perf callchain entry

Signed-off-by: Tao Chen
---
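For illustration only (not part of the patch), the resulting lifecycle
in bpf_get_stackid() below boils down to:

	/* 1. get: reserve the per-CPU callchain entry */
	trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, false);
	if (unlikely(!trace))
		return -EFAULT;

	/* 2. use: the entry cannot be recycled while we hold rctx */
	ret = __bpf_get_stackid(map, trace, flags);

	/* 3. put: release the entry for reuse */
	bpf_put_callchain_entry(rctx);
	return ret;
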
 kernel/bpf/stackmap.c | 61 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 13 deletions(-)

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e28b35c7e0b6c..70d382490830d 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -188,13 +188,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 }
 
 static struct perf_callchain_entry *
-get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
+get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32 max_depth)
 {
 #ifdef CONFIG_STACKTRACE
 	struct perf_callchain_entry *entry;
-	int rctx;
 
-	entry = get_callchain_entry(&rctx);
+	entry = get_callchain_entry(rctx);
 	if (!entry)
 		return NULL;
 
@@ -216,8 +215,6 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 		to[i] = (u64)(from[i]);
 	}
 
-	put_callchain_entry(rctx);
-
 	return entry;
 #else /* CONFIG_STACKTRACE */
 	return NULL;
@@ -297,6 +294,31 @@ static long __bpf_get_stackid(struct bpf_map *map,
 	return id;
 }
 
+static struct perf_callchain_entry *
+bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel, bool user,
+		       int max_stack, bool crosstask)
+{
+	struct perf_callchain_entry_ctx ctx;
+	struct perf_callchain_entry *entry;
+
+	entry = get_callchain_entry(rctx);
+	if (unlikely(!entry))
+		return NULL;
+
+	__init_perf_callchain_ctx(&ctx, entry, max_stack, false);
+	if (kernel)
+		__get_perf_callchain_kernel(&ctx, regs);
+	if (user && !crosstask)
+		__get_perf_callchain_user(&ctx, regs);
+
+	return entry;
+}
+
+static void bpf_put_callchain_entry(int rctx)
+{
+	put_callchain_entry(rctx);
+}
+
 BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags)
 {
@@ -305,6 +327,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	bool user = flags & BPF_F_USER_STACK;
 	struct perf_callchain_entry *trace;
 	bool kernel = !user;
+	int rctx, ret;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -314,14 +337,15 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
 
-	trace = get_perf_callchain(regs, kernel, user, max_depth,
-				   false);
-
+	trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, false);
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
 		return -EFAULT;
 
-	return __bpf_get_stackid(map, trace, flags);
+	ret = __bpf_get_stackid(map, trace, flags);
+	bpf_put_callchain_entry(rctx);
+
+	return ret;
 }
 
 const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -415,6 +439,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	bool kernel = !user;
 	int err = -EINVAL;
 	u64 *ips;
+	int rctx;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_USER_BUILD_ID)))
@@ -449,17 +474,24 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	if (trace_in)
 		trace = trace_in;
 	else if (kernel && task)
-		trace = get_callchain_entry_for_task(task, max_depth);
+		trace = get_callchain_entry_for_task(&rctx, task, max_depth);
 	else
-		trace = get_perf_callchain(regs, kernel, user, max_depth,
-					   crosstask);
+		trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth, crosstask);
 
-	if (unlikely(!trace) || trace->nr < skip) {
+	if (unlikely(!trace)) {
 		if (may_fault)
 			rcu_read_unlock();
 		goto err_fault;
 	}
 
+	if (trace->nr < skip) {
+		if (may_fault)
+			rcu_read_unlock();
+		if (!trace_in)
+			bpf_put_callchain_entry(rctx);
+		goto err_fault;
+	}
+
 	trace_nr = trace->nr - skip;
 	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
 	copy_len = trace_nr * elem_size;
@@ -479,6 +511,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	if (may_fault)
 		rcu_read_unlock();
 
+	if (!trace_in)
+		bpf_put_callchain_entry(rctx);
+
 	if (user_build_id)
 		stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
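
For illustration only (not part of the patch): since
put_callchain_entry() moved out of get_callchain_entry_for_task(), the
task-stack path in __bpf_get_stack() owns the put as well. A minimal
sketch of that contract:

	/* Illustration only: caller-owned put on the task path. */
	trace = get_callchain_entry_for_task(&rctx, task, max_depth);
	if (trace) {
		/* ... consume trace->ip[0 .. trace->nr) ... */
		bpf_put_callchain_entry(rctx);
	}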