
Commit e17d62f

ArnaudLcm authored and anakryiko committed
bpf: Refactor stack map trace depth calculation into helper function
Extract the duplicated maximum allowed depth computation for stack traces
stored in BPF stack maps from bpf_get_stackid() and __bpf_get_stack() into a
dedicated stack_map_calculate_max_depth() helper function. This unifies the
logic for:

- the max depth computation
- enforcing the sysctl_perf_event_max_stack limit

No functional changes for existing code paths.

Signed-off-by: Arnaud Lecomte <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Acked-by: Song Liu <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent 8842732

1 file changed: +32 −15 lines


kernel/bpf/stackmap.c

@@ -42,6 +42,28 @@ static inline int stack_map_data_size(struct bpf_map *map)
 		sizeof(struct bpf_stack_build_id) : sizeof(u64);
 }
 
+/**
+ * stack_map_calculate_max_depth - Calculate maximum allowed stack trace depth
+ * @size: Size of the buffer/map value in bytes
+ * @elem_size: Size of each stack trace element
+ * @flags: BPF stack trace flags (BPF_F_USER_STACK, BPF_F_USER_BUILD_ID, ...)
+ *
+ * Return: Maximum number of stack trace entries that can be safely stored
+ */
+static u32 stack_map_calculate_max_depth(u32 size, u32 elem_size, u64 flags)
+{
+	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+	u32 max_depth;
+	u32 curr_sysctl_max_stack = READ_ONCE(sysctl_perf_event_max_stack);
+
+	max_depth = size / elem_size;
+	max_depth += skip;
+	if (max_depth > curr_sysctl_max_stack)
+		return curr_sysctl_max_stack;
+
+	return max_depth;
+}
+
 static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 {
 	u64 elem_size = sizeof(struct stack_map_bucket) +
@@ -300,20 +322,17 @@ static long __bpf_get_stackid(struct bpf_map *map,
 BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags)
 {
-	u32 max_depth = map->value_size / stack_map_data_size(map);
-	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+	u32 elem_size = stack_map_data_size(map);
 	bool user = flags & BPF_F_USER_STACK;
 	struct perf_callchain_entry *trace;
 	bool kernel = !user;
+	u32 max_depth;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;
 
-	max_depth += skip;
-	if (max_depth > sysctl_perf_event_max_stack)
-		max_depth = sysctl_perf_event_max_stack;
-
+	max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
 	trace = get_perf_callchain(regs, kernel, user, max_depth,
 				   false, false);
 
@@ -406,7 +425,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 			    struct perf_callchain_entry *trace_in,
 			    void *buf, u32 size, u64 flags, bool may_fault)
 {
-	u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
+	u32 trace_nr, copy_len, elem_size, max_depth;
 	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
 	bool crosstask = task && task != current;
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
@@ -438,21 +457,20 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 		goto clear;
 	}
 
-	num_elem = size / elem_size;
-	max_depth = num_elem + skip;
-	if (sysctl_perf_event_max_stack < max_depth)
-		max_depth = sysctl_perf_event_max_stack;
+	max_depth = stack_map_calculate_max_depth(size, elem_size, flags);
 
 	if (may_fault)
 		rcu_read_lock(); /* need RCU for perf's callchain below */
 
-	if (trace_in)
+	if (trace_in) {
 		trace = trace_in;
-	else if (kernel && task)
+		trace->nr = min_t(u32, trace->nr, max_depth);
+	} else if (kernel && task) {
 		trace = get_callchain_entry_for_task(task, max_depth);
-	else
+	} else {
 		trace = get_perf_callchain(regs, kernel, user, max_depth,
 					   crosstask, false);
+	}
 
 	if (unlikely(!trace) || trace->nr < skip) {
 		if (may_fault)
@@ -461,7 +479,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	}
 
 	trace_nr = trace->nr - skip;
-	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
 	copy_len = trace_nr * elem_size;
 
 	ips = trace->ip + skip;
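As a reading aid, the clamping behaviour of the extracted helper can be reproduced outside the kernel. The sketch below is a hypothetical userspace mock, not kernel code: sysctl_perf_event_max_stack is replaced by a plain variable set to what is assumed here to be the usual default of 127 (PERF_MAX_STACK_DEPTH), READ_ONCE is dropped since there is no concurrency in the demo, and BPF_F_SKIP_FIELD_MASK (0xff) is copied from include/uapi/linux/bpf.h.

/*
 * Userspace sketch of stack_map_calculate_max_depth() clamping.
 * Compile with: cc -o depth_demo depth_demo.c
 */
#include <stdint.h>
#include <stdio.h>

#define BPF_F_SKIP_FIELD_MASK 0xffULL	/* from include/uapi/linux/bpf.h */

/* Assumed default; the real value is a runtime sysctl. */
static uint32_t sysctl_perf_event_max_stack = 127;

static uint32_t stack_map_calculate_max_depth(uint32_t size, uint32_t elem_size,
					      uint64_t flags)
{
	uint32_t skip = flags & BPF_F_SKIP_FIELD_MASK;
	uint32_t max_depth = size / elem_size + skip;

	if (max_depth > sysctl_perf_event_max_stack)
		return sysctl_perf_event_max_stack;
	return max_depth;
}

int main(void)
{
	/* 1024-byte map value of u64 entries, skipping 3 frames:
	 * 1024 / 8 + 3 = 131, clamped down to 127. */
	printf("%u\n", stack_map_calculate_max_depth(1024, 8, 3)); /* 127 */

	/* 256-byte buffer, no skip: 256 / 8 = 32, below the limit. */
	printf("%u\n", stack_map_calculate_max_depth(256, 8, 0));  /* 32 */
	return 0;
}

One detail worth noting from the diff itself: in __bpf_get_stack(), the old num_elem-based clamp on trace_nr is removed, and the trace_in path instead clamps trace->nr up front with min_t() against the computed max_depth.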
