
Commit 4f4c4fc

anakryiko authored and Alexei Starovoitov committed
bpf: decouple stack_map_get_build_id_offset() from perf_callchain_entry
Change stack_map_get_build_id_offset(), which is used to convert stack trace IP addresses into build ID+offset pairs. Right now this function accepts an array of u64s as input and uses an array of struct bpf_stack_build_id as output. This is problematic because the u64 array comes from perf_callchain_entry, which is (non-sleepable) RCU protected, so once we allow sleepable build ID fetching, this all breaks down.

But it's actually pretty easy to make stack_map_get_build_id_offset() work with an array of struct bpf_stack_build_id as both input and output, which is what this patch does, eliminating the dependency on perf_callchain_entry. We require the caller to fill out the bpf_stack_build_id.ip fields (all others can be left uninitialized) and update entries in place as we do build ID resolution.

We make sure to READ_ONCE() and cache locally the current IP value, as we use it in a few places to find the matching VMA and so on. Given this data is directly accessible and modifiable by the user's BPF code, we should make sure to have a consistent view of it.

Reviewed-by: Eduard Zingerman <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
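To make the new contract concrete, here is a minimal sketch of the calling convention, using the identifiers from the diff below (bucket allocation and error handling elided):

	/* The caller seeds each entry's IP from the raw stack trace; all
	 * other fields may be left uninitialized. The function then
	 * resolves build IDs in place, per the contract documented in the
	 * new comment above stack_map_get_build_id_offset(). */
	struct bpf_stack_build_id *id_offs = (struct bpf_stack_build_id *)new_bucket->data;
	u32 i;

	for (i = 0; i < trace_nr; i++)
		id_offs[i].ip = ips[i];
	stack_map_get_build_id_offset(id_offs, trace_nr, user);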
1 parent cdbb44f commit 4f4c4fc

File tree

1 file changed, +33 -16 lines changed


kernel/bpf/stackmap.c

Lines changed: 33 additions & 16 deletions
@@ -124,8 +124,18 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	return ERR_PTR(err);
 }
 
+/*
+ * Expects all id_offs[i].ip values to be set to correct initial IPs.
+ * They will be subsequently:
+ * - either adjusted in place to a file offset, if build ID fetching
+ *   succeeds; in this case id_offs[i].build_id is set to correct build ID,
+ *   and id_offs[i].status is set to BPF_STACK_BUILD_ID_VALID;
+ * - or IP will be kept intact, if build ID fetching failed; in this case
+ *   id_offs[i].build_id is zeroed out and id_offs[i].status is set to
+ *   BPF_STACK_BUILD_ID_IP.
+ */
 static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
-					  u64 *ips, u32 trace_nr, bool user)
+					  u32 trace_nr, bool user)
 {
 	int i;
 	struct mmap_unlock_irq_work *work = NULL;
@@ -142,30 +152,28 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 		/* cannot access current->mm, fall back to ips */
 		for (i = 0; i < trace_nr; i++) {
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
-			id_offs[i].ip = ips[i];
 			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
 		}
 		return;
 	}
 
 	for (i = 0; i < trace_nr; i++) {
-		if (range_in_vma(prev_vma, ips[i], ips[i])) {
+		u64 ip = READ_ONCE(id_offs[i].ip);
+
+		if (range_in_vma(prev_vma, ip, ip)) {
 			vma = prev_vma;
-			memcpy(id_offs[i].build_id, prev_build_id,
-			       BUILD_ID_SIZE_MAX);
+			memcpy(id_offs[i].build_id, prev_build_id, BUILD_ID_SIZE_MAX);
 			goto build_id_valid;
 		}
-		vma = find_vma(current->mm, ips[i]);
+		vma = find_vma(current->mm, ip);
 		if (!vma || build_id_parse_nofault(vma, id_offs[i].build_id, NULL)) {
 			/* per entry fall back to ips */
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
-			id_offs[i].ip = ips[i];
 			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
 			continue;
 		}
build_id_valid:
-		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
-			- vma->vm_start;
+		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start;
 		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
 		prev_vma = vma;
 		prev_build_id = id_offs[i].build_id;
@@ -216,7 +224,7 @@ static long __bpf_get_stackid(struct bpf_map *map,
 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
-	u32 hash, id, trace_nr, trace_len;
+	u32 hash, id, trace_nr, trace_len, i;
 	bool user = flags & BPF_F_USER_STACK;
 	u64 *ips;
 	bool hash_matches;
@@ -238,15 +246,18 @@ static long __bpf_get_stackid(struct bpf_map *map,
 		return id;
 
 	if (stack_map_use_build_id(map)) {
+		struct bpf_stack_build_id *id_offs;
+
 		/* for build_id+offset, pop a bucket before slow cmp */
 		new_bucket = (struct stack_map_bucket *)
 			pcpu_freelist_pop(&smap->freelist);
 		if (unlikely(!new_bucket))
 			return -ENOMEM;
 		new_bucket->nr = trace_nr;
-		stack_map_get_build_id_offset(
-			(struct bpf_stack_build_id *)new_bucket->data,
-			ips, trace_nr, user);
+		id_offs = (struct bpf_stack_build_id *)new_bucket->data;
+		for (i = 0; i < trace_nr; i++)
+			id_offs[i].ip = ips[i];
+		stack_map_get_build_id_offset(id_offs, trace_nr, user);
 		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
 		if (hash_matches && bucket->nr == trace_nr &&
 		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
@@ -445,10 +456,16 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	copy_len = trace_nr * elem_size;
 
 	ips = trace->ip + skip;
-	if (user && user_build_id)
-		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
-	else
+	if (user && user_build_id) {
+		struct bpf_stack_build_id *id_offs = buf;
+		u32 i;
+
+		for (i = 0; i < trace_nr; i++)
+			id_offs[i].ip = ips[i];
+		stack_map_get_build_id_offset(buf, trace_nr, user);
+	} else {
 		memcpy(buf, ips, copy_len);
+	}
 
 	if (size > copy_len)
 		memset(buf + copy_len, 0, size - copy_len);
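Why the READ_ONCE() matters: the id_offs buffer can be read and written concurrently by the user's BPF code, so each IP is snapshotted into a local once, and that local is used for both the VMA lookup and the offset computation. A sketch of the inconsistency being avoided (the racy variant is hypothetical, for illustration):

	/* Racy (hypothetical): two loads of id_offs[i].ip may observe
	 * different values if the buffer is modified concurrently. */
	vma = find_vma(current->mm, id_offs[i].ip);
	id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + id_offs[i].ip - vma->vm_start;

	/* Consistent (as in this patch): snapshot once, use the local copy. */
	u64 ip = READ_ONCE(id_offs[i].ip);
	vma = find_vma(current->mm, ip);
	id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start;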
