
Commit d029633

hyperenju authored and Kernel Patches Daemon committed
bpf: cpumap: propagate underlying error in cpu_map_update_elem()
After commit 9216477 ("bpf: cpumap: Add the possibility to attach an eBPF program to cpumap"), __cpu_map_entry_alloc() may fail with errors other than -ENOMEM, such as -EBADF or -EINVAL. However, __cpu_map_entry_alloc() returns NULL on all failures, and cpu_map_update_elem() unconditionally converts this NULL into -ENOMEM. As a result, user space always receives -ENOMEM regardless of the actual underlying error.

Examples of unexpected behavior:
- Nonexistent fd  : -ENOMEM (should be -EBADF)
- Non-BPF fd      : -ENOMEM (should be -EINVAL)
- Bad attach type : -ENOMEM (should be -EINVAL)

Change __cpu_map_entry_alloc() to return ERR_PTR(err) instead of NULL and have cpu_map_update_elem() propagate this error.

Fixes: 9216477 ("bpf: cpumap: Add the possibility to attach an eBPF program to cpumap")
Signed-off-by: Kohei Enju <[email protected]>
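
For reference, the pattern adopted here is the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom: the allocator encodes the errno into the returned pointer and the caller decodes it, instead of collapsing every failure into -ENOMEM. Below is a minimal sketch of that idiom; the names (struct entry, entry_alloc(), entry_load_prog(), entry_update()) are illustrative stand-ins, not the actual cpumap symbols.

/* Sketch of the ERR_PTR() error-propagation idiom; hypothetical names,
 * not the real cpumap code.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct entry {
	int prog_fd;
};

/* Stand-in for __cpu_map_load_bpf_program(): may fail with errors such
 * as -EBADF or -EINVAL depending on the fd user space supplied.
 */
static int entry_load_prog(struct entry *e, int fd)
{
	if (fd <= 0)
		return -EBADF;
	e->prog_fd = fd;
	return 0;
}

static struct entry *entry_alloc(int fd)
{
	int err = -ENOMEM;
	struct entry *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return ERR_PTR(err);	/* allocation failure stays -ENOMEM */

	err = entry_load_prog(e, fd);
	if (err) {
		kfree(e);
		return ERR_PTR(err);	/* propagate the real cause */
	}
	return e;
}

static long entry_update(int fd)
{
	struct entry *e = entry_alloc(fd);

	if (IS_ERR(e))
		return PTR_ERR(e);	/* -EBADF/-EINVAL/-ENOMEM reach the caller */
	/* ... install e ... */
	return 0;
}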
1 parent 0d4aada commit d029633

File tree

1 file changed (+13 −8 lines)


kernel/bpf/cpumap.c

Lines changed: 13 additions & 8 deletions
@@ -430,7 +430,7 @@ static struct bpf_cpu_map_entry *
 __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 		      u32 cpu)
 {
-	int numa, err, i, fd = value->bpf_prog.fd;
+	int numa, err = -ENOMEM, i, fd = value->bpf_prog.fd;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	struct xdp_bulk_queue *bq;
@@ -440,7 +440,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 
 	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
 	if (!rcpu)
-		return NULL;
+		return ERR_PTR(err);
 
 	/* Alloc percpu bulkq */
 	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
@@ -468,16 +468,21 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 	rcpu->value.qsize = value->qsize;
 	gro_init(&rcpu->gro);
 
-	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
-		goto free_ptr_ring;
+	if (fd > 0) {
+		err = __cpu_map_load_bpf_program(rcpu, map, fd);
+		if (err)
+			goto free_ptr_ring;
+	}
 
 	/* Setup kthread */
 	init_completion(&rcpu->kthread_running);
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
 					       "cpumap/%d/map:%d", cpu,
 					       map->id);
-	if (IS_ERR(rcpu->kthread))
+	if (IS_ERR(rcpu->kthread)) {
+		err = PTR_ERR(rcpu->kthread);
 		goto free_prog;
+	}
 
 	/* Make sure kthread runs on a single CPU */
 	kthread_bind(rcpu->kthread, cpu);
@@ -503,7 +508,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 	free_percpu(rcpu->bulkq);
 free_rcu:
 	kfree(rcpu);
-	return NULL;
+	return ERR_PTR(err);
 }
 
 static void __cpu_map_entry_free(struct work_struct *work)
@@ -596,8 +601,8 @@ static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 	} else {
 		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
 		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
-		if (!rcpu)
-			return -ENOMEM;
+		if (IS_ERR(rcpu))
+			return PTR_ERR(rcpu);
 	}
 	rcu_read_lock();
 	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
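
As a usage illustration, here is a minimal user-space sketch (assumptions: libbpf 0.7+ for bpf_map_create(), sufficient privileges such as CAP_BPF/CAP_SYS_ADMIN, and a kernel carrying this patch) that updates a CPUMAP entry with a nonexistent program fd; before the fix the update reports ENOMEM, afterwards it should report EBADF.

/* cpumap_errno_demo.c - illustrative sketch, not part of the patch.
 * Build: gcc cpumap_errno_demo.c -lbpf
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

int main(void)
{
	struct bpf_cpumap_val val = {
		.qsize = 192,
		.bpf_prog.fd = 9999,	/* nonexistent fd */
	};
	__u32 key = 0;
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpumap_demo",
				sizeof(__u32), sizeof(val), 4, NULL);
	if (map_fd < 0) {
		fprintf(stderr, "map create failed: %s\n", strerror(errno));
		return 1;
	}

	if (bpf_map_update_elem(map_fd, &key, &val, 0))
		/* Before the fix: "Cannot allocate memory" (ENOMEM).
		 * After the fix:  "Bad file descriptor" (EBADF).
		 */
		fprintf(stderr, "update failed: %s\n", strerror(errno));

	return 0;
}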
