
Commit 3413018

listout authored and Kernel Patches Daemon committed
bpf: avoid sleeping in invalid context during sock_map_delete_elem path
#syz test

The syzkaller report exposed a "BUG: sleeping function called from
invalid context" in sock_map_delete_elem. It happens when
bpf_test_timer_enter() disables preemption and the delete path later
calls a function that may sleep while still in that context.
Specifically:

- The crash trace shows bpf_test_timer_enter() taking the
  preempt_disable() branch (t->mode == NO_PREEMPT, used by the flow
  dissector and sk_lookup test runs), so the BPF program under test
  executes with preemption disabled.
- A program that deletes from a sockmap then reaches a sleeping
  function on the sock_map_delete_elem path while preemption is still
  disabled, triggering the invalid-context report.

To fix this, normalize the disable/enable pairing: drop the
NO_PREEMPT/NO_MIGRATE distinction and always pair the RCU read-side
section with migration disable/enable, via rcu_read_lock_dont_migrate()
and rcu_read_unlock_migrate(). This ensures the delete path is never
entered with preemption disabled, avoiding the invalid-context sleep.

Reported-by: [email protected]
Signed-off-by: Brahmajit Das <[email protected]>
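To make the atomic-context rule concrete, here is a minimal,
self-contained sketch of the bug class (editorial illustration with
hypothetical names, not the syzkaller call chain from this report):
any function that may sleep, called while preemption is disabled,
trips the debug splat when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.

#include <linux/mutex.h>
#include <linux/preempt.h>

static DEFINE_MUTEX(demo_lock);

/* Hypothetical illustration of the bug class reported above.
 * mutex_lock() may sleep, so calling it with preemption disabled
 * produces "BUG: sleeping function called from invalid context".
 */
static void demo_invalid_context(void)
{
        preempt_disable();              /* enter atomic context */
        mutex_lock(&demo_lock);         /* may sleep: invalid here */
        mutex_unlock(&demo_lock);
        preempt_enable();
}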
1 parent 06bf66d commit 3413018

File tree

1 file changed: +9 −13 lines

net/bpf/test_run.c

Lines changed: 9 additions & 13 deletions
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2017 Facebook
  */
+#include "linux/rcupdate.h"
 #include <linux/bpf.h>
 #include <linux/btf.h>
 #include <linux/btf_ids.h>
@@ -29,7 +30,6 @@
 #include <trace/events/bpf_test_run.h>
 
 struct bpf_test_timer {
-        enum { NO_PREEMPT, NO_MIGRATE } mode;
         u32 i;
         u64 time_start, time_spent;
 };
@@ -38,10 +38,8 @@ static void bpf_test_timer_enter(struct bpf_test_timer *t)
         __acquires(rcu)
 {
         rcu_read_lock();
-        if (t->mode == NO_PREEMPT)
-                preempt_disable();
-        else
-                migrate_disable();
+        /*migrate_disable();*/
+        rcu_read_lock_dont_migrate();
 
         t->time_start = ktime_get_ns();
 }
@@ -51,10 +49,8 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t)
 {
         t->time_start = 0;
 
-        if (t->mode == NO_PREEMPT)
-                preempt_enable();
-        else
-                migrate_enable();
+        /*migrate_enable();*/
+        rcu_read_unlock_migrate();
         rcu_read_unlock();
 }
 
@@ -374,7 +370,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
 
 {
         struct xdp_test_data xdp = { .batch_size = batch_size };
-        struct bpf_test_timer t = { .mode = NO_MIGRATE };
+        struct bpf_test_timer t = {};
         int ret;
 
         if (!repeat)
@@ -404,7 +400,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
         struct bpf_prog_array_item item = {.prog = prog};
         struct bpf_run_ctx *old_ctx;
         struct bpf_cg_run_ctx run_ctx;
-        struct bpf_test_timer t = { NO_MIGRATE };
+        struct bpf_test_timer t = {};
         enum bpf_cgroup_storage_type stype;
         int ret;
 
@@ -1440,7 +1436,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                      const union bpf_attr *kattr,
                                      union bpf_attr __user *uattr)
 {
-        struct bpf_test_timer t = { NO_PREEMPT };
+        struct bpf_test_timer t = {};
         u32 size = kattr->test.data_size_in;
         struct bpf_flow_dissector ctx = {};
         u32 repeat = kattr->test.repeat;
@@ -1508,7 +1504,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
                                 union bpf_attr __user *uattr)
 {
-        struct bpf_test_timer t = { NO_PREEMPT };
+        struct bpf_test_timer t = {};
         struct bpf_prog_array *progs = NULL;
         struct bpf_sk_lookup_kern ctx = {};
         u32 repeat = kattr->test.repeat;
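The replacement helpers come from linux/rcupdate.h, hence the new
include. As a rough sketch of their intent, assumed from their names
and their usage in this patch rather than quoted from the tree, they
pair the RCU read-side critical section with migration disable/enable:

/* Sketch of the assumed semantics of the helpers used above; the
 * sketch_ names are placeholders, not the in-tree definitions from
 * <linux/rcupdate.h>.
 */
static inline void sketch_rcu_read_lock_dont_migrate(void)
{
        /* Pin the task to its current CPU without disabling
         * preemption, then enter the RCU read-side section.
         */
        migrate_disable();
        rcu_read_lock();
}

static inline void sketch_rcu_read_unlock_migrate(void)
{
        /* Leave the RCU read-side section, then allow migration
         * again.
         */
        rcu_read_unlock();
        migrate_enable();
}

The point for this fix is that migrate_disable() keeps the program on
one CPU, preserving per-CPU assumptions, while leaving preemption
enabled; the splat reported by syzkaller keys off preemption being
disabled, so with only migration disabled that check no longer fires.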

0 commit comments