Skip to content

Commit 0524a47

Browse files
Sahil Chandna (Kernel Patches Daemon)
authored and committed
bpf: test_run: Use migrate_enable()/disable() universally
The timer context can safely use migrate_disable()/migrate_enable() universally instead of conditional preemption or migration disabling. Previously, the timer was initialized in NO_PREEMPT mode by default, which disabled preemption and forced execution in atomic context. This caused issues on PREEMPT_RT configurations when invoking spin_lock_bh() — a sleeping lock — leading to the following warning: BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48 in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 6107, name: syz.0.17 preempt_count: 1, expected: 0 RCU nest depth: 1, expected: 1 Preemption disabled at: [<ffffffff891fce58>] bpf_test_timer_enter+0xf8/0x140 net/bpf/test_run.c:42 Reported-by: [email protected] Closes: https://syzkaller.appspot.com/bug?extid=1f1fbecb9413cdbfbef8 Tested-by: [email protected] Signed-off-by: Sahil Chandna <[email protected]>
1 parent 5ba1e72 commit 0524a47

File tree

1 file changed

+6
-14
lines changed

1 file changed

+6
-14
lines changed

net/bpf/test_run.c

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
#include <trace/events/bpf_test_run.h>
3030

3131
struct bpf_test_timer {
32-
enum { NO_PREEMPT, NO_MIGRATE } mode;
3332
u32 i;
3433
u64 time_start, time_spent;
3534
};
@@ -38,10 +37,7 @@ static void bpf_test_timer_enter(struct bpf_test_timer *t)
3837
__acquires(rcu)
3938
{
4039
rcu_read_lock();
41-
if (t->mode == NO_PREEMPT)
42-
preempt_disable();
43-
else
44-
migrate_disable();
40+
migrate_disable();
4541

4642
t->time_start = ktime_get_ns();
4743
}
@@ -50,11 +46,7 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t)
5046
__releases(rcu)
5147
{
5248
t->time_start = 0;
53-
54-
if (t->mode == NO_PREEMPT)
55-
preempt_enable();
56-
else
57-
migrate_enable();
49+
migrate_enable();
5850
rcu_read_unlock();
5951
}
6052

@@ -374,7 +366,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
374366

375367
{
376368
struct xdp_test_data xdp = { .batch_size = batch_size };
377-
struct bpf_test_timer t = { .mode = NO_MIGRATE };
369+
struct bpf_test_timer t;
378370
int ret;
379371

380372
if (!repeat)
@@ -404,7 +396,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
404396
struct bpf_prog_array_item item = {.prog = prog};
405397
struct bpf_run_ctx *old_ctx;
406398
struct bpf_cg_run_ctx run_ctx;
407-
struct bpf_test_timer t = { NO_MIGRATE };
399+
struct bpf_test_timer t;
408400
enum bpf_cgroup_storage_type stype;
409401
int ret;
410402

@@ -1377,7 +1369,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
13771369
const union bpf_attr *kattr,
13781370
union bpf_attr __user *uattr)
13791371
{
1380-
struct bpf_test_timer t = { NO_PREEMPT };
1372+
struct bpf_test_timer t;
13811373
u32 size = kattr->test.data_size_in;
13821374
struct bpf_flow_dissector ctx = {};
13831375
u32 repeat = kattr->test.repeat;
@@ -1445,7 +1437,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
14451437
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
14461438
union bpf_attr __user *uattr)
14471439
{
1448-
struct bpf_test_timer t = { NO_PREEMPT };
1440+
struct bpf_test_timer t;
14491441
struct bpf_prog_array *progs = NULL;
14501442
struct bpf_sk_lookup_kern ctx = {};
14511443
u32 repeat = kattr->test.repeat;

0 commit comments

Comments (0)