
Commit f4c227c

Author: Alexei Starovoitov (committed)

Merge branch 'bpf-introduce-and-use-rcu_read_lock_dont_migrate'
Menglong Dong says:

====================
bpf: introduce and use rcu_read_lock_dont_migrate

migrate_disable() and rcu_read_lock() are used together in many places in
BPF. However, when PREEMPT_RCU is not enabled, rcu_read_lock() disables
preemption, which already implies migrate_disable(), so the extra call is
unnecessary in that case.

This series introduces rcu_read_lock_dont_migrate() and
rcu_read_unlock_migrate(), which call migrate_disable() and
migrate_enable() only when PREEMPT_RCU is enabled, and converts the BPF
subsystem to use rcu_read_lock_dont_migrate().

Changes since V2:
* make rcu_read_lock_dont_migrate() more compatible by using IS_ENABLED()

Changes since V1:
* introduce rcu_read_lock_dont_migrate() instead of rcu_migrate_disable() +
  rcu_read_lock()
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents 4223bf8 + 8e4f0b1 commit f4c227c
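
For orientation before the per-file diffs, this is the conversion pattern the series applies, shown on a hypothetical caller (the names obj, ptr, and p below are illustrative only, not from the patch):

	/* Before: open-coded pair */
	migrate_disable();
	rcu_read_lock();
	p = rcu_dereference(obj->ptr);
	/* ... use p under RCU protection ... */
	rcu_read_unlock();
	migrate_enable();

	/* After: one helper on each side; migration is only toggled
	 * when CONFIG_PREEMPT_RCU is enabled.
	 */
	rcu_read_lock_dont_migrate();
	p = rcu_dereference(obj->ptr);
	/* ... use p under RCU protection ... */
	rcu_read_unlock_migrate();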

7 files changed (+30, -32 lines)


include/linux/rcupdate.h

Lines changed: 14 additions & 0 deletions

@@ -962,6 +962,20 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 	preempt_enable_notrace();
 }
 
+static __always_inline void rcu_read_lock_dont_migrate(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+		migrate_disable();
+	rcu_read_lock();
+}
+
+static inline void rcu_read_unlock_migrate(void)
+{
+	rcu_read_unlock();
+	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+		migrate_enable();
+}
+
 /**
  * RCU_INIT_POINTER() - initialize an RCU protected pointer
  * @p: The pointer to be initialized.
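
An annotated copy of the new lock helper, with comments added here to spell out the reasoning (the comments are editorial and not part of the commit):

	static __always_inline void rcu_read_lock_dont_migrate(void)
	{
		/* Preemptible RCU readers can be preempted and moved to
		 * another CPU, so pin the task explicitly. */
		if (IS_ENABLED(CONFIG_PREEMPT_RCU))
			migrate_disable();
		/* Without PREEMPT_RCU the read-side critical section runs
		 * with preemption disabled, which already rules out
		 * migration, so no extra call is needed. */
		rcu_read_lock();
	}

rcu_read_unlock_migrate() mirrors this in the opposite order: it leaves the RCU read-side critical section first and only then re-enables migration, again only when PREEMPT_RCU is enabled.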

kernel/bpf/bpf_cgrp_storage.c

Lines changed: 2 additions & 4 deletions

@@ -45,8 +45,7 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
 {
 	struct bpf_local_storage *local_storage;
 
-	migrate_disable();
-	rcu_read_lock();
+	rcu_read_lock_dont_migrate();
 	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
 	if (!local_storage)
 		goto out;
@@ -55,8 +54,7 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
 	bpf_local_storage_destroy(local_storage);
 	bpf_cgrp_storage_unlock();
 out:
-	rcu_read_unlock();
-	migrate_enable();
+	rcu_read_unlock_migrate();
 }
 
 static struct bpf_local_storage_data *

kernel/bpf/bpf_inode_storage.c

Lines changed: 2 additions & 4 deletions

@@ -62,17 +62,15 @@ void bpf_inode_storage_free(struct inode *inode)
 	if (!bsb)
 		return;
 
-	migrate_disable();
-	rcu_read_lock();
+	rcu_read_lock_dont_migrate();
 
 	local_storage = rcu_dereference(bsb->storage);
 	if (!local_storage)
 		goto out;
 
 	bpf_local_storage_destroy(local_storage);
 out:
-	rcu_read_unlock();
-	migrate_enable();
+	rcu_read_unlock_migrate();
 }
 
 static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)

kernel/bpf/bpf_iter.c

Lines changed: 2 additions & 4 deletions

@@ -705,13 +705,11 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
 		migrate_enable();
 		rcu_read_unlock_trace();
 	} else {
-		rcu_read_lock();
-		migrate_disable();
+		rcu_read_lock_dont_migrate();
 		old_run_ctx = bpf_set_run_ctx(&run_ctx);
 		ret = bpf_prog_run(prog, ctx);
 		bpf_reset_run_ctx(old_run_ctx);
-		migrate_enable();
-		rcu_read_unlock();
+		rcu_read_unlock_migrate();
 	}
 
 	/* bpf program can only return 0 or 1:

kernel/bpf/bpf_task_storage.c

Lines changed: 2 additions & 4 deletions

@@ -70,8 +70,7 @@ void bpf_task_storage_free(struct task_struct *task)
 {
 	struct bpf_local_storage *local_storage;
 
-	migrate_disable();
-	rcu_read_lock();
+	rcu_read_lock_dont_migrate();
 
 	local_storage = rcu_dereference(task->bpf_storage);
 	if (!local_storage)
@@ -81,8 +80,7 @@ void bpf_task_storage_free(struct task_struct *task)
 	bpf_local_storage_destroy(local_storage);
 	bpf_task_storage_unlock();
 out:
-	rcu_read_unlock();
-	migrate_enable();
+	rcu_read_unlock_migrate();
 }
 
 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)

kernel/bpf/cgroup.c

Lines changed: 2 additions & 4 deletions

@@ -71,8 +71,7 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
 	u32 func_ret;
 
 	run_ctx.retval = retval;
-	migrate_disable();
-	rcu_read_lock();
+	rcu_read_lock_dont_migrate();
 	array = rcu_dereference(cgrp->effective[atype]);
 	item = &array->items[0];
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
@@ -88,8 +87,7 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
 		item++;
 	}
 	bpf_reset_run_ctx(old_run_ctx);
-	rcu_read_unlock();
-	migrate_enable();
+	rcu_read_unlock_migrate();
 	return run_ctx.retval;
 }
 

kernel/bpf/trampoline.c

Lines changed: 6 additions & 12 deletions

@@ -899,8 +899,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
 static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
-	rcu_read_lock();
-	migrate_disable();
+	rcu_read_lock_dont_migrate();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -949,8 +948,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
 
 	update_prog_stats(prog, start);
 	this_cpu_dec(*(prog->active));
-	migrate_enable();
-	rcu_read_unlock();
+	rcu_read_unlock_migrate();
 }
 
 static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
@@ -960,8 +958,7 @@ static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
 	/* Runtime stats are exported via actual BPF_LSM_CGROUP
 	 * programs, not the shims.
 	 */
-	rcu_read_lock();
-	migrate_disable();
+	rcu_read_lock_dont_migrate();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -974,8 +971,7 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
-	migrate_enable();
-	rcu_read_unlock();
+	rcu_read_unlock_migrate();
 }
 
 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
@@ -1033,8 +1029,7 @@ static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
 				     struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
-	rcu_read_lock();
-	migrate_disable();
+	rcu_read_lock_dont_migrate();
 
 	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -1048,8 +1043,7 @@ static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
 	update_prog_stats(prog, start);
-	migrate_enable();
-	rcu_read_unlock();
+	rcu_read_unlock_migrate();
 }
 
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
