Skip to content

Commit 49a8453

Browse files
Asphaltt authored and Kernel Patches Daemon committed
bpf: Introduce BPF_F_CPU and BPF_F_ALL_CPUS flags
Introduce the BPF_F_CPU and BPF_F_ALL_CPUS flags and check them in the following APIs:

* map_lookup_elem()
* map_update_elem()
* generic_map_lookup_batch()
* generic_map_update_batch()

Also compute the correct value size for these APIs.

Acked-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Leon Hwang <[email protected]>
1 parent fabf631 commit 49a8453

File tree

4 files changed

+47
-16
lines changed

4 files changed

+47
-16
lines changed

include/linux/bpf.h

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3829,14 +3829,35 @@ bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
38293829
}
38303830
#endif
38313831

3832+
static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
3833+
{
3834+
return false;
3835+
}
3836+
38323837
/*
 * bpf_map_check_op_flags - validate flags for a map lookup/update operation.
 *
 * @map:           map the operation targets
 * @flags:         caller-supplied flags; when BPF_F_CPU is set, the upper
 *                 32 bits carry the target CPU number
 * @allowed_flags: mask of flag bits (low 32 bits) this API accepts
 *
 * Returns 0 if the combination is valid, -EINVAL for an unknown or
 * inconsistent flag combination, or -ERANGE when BPF_F_CPU names a CPU
 * outside the possible-CPU range.
 */
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
{
	u32 cpu;

	/* Only the low 32 bits are flag bits; reject any bit not allowed. */
	if ((u32)flags & ~allowed_flags)
		return -EINVAL;

	/* BPF_F_LOCK requires the map value to embed a bpf_spin_lock. */
	if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	/* The upper 32 bits (CPU number) are meaningful only with BPF_F_CPU. */
	if (!(flags & BPF_F_CPU) && flags >> 32)
		return -EINVAL;

	if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) {
		if (!bpf_map_supports_cpu_flags(map->map_type))
			return -EINVAL;
		/* "one CPU" and "all CPUs" are mutually exclusive. */
		if ((flags & BPF_F_CPU) && (flags & BPF_F_ALL_CPUS))
			return -EINVAL;

		cpu = flags >> 32;
		if ((flags & BPF_F_CPU) && cpu >= num_possible_cpus())
			return -ERANGE;
	}

	return 0;
}
38423863

include/uapi/linux/bpf.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1373,6 +1373,8 @@ enum {
13731373
BPF_NOEXIST = 1, /* create new element if it didn't exist */
13741374
BPF_EXIST = 2, /* update existing element */
13751375
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
1376+
BPF_F_CPU = 8, /* cpu flag for percpu maps, upper 32-bit of flags is a cpu number */
1377+
BPF_F_ALL_CPUS = 16, /* update value across all CPUs for percpu maps */
13761378
};
13771379

13781380
/* flags for BPF_MAP_CREATE command */

kernel/bpf/syscall.c

Lines changed: 21 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -133,12 +133,14 @@ bool bpf_map_write_active(const struct bpf_map *map)
133133
return atomic64_read(&map->writecnt) != 0;
134134
}
135135

136-
static u32 bpf_map_value_size(const struct bpf_map *map)
137-
{
138-
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
139-
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
140-
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
141-
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
136+
static u32 bpf_map_value_size(const struct bpf_map *map, u64 flags)
137+
{
138+
if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS))
139+
return map->value_size;
140+
else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
141+
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
142+
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
143+
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
142144
return round_up(map->value_size, 8) * num_possible_cpus();
143145
else if (IS_FD_MAP(map))
144146
return sizeof(u32);
@@ -1732,15 +1734,15 @@ static int map_lookup_elem(union bpf_attr *attr)
17321734
if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
17331735
return -EPERM;
17341736

1735-
err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK);
1737+
err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK | BPF_F_CPU);
17361738
if (err)
17371739
return err;
17381740

17391741
key = __bpf_copy_key(ukey, map->key_size);
17401742
if (IS_ERR(key))
17411743
return PTR_ERR(key);
17421744

1743-
value_size = bpf_map_value_size(map);
1745+
value_size = bpf_map_value_size(map, attr->flags);
17441746

17451747
err = -ENOMEM;
17461748
value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
@@ -1781,6 +1783,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
17811783
bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
17821784
struct bpf_map *map;
17831785
void *key, *value;
1786+
u64 allowed_flags;
17841787
u32 value_size;
17851788
int err;
17861789

@@ -1797,7 +1800,8 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
17971800
goto err_put;
17981801
}
17991802

1800-
err = bpf_map_check_op_flags(map, attr->flags, ~0);
1803+
allowed_flags = (BPF_F_ALL_CPUS << 1) - 1;
1804+
err = bpf_map_check_op_flags(map, attr->flags, allowed_flags);
18011805
if (err)
18021806
goto err_put;
18031807

@@ -1807,7 +1811,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
18071811
goto err_put;
18081812
}
18091813

1810-
value_size = bpf_map_value_size(map);
1814+
value_size = bpf_map_value_size(map, attr->flags);
18111815
value = kvmemdup_bpfptr(uvalue, value_size);
18121816
if (IS_ERR(value)) {
18131817
err = PTR_ERR(value);
@@ -2001,13 +2005,15 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
20012005
void __user *keys = u64_to_user_ptr(attr->batch.keys);
20022006
u32 value_size, cp, max_count;
20032007
void *key, *value;
2008+
u64 allowed_flags;
20042009
int err = 0;
20052010

2006-
err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
2011+
allowed_flags = BPF_EXIST | BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS;
2012+
err = bpf_map_check_op_flags(map, attr->batch.elem_flags, allowed_flags);
20072013
if (err)
20082014
return err;
20092015

2010-
value_size = bpf_map_value_size(map);
2016+
value_size = bpf_map_value_size(map, attr->batch.elem_flags);
20112017

20122018
max_count = attr->batch.count;
20132019
if (!max_count)
@@ -2062,11 +2068,11 @@ int generic_map_lookup_batch(struct bpf_map *map,
20622068
u32 value_size, cp, max_count;
20632069
int err;
20642070

2065-
err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
2071+
err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK | BPF_F_CPU);
20662072
if (err)
20672073
return err;
20682074

2069-
value_size = bpf_map_value_size(map);
2075+
value_size = bpf_map_value_size(map, attr->batch.elem_flags);
20702076

20712077
max_count = attr->batch.count;
20722078
if (!max_count)
@@ -2188,7 +2194,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
21882194
goto err_put;
21892195
}
21902196

2191-
value_size = bpf_map_value_size(map);
2197+
value_size = bpf_map_value_size(map, 0);
21922198

21932199
err = -ENOMEM;
21942200
value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);

tools/include/uapi/linux/bpf.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1373,6 +1373,8 @@ enum {
13731373
BPF_NOEXIST = 1, /* create new element if it didn't exist */
13741374
BPF_EXIST = 2, /* update existing element */
13751375
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
1376+
BPF_F_CPU = 8, /* cpu flag for percpu maps, upper 32-bit of flags is a cpu number */
1377+
BPF_F_ALL_CPUS = 16, /* update value across all CPUs for percpu maps */
13761378
};
13771379

13781380
/* flags for BPF_MAP_CREATE command */

0 commit comments

Comments
 (0)