Commit 7a4bd6a

Asphaltt authored and Kernel Patches Daemon committed
bpf: Introduce internal bpf_map_check_op_flags helper function
Introduce it to unify map flags checking for the lookup_elem, update_elem, lookup_batch and update_batch APIs, so that checking the BPF_F_CPU and BPF_F_ALL_CPUS flags for these APIs in the next patch becomes convenient.

Signed-off-by: Leon Hwang <[email protected]>
1 parent 41fb65e commit 7a4bd6a
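The check that the helper centralizes is easier to see in isolation. The following is a small standalone userspace C sketch, not part of this commit: the demo_ names, the flag value and the has_spin_lock field are stand-ins. It mirrors what bpf_map_check_op_flags() enforces, namely that any flag outside the caller-supplied allowed mask is rejected, and that BPF_F_LOCK is rejected unless the map value contains a bpf_spin_lock.

/*
 * Standalone sketch mirroring the semantics of bpf_map_check_op_flags().
 * Types, the flag value and has_spin_lock are illustrative stand-ins only.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_F_LOCK (1ULL << 2)		/* stand-in for BPF_F_LOCK */

struct demo_map {
        bool has_spin_lock;		/* stand-in for btf_record_has_field(map->record, BPF_SPIN_LOCK) */
};

static int demo_map_check_op_flags(const struct demo_map *map, uint64_t flags,
                                   uint64_t allowed_flags)
{
        if (flags & ~allowed_flags)
                return -EINVAL;		/* flag not valid for this operation */

        if ((flags & DEMO_F_LOCK) && !map->has_spin_lock)
                return -EINVAL;		/* locking needs a spin lock in the map value */

        return 0;
}

int main(void)
{
        struct demo_map plain = { .has_spin_lock = false };
        struct demo_map lockable = { .has_spin_lock = true };

        /* lookup_elem-style check: only DEMO_F_LOCK is allowed */
        printf("%d\n", demo_map_check_op_flags(&lockable, DEMO_F_LOCK, DEMO_F_LOCK));	/* 0 */
        printf("%d\n", demo_map_check_op_flags(&plain, DEMO_F_LOCK, DEMO_F_LOCK));	/* -EINVAL */
        printf("%d\n", demo_map_check_op_flags(&lockable, 1ULL << 5, DEMO_F_LOCK));	/* -EINVAL */
        return 0;
}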

File tree

2 files changed: +22 −23 lines

include/linux/bpf.h
kernel/bpf/syscall.c

include/linux/bpf.h

Lines changed: 11 additions & 0 deletions
@@ -3716,4 +3716,15 @@ int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char *
                           const char **linep, int *nump);
 struct bpf_prog *bpf_prog_find_from_stack(void);
 
+static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
+{
+        if (flags & ~allowed_flags)
+                return -EINVAL;
+
+        if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+                return -EINVAL;
+
+        return 0;
+}
+
 #endif /* _LINUX_BPF_H */

kernel/bpf/syscall.c

Lines changed: 11 additions & 23 deletions
@@ -1669,19 +1669,16 @@ static int map_lookup_elem(union bpf_attr *attr)
         if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                 return -EINVAL;
 
-        if (attr->flags & ~BPF_F_LOCK)
-                return -EINVAL;
-
         CLASS(fd, f)(attr->map_fd);
         map = __bpf_map_get(f);
         if (IS_ERR(map))
                 return PTR_ERR(map);
         if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
                 return -EPERM;
 
-        if ((attr->flags & BPF_F_LOCK) &&
-            !btf_record_has_field(map->record, BPF_SPIN_LOCK))
-                return -EINVAL;
+        err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK);
+        if (err)
+                return err;
 
         key = __bpf_copy_key(ukey, map->key_size);
         if (IS_ERR(key))
@@ -1744,11 +1741,9 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
                 goto err_put;
         }
 
-        if ((attr->flags & BPF_F_LOCK) &&
-            !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
-                err = -EINVAL;
+        err = bpf_map_check_op_flags(map, attr->flags, ~0);
+        if (err)
                 goto err_put;
-        }
 
         key = ___bpf_copy_key(ukey, map->key_size);
         if (IS_ERR(key)) {
@@ -1952,13 +1947,9 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
         void *key, *value;
         int err = 0;
 
-        if (attr->batch.elem_flags & ~BPF_F_LOCK)
-                return -EINVAL;
-
-        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
-            !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
-                return -EINVAL;
-        }
+        err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
+        if (err)
+                return err;
 
         value_size = bpf_map_value_size(map);
 
@@ -2015,12 +2006,9 @@ int generic_map_lookup_batch(struct bpf_map *map,
         u32 value_size, cp, max_count;
         int err;
 
-        if (attr->batch.elem_flags & ~BPF_F_LOCK)
-                return -EINVAL;
-
-        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
-            !btf_record_has_field(map->record, BPF_SPIN_LOCK))
-                return -EINVAL;
+        err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
+        if (err)
+                return err;
 
         value_size = bpf_map_value_size(map);
 
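For context on the commit message's forward reference, the fragment below is a hypothetical sketch of the caller-side change such a follow-up could make; the exact mask and call sites are assumptions here, with BPF_F_CPU and BPF_F_ALL_CPUS taken only from the commit message above.

/* Hypothetical follow-up (not in this commit): with every map op funnelled
 * through bpf_map_check_op_flags(), accepting additional flags becomes a
 * one-argument change at each call site, e.g.:
 */
err = bpf_map_check_op_flags(map, attr->flags,
                             BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS);
if (err)
        return err;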
