5 changes: 3 additions & 2 deletions include/linux/bpf-cgroup.h
@@ -172,7 +172,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);

@@ -467,7 +468,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
void *value) {
void *value, u64 flags) {
return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
126 changes: 123 additions & 3 deletions include/linux/bpf.h
@@ -547,6 +547,56 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src
bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}

#ifdef CONFIG_BPF_SYSCALL
static inline void bpf_percpu_copy_to_user(struct bpf_map *map, void __percpu *pptr, void *value,
u32 size, u64 flags)
{
int current_cpu = raw_smp_processor_id();
int cpu, off = 0;

if (flags & BPF_F_CPU) {
cpu = flags >> 32;
copy_map_value_long(map, value, cpu != current_cpu ? per_cpu_ptr(pptr, cpu) :
this_cpu_ptr(pptr));
check_and_init_map_value(map, value);
} else {
for_each_possible_cpu(cpu) {
copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
check_and_init_map_value(map, value + off);
off += size;
}
}
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj);

static inline void bpf_percpu_copy_from_user(struct bpf_map *map, void __percpu *pptr, void *value,
u32 size, u64 flags)
{
int current_cpu = raw_smp_processor_id();
int cpu, off = 0;
void *ptr;

if (flags & BPF_F_CPU) {
cpu = flags >> 32;
ptr = cpu == current_cpu ? this_cpu_ptr(pptr) : per_cpu_ptr(pptr, cpu);
copy_map_value_long(map, ptr, value);
bpf_obj_free_fields(map->record, ptr);
} else {
for_each_possible_cpu(cpu) {
copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
/* With BPF_F_ALL_CPUS the same user-provided value is
 * copied to every CPU; otherwise value is an array of
 * per-CPU values and off advances by one slot per CPU.
 */
if (!(flags & BPF_F_ALL_CPUS))
off += size;
bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
}
}
}
#endif

static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
{
unsigned long *src_uptr, *dst_uptr;
@@ -2417,7 +2467,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);

struct bpf_map *bpf_map_get(u32 ufd);
@@ -2696,8 +2745,9 @@ int map_set_for_each_callback_args(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
@@ -3709,4 +3759,74 @@ int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char *
const char **linep, int *nump);
struct bpf_prog *bpf_prog_find_from_stack(void);

static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
{
switch (map_type) {
case BPF_MAP_TYPE_PERCPU_ARRAY:
case BPF_MAP_TYPE_PERCPU_HASH:
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
return true;
default:
return false;
}
}

static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 extra_flags_mask)
{
if (extra_flags_mask && ((u32)flags & extra_flags_mask))
return -EINVAL;

if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
return -EINVAL;

if (!(flags & BPF_F_CPU) && flags >> 32)
return -EINVAL;

if ((flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) && !bpf_map_supports_cpu_flags(map->map_type))
return -EINVAL;

return 0;
}

static inline int bpf_map_check_update_flags(struct bpf_map *map, u64 flags)
{
return bpf_map_check_op_flags(map, flags, 0);
}

#define BPF_MAP_LOOKUP_ELEM_EXTRA_FLAGS_MASK (~(BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS))

static inline int bpf_map_check_lookup_flags(struct bpf_map *map, u64 flags)
{
return bpf_map_check_op_flags(map, flags, BPF_MAP_LOOKUP_ELEM_EXTRA_FLAGS_MASK);
}

static inline int bpf_map_check_batch_flags(struct bpf_map *map, u64 flags)
{
return bpf_map_check_op_flags(map, flags, BPF_MAP_LOOKUP_ELEM_EXTRA_FLAGS_MASK);
}

static inline int bpf_map_check_cpu_flags(u64 flags, bool check_all_cpus_flag)
{
const u64 cpu_flags = BPF_F_CPU | BPF_F_ALL_CPUS;
u32 cpu;

if (check_all_cpus_flag) {
if (unlikely((u32)flags > BPF_F_ALL_CPUS))
/* unknown flags */
return -EINVAL;
if (unlikely((flags & cpu_flags) == cpu_flags))
return -EINVAL;
} else {
if (unlikely((u32)flags & ~BPF_F_CPU))
return -EINVAL;
}

cpu = flags >> 32;
if (unlikely((flags & BPF_F_CPU) && cpu >= num_possible_cpus()))
return -ERANGE;

return 0;
}

#endif /* _LINUX_BPF_H */
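
The helpers above all assume one flags layout: flag bits in the lower 32 bits, the target CPU number in the upper 32 bits, and the upper half only meaningful when BPF_F_CPU is set (bpf_map_check_op_flags() rejects a non-zero upper half otherwise). A minimal sketch of that encoding; the helper name below is hypothetical and not part of this patch:

#include <linux/bpf.h>	/* BPF_F_CPU, BPF_F_ALL_CPUS */

/* Hypothetical illustration only: pack an op-flags value for a percpu
 * map. The CPU number occupies the upper 32 bits and is honoured only
 * when BPF_F_CPU is set in the lower 32 bits.
 */
static inline __u64 make_percpu_cpu_flags(__u32 cpu)
{
	return (__u64)cpu << 32 | BPF_F_CPU;
}

bpf_map_check_cpu_flags() additionally range-checks the CPU against num_possible_cpus() and, on the update path, rejects BPF_F_CPU and BPF_F_ALL_CPUS being set together.
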
2 changes: 2 additions & 0 deletions include/uapi/linux/bpf.h
@@ -1372,6 +1372,8 @@ enum {
BPF_NOEXIST = 1, /* create new element if it didn't exist */
BPF_EXIST = 2, /* update existing element */
BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
BPF_F_CPU = 8, /* percpu maps: operate on the CPU given in the upper 32 bits of flags */
BPF_F_ALL_CPUS = 16, /* percpu maps: update the value on all CPUs */
};

/* flags for BPF_MAP_CREATE command */
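
From userspace these two flags would be used with the existing lookup/update wrappers. A minimal sketch, assuming a BPF_MAP_TYPE_PERCPU_ARRAY whose value_size is 8 bytes and a map_fd obtained elsewhere; the helper is illustrative and error handling is omitted:

#include <bpf/bpf.h>	/* bpf_map_lookup_elem_flags(), bpf_map_update_elem() */
#include <linux/bpf.h>	/* BPF_F_CPU, BPF_F_ALL_CPUS */

/* Illustrative only (not part of this patch): read the slot of one CPU,
 * then write the same value back to every CPU's slot in a single call.
 */
static void percpu_flags_example(int map_fd, __u32 key, __u32 cpu)
{
	__u64 val = 0;

	/* BPF_F_CPU: only 'cpu's slot is copied out, so the buffer holds
	 * a single value instead of one per possible CPU.
	 */
	bpf_map_lookup_elem_flags(map_fd, &key, &val,
				  (__u64)cpu << 32 | BPF_F_CPU);

	/* BPF_F_ALL_CPUS: the one value provided is copied to all CPUs. */
	bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);
}
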
28 changes: 12 additions & 16 deletions kernel/bpf/arraymap.c
@@ -295,29 +295,29 @@ static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key,
return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value, u64 flags)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 index = *(u32 *)key;
void __percpu *pptr;
int cpu, off = 0;
u32 size;
int err;

if (unlikely(index >= array->map.max_entries))
return -ENOENT;

err = bpf_map_check_cpu_flags(flags, false);
if (unlikely(err))
return err;

/* per_cpu areas are zero-filled and bpf programs can only
* access 'value_size' of them, so copying rounded areas
* will not leak any kernel data
*/
size = array->elem_size;
rcu_read_lock();
pptr = array->pptrs[index & array->index_mask];
for_each_possible_cpu(cpu) {
copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
check_and_init_map_value(map, value + off);
off += size;
}
bpf_percpu_copy_to_user(map, pptr, value, size, flags);
rcu_read_unlock();
return 0;
}
@@ -387,12 +387,12 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
struct bpf_array *array = container_of(map, struct bpf_array, map);
u32 index = *(u32 *)key;
void __percpu *pptr;
int cpu, off = 0;
u32 size;
int err;

if (unlikely(map_flags > BPF_EXIST))
/* unknown flags */
return -EINVAL;
err = bpf_map_check_cpu_flags(map_flags, true);
if (unlikely(err))
return err;

if (unlikely(index >= array->map.max_entries))
/* all elements were pre-allocated, cannot insert a new one */
@@ -411,11 +411,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
size = array->elem_size;
rcu_read_lock();
pptr = array->pptrs[index & array->index_mask];
for_each_possible_cpu(cpu) {
copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
off += size;
}
bpf_percpu_copy_from_user(map, pptr, value, size, map_flags);
rcu_read_unlock();
return 0;
}
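
When BPF_F_CPU is not set, bpf_percpu_copy_to_user() still fills one slot per possible CPU, each rounded up to 8 bytes, so the userspace buffer must be sized accordingly. A sketch of that sizing, assuming an 8-byte value and using libbpf's libbpf_num_possible_cpus(); illustrative only:

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>	/* libbpf_num_possible_cpus() */

/* Illustrative only: a plain (flags == 0) percpu lookup needs room for
 * every possible CPU, unlike the single-slot BPF_F_CPU case above.
 */
static int lookup_every_cpu(int map_fd, __u32 key)
{
	int n = libbpf_num_possible_cpus();
	__u64 *vals;
	int err;

	if (n < 0)
		return n;
	vals = calloc(n, sizeof(*vals));	/* one 8-byte slot per possible CPU */
	if (!vals)
		return -ENOMEM;
	err = bpf_map_lookup_elem_flags(map_fd, &key, vals, 0);
	free(vals);
	return err;
}
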