include/linux/bpf.h: 28 additions, 1 deletion

@@ -547,6 +547,34 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
 	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+static inline void bpf_percpu_copy_to_user(struct bpf_map *map, void __percpu *pptr, void *value,
+					   u32 size)
+{
+	int cpu, off = 0;
+
+	for_each_possible_cpu(cpu) {
+		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
+		check_and_init_map_value(map, value + off);
+		off += size;
+	}
+}
+
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+
+static inline void bpf_percpu_copy_from_user(struct bpf_map *map, void __percpu *pptr, void *value,
+					     u32 size)
+{
+	int cpu, off = 0;
+
+	for_each_possible_cpu(cpu) {
+		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
+		bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
+		off += size;
+	}
+}
+#endif
+
 static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
 {
 	unsigned long *src_uptr, *dst_uptr;
@@ -2417,7 +2445,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
-void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
 
 struct bpf_map *bpf_map_get(u32 ufd);
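The helpers keep the long-standing user-visible layout for per-cpu map values: one slot per possible CPU, each copy padded to an 8-byte multiple. A minimal user-space sketch of a lookup against that layout, assuming a BPF_MAP_TYPE_PERCPU_ARRAY of __u64 values whose fd was obtained elsewhere via libbpf (dump_percpu_u64 is a hypothetical name):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	static int dump_percpu_u64(int map_fd, __u32 key)
	{
		int ncpus = libbpf_num_possible_cpus();
		__u64 *vals;	/* one 8-byte slot per possible CPU */
		int err, cpu;

		if (ncpus < 0)
			return ncpus;
		vals = calloc(ncpus, sizeof(*vals));	/* 8 == round_up(value_size, 8) */
		if (!vals)
			return -ENOMEM;
		err = bpf_map_lookup_elem(map_fd, &key, vals);
		if (!err)
			for (cpu = 0; cpu < ncpus; cpu++)
				printf("cpu%d: %llu\n", cpu, (unsigned long long)vals[cpu]);
		free(vals);
		return err;
	}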
kernel/bpf/arraymap.c: 2 additions, 12 deletions

@@ -300,7 +300,6 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 index = *(u32 *)key;
 	void __percpu *pptr;
-	int cpu, off = 0;
 	u32 size;
 
 	if (unlikely(index >= array->map.max_entries))
@@ -313,11 +312,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
-		check_and_init_map_value(map, value + off);
-		off += size;
-	}
+	bpf_percpu_copy_to_user(map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
@@ -387,7 +382,6 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 index = *(u32 *)key;
 	void __percpu *pptr;
-	int cpu, off = 0;
 	u32 size;
 
 	if (unlikely(map_flags > BPF_EXIST))
@@ -411,11 +405,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
-		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
-		off += size;
-	}
+	bpf_percpu_copy_from_user(map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
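The write side is symmetric: user space hands the kernel one padded slot per possible CPU, and bpf_percpu_copy_from_user() fans them out. A hedged user-space counterpart to the lookup sketch above (reset_percpu_u64 is a hypothetical name; same includes as before):

	static int reset_percpu_u64(int map_fd, __u32 key, __u64 v)
	{
		int ncpus = libbpf_num_possible_cpus();
		__u64 *vals;
		int err, cpu;

		if (ncpus < 0)
			return ncpus;
		vals = calloc(ncpus, sizeof(*vals));
		if (!vals)
			return -ENOMEM;
		for (cpu = 0; cpu < ncpus; cpu++)	/* every possible CPU needs a value */
			vals[cpu] = v;
		err = bpf_map_update_elem(map_fd, &key, vals, BPF_ANY);
		free(vals);
		return err;
	}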
kernel/bpf/hashtab.c: 3 additions, 17 deletions

@@ -944,12 +944,8 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
 	} else {
 		u32 size = round_up(htab->map.value_size, 8);
-		int off = 0, cpu;
 
-		for_each_possible_cpu(cpu) {
-			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
-			off += size;
-		}
+		bpf_percpu_copy_from_user(&htab->map, pptr, value, size);
 	}
 }
 
@@ -1802,15 +1798,10 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		memcpy(dst_key, l->key, key_size);
 
 		if (is_percpu) {
-			int off = 0, cpu;
 			void __percpu *pptr;
 
 			pptr = htab_elem_get_ptr(l, map->key_size);
-			for_each_possible_cpu(cpu) {
-				copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
-				check_and_init_map_value(&htab->map, dst_val + off);
-				off += size;
-			}
+			bpf_percpu_copy_to_user(&htab->map, pptr, dst_val, size);
 		} else {
 			value = htab_elem_value(l, key_size);
 			if (is_fd_htab(htab)) {
@@ -2370,7 +2361,6 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;
-	int cpu, off = 0;
 	u32 size;
 
 	/* per_cpu areas are zero-filled and bpf programs can only
@@ -2386,11 +2376,7 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	 * eviction heuristics when user space does a map walk.
 	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
-	for_each_possible_cpu(cpu) {
-		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
-		check_and_init_map_value(map, value + off);
-		off += size;
-	}
+	bpf_percpu_copy_to_user(map, pptr, value, size);
 	ret = 0;
 out:
 	rcu_read_unlock();
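In the batched path above, each returned key carries num_possible_cpus() padded slots back to back in the value buffer. A sketch under the same assumptions as the earlier snippets (dump_batch_percpu_u64 is a hypothetical name; libbpf's batch API reports end of map as -ENOENT):

	static int dump_batch_percpu_u64(int map_fd, __u32 max_entries)
	{
		LIBBPF_OPTS(bpf_map_batch_opts, opts);
		int ncpus = libbpf_num_possible_cpus();
		__u32 batch, count = max_entries, i;
		__u32 *keys;
		__u64 *vals;
		int err, cpu;

		if (ncpus < 0)
			return ncpus;
		keys = calloc(max_entries, sizeof(*keys));
		vals = calloc((size_t)max_entries * ncpus, sizeof(*vals));
		if (!keys || !vals) {
			err = -ENOMEM;
			goto out;
		}
		/* NULL in_batch starts the walk from the beginning of the map. */
		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, vals, &count, &opts);
		if (err && err != -ENOENT)
			goto out;
		for (i = 0; i < count; i++)
			for (cpu = 0; cpu < ncpus; cpu++)
				printf("key %u cpu%d: %llu\n", keys[i], cpu,
				       (unsigned long long)vals[(size_t)i * ncpus + cpu]);
		err = 0;
	out:
		free(keys);
		free(vals);
		return err;
	}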
kernel/bpf/local_storage.c: 6 additions, 12 deletions

@@ -184,7 +184,7 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	void __percpu *pptr;
 	u32 size;
 
 	rcu_read_lock();
@@ -199,11 +199,8 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
 	 * will not leak any kernel data
 	 */
 	size = round_up(_map->value_size, 8);
-	for_each_possible_cpu(cpu) {
-		bpf_long_memcpy(value + off,
-				per_cpu_ptr(storage->percpu_buf, cpu), size);
-		off += size;
-	}
+	pptr = storage->percpu_buf;
+	bpf_percpu_copy_to_user(_map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
@@ -213,7 +210,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	void __percpu *pptr;
 	u32 size;
 
 	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
@@ -233,11 +230,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 	 * so no kernel data leaks possible
 	 */
 	size = round_up(_map->value_size, 8);
-	for_each_possible_cpu(cpu) {
-		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
-				value + off, size);
-		off += size;
-	}
+	pptr = storage->percpu_buf;
+	bpf_percpu_copy_from_user(_map, pptr, value, size);
 	rcu_read_unlock();
 	return 0;
 }
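On the BPF-program side, per-cpu cgroup storage is reached through bpf_get_local_storage(), which returns the current CPU's slot, so a plain, non-atomic increment suffices; the copy/update paths above are what user space hits when it reads or writes all per-CPU copies. A minimal sketch with assumed map and section names:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
		__type(key, struct bpf_cgroup_storage_key);
		__type(value, __u64);
	} egress_bytes SEC(".maps");

	SEC("cgroup_skb/egress")
	int count_egress(struct __sk_buff *skb)
	{
		/* this CPU's slot; no other CPU touches it concurrently */
		__u64 *bytes = bpf_get_local_storage(&egress_bytes, 0);

		*bytes += skb->len;
		return 1;	/* allow the packet */
	}

	char _license[] SEC("license") = "GPL";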