Skip to content

Commit d76b198

Browse files
Asphaltt authored and Kernel Patches Daemon committed
bpf: Add BPF_F_CPU and BPF_F_ALL_CPUS flags support for percpu_cgroup_storage maps
Introduce BPF_F_ALL_CPUS flag support for percpu_cgroup_storage maps to allow updating values for all CPUs with a single value via the update_elem API. Introduce BPF_F_CPU flag support for percpu_cgroup_storage maps to allow: (1) updating the value for a specified CPU via the update_elem API, and (2) looking up the value for a specified CPU via the lookup_elem API. The BPF_F_CPU flag is passed via map_flags along with the embedded CPU number. Signed-off-by: Leon Hwang <[email protected]>
1 parent 9e07cb2 commit d76b198

File tree

4 files changed

+24
-10
lines changed

4 files changed

+24
-10
lines changed

include/linux/bpf-cgroup.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
172172
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
173173
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
174174

175-
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
175+
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
176176
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
177177
void *value, u64 flags);
178178

@@ -467,7 +467,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
467467
static inline void bpf_cgroup_storage_free(
468468
struct bpf_cgroup_storage *storage) {}
469469
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
470-
void *value) {
470+
void *value, u64 flags) {
471471
return 0;
472472
}
473473
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,

include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -3774,6 +3774,7 @@ static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
37743774
case BPF_MAP_TYPE_PERCPU_ARRAY:
37753775
case BPF_MAP_TYPE_PERCPU_HASH:
37763776
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
3777+
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
37773778
return true;
37783779
default:
37793780
return false;

kernel/bpf/local_storage.c

Lines changed: 20 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -180,7 +180,7 @@ static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
180180
}
181181

182182
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
183-
void *value)
183+
void *value, u64 map_flags)
184184
{
185185
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
186186
struct bpf_cgroup_storage *storage;
@@ -198,12 +198,18 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
198198
* access 'value_size' of them, so copying rounded areas
199199
* will not leak any kernel data
200200
*/
201+
if (map_flags & BPF_F_CPU) {
202+
cpu = map_flags >> 32;
203+
memcpy(value, per_cpu_ptr(storage->percpu_buf, cpu), _map->value_size);
204+
goto unlock;
205+
}
201206
size = round_up(_map->value_size, 8);
202207
for_each_possible_cpu(cpu) {
203208
bpf_long_memcpy(value + off,
204209
per_cpu_ptr(storage->percpu_buf, cpu), size);
205210
off += size;
206211
}
212+
unlock:
207213
rcu_read_unlock();
208214
return 0;
209215
}
@@ -213,10 +219,11 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
213219
{
214220
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
215221
struct bpf_cgroup_storage *storage;
216-
int cpu, off = 0;
222+
void *ptr;
217223
u32 size;
224+
int cpu;
218225

219-
if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
226+
if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
220227
return -EINVAL;
221228

222229
rcu_read_lock();
@@ -232,12 +239,18 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
232239
* returned or zeros which were zero-filled by percpu_alloc,
233240
* so no kernel data leaks possible
234241
*/
235-
size = round_up(_map->value_size, 8);
242+
size = (map_flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) ? _map->value_size :
243+
round_up(_map->value_size, 8);
244+
if (map_flags & BPF_F_CPU) {
245+
cpu = map_flags >> 32;
246+
memcpy(per_cpu_ptr(storage->percpu_buf, cpu), value, size);
247+
goto unlock;
248+
}
236249
for_each_possible_cpu(cpu) {
237-
bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
238-
value + off, size);
239-
off += size;
250+
ptr = (map_flags & BPF_F_ALL_CPUS) ? value : value + size * cpu;
251+
memcpy(per_cpu_ptr(storage->percpu_buf, cpu), ptr, size);
240252
}
253+
unlock:
241254
rcu_read_unlock();
242255
return 0;
243256
}

kernel/bpf/syscall.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -320,7 +320,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
320320
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
321321
err = bpf_percpu_array_copy(map, key, value, flags);
322322
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
323-
err = bpf_percpu_cgroup_storage_copy(map, key, value);
323+
err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
324324
} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
325325
err = bpf_stackmap_extract(map, key, value, false);
326326
} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {

0 commit comments

Comments (0)