Skip to content

Commit 2f6815c

Browse files
Asphaltt authored and Kernel Patches Daemon committed
bpf: Introduce BPF_F_CPU and BPF_F_ALL_CPUS flags for percpu_cgroup_storage maps
Introduce BPF_F_ALL_CPUS flag support for percpu_cgroup_storage maps to allow updating values for all CPUs with a single value for update_elem API. Introduce BPF_F_CPU flag support for percpu_cgroup_storage maps to allow: * update value for specified CPU for update_elem API. * lookup value for specified CPU for lookup_elem API. The BPF_F_CPU flag is passed via map_flags along with embedded cpu info. Signed-off-by: Leon Hwang <[email protected]>
1 parent d0d7f43 commit 2f6815c

File tree

4 files changed

+27
-23
lines changed

4 files changed

+27
-23
lines changed

include/linux/bpf-cgroup.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
172172
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
173173
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
174174

175-
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
175+
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value,
176+
u64 flags);
176177
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
177178
void *value, u64 flags);
178179

@@ -467,7 +468,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
467468
static inline void bpf_cgroup_storage_free(
468469
struct bpf_cgroup_storage *storage) {}
469470
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
470-
void *value) {
471+
void *value, u64 flags) {
471472
return 0;
472473
}
473474
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,

include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3765,6 +3765,7 @@ static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
37653765
case BPF_MAP_TYPE_PERCPU_ARRAY:
37663766
case BPF_MAP_TYPE_PERCPU_HASH:
37673767
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
3768+
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
37683769
return true;
37693770
default:
37703771
return false;

kernel/bpf/local_storage.c

Lines changed: 22 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -180,50 +180,55 @@ static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
180180
}
181181

182182
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
183-
void *value)
183+
void *value, u64 map_flags)
184184
{
185185
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
186186
struct bpf_cgroup_storage *storage;
187-
int cpu, off = 0;
188187
u32 size;
188+
int err;
189+
190+
err = bpf_map_check_cpu_flags(map_flags, false);
191+
if (err)
192+
return err;
189193

190194
rcu_read_lock();
191195
storage = cgroup_storage_lookup(map, key, false);
192196
if (!storage) {
193-
rcu_read_unlock();
194-
return -ENOENT;
197+
err = -ENOENT;
198+
goto unlock;
195199
}
196200

197201
/* per_cpu areas are zero-filled and bpf programs can only
198202
* access 'value_size' of them, so copying rounded areas
199203
* will not leak any kernel data
200204
*/
201205
size = round_up(_map->value_size, 8);
202-
for_each_possible_cpu(cpu) {
203-
bpf_long_memcpy(value + off,
204-
per_cpu_ptr(storage->percpu_buf, cpu), size);
205-
off += size;
206-
}
206+
bpf_percpu_copy_to_user(_map, storage->percpu_buf, value, size, map_flags);
207+
unlock:
207208
rcu_read_unlock();
208-
return 0;
209+
return err;
209210
}
210211

211212
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
212213
void *value, u64 map_flags)
213214
{
214215
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
215216
struct bpf_cgroup_storage *storage;
216-
int cpu, off = 0;
217217
u32 size;
218+
int err;
218219

219-
if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
220+
if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
220221
return -EINVAL;
221222

223+
err = bpf_map_check_cpu_flags(map_flags, true);
224+
if (err)
225+
return err;
226+
222227
rcu_read_lock();
223228
storage = cgroup_storage_lookup(map, key, false);
224229
if (!storage) {
225-
rcu_read_unlock();
226-
return -ENOENT;
230+
err = -ENOENT;
231+
goto unlock;
227232
}
228233

229234
/* the user space will provide round_up(value_size, 8) bytes that
@@ -233,13 +238,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
233238
* so no kernel data leaks possible
234239
*/
235240
size = round_up(_map->value_size, 8);
236-
for_each_possible_cpu(cpu) {
237-
bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
238-
value + off, size);
239-
off += size;
240-
}
241+
bpf_percpu_copy_from_user(_map, storage->percpu_buf, value, size, map_flags);
242+
unlock:
241243
rcu_read_unlock();
242-
return 0;
244+
return err;
243245
}
244246

245247
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,

kernel/bpf/syscall.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -318,7 +318,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
318318
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
319319
err = bpf_percpu_array_copy(map, key, value, flags);
320320
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
321-
err = bpf_percpu_cgroup_storage_copy(map, key, value);
321+
err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
322322
} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
323323
err = bpf_stackmap_copy(map, key, value);
324324
} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {

0 commit comments

Comments (0)