
Commit ef9003e

Asphaltt authored and Kernel Patches Daemon committed
bpf: Introduce BPF_F_CPU flag for percpu_cgroup_storage maps
Introduce BPF_F_ALL_CPUS flag support for percpu_cgroup_storage maps to allow updating values for all CPUs with a single value. Introduce BPF_F_CPU flag support for percpu_cgroup_storage maps to allow updating the value for a specified CPU.

This enhancement enables:

* Efficient updates of values across all CPUs with a single value when BPF_F_ALL_CPUS is set for the update_elem API.
* Targeted update or lookup for a specified CPU when BPF_F_CPU is set.

The BPF_F_CPU flag is passed via the map_flags argument of the lookup_elem and update_elem APIs, with the target CPU embedded in its upper 32 bits.

Signed-off-by: Leon Hwang <[email protected]>
1 parent bf7854d commit ef9003e
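To make the intended call pattern concrete, here is a minimal user-space sketch. It assumes the BPF_F_CPU and BPF_F_ALL_CPUS definitions from this series are present in the installed uapi headers, and that map_fd refers to an already-created BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map; it is an illustration of the flag semantics, not code from the patch.

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Replicate one value into every CPU's slot.  Without BPF_F_ALL_CPUS,
 * the caller would instead have to pass an array holding
 * num_possible_cpus() copies of the value.
 */
static int set_all_cpus(int map_fd, struct bpf_cgroup_storage_key *key,
			__u64 value)
{
	return bpf_map_update_elem(map_fd, key, &value,
				   BPF_ANY | BPF_F_ALL_CPUS);
}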

4 files changed, +46 -23 lines

include/linux/bpf-cgroup.h

Lines changed: 3 additions & 2 deletions
@@ -172,7 +172,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
 
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value,
+				   u64 flags);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 				     void *value, u64 flags);
 
@@ -467,7 +468,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
 static inline void bpf_cgroup_storage_free(
 	struct bpf_cgroup_storage *storage) {}
 static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
-						 void *value) {
+						 void *value, u64 flags) {
 	return 0;
 }
 static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,

include/linux/bpf.h

Lines changed: 3 additions & 2 deletions
@@ -3733,12 +3733,13 @@ static inline int bpf_map_check_cpu_flags(u64 flags, bool check_all_cpus)
 	return 0;
 }
 
-static inline bool bpf_map_support_cpu_flags(enum bpf_map_type map_type)
+static inline bool bpf_map_is_percpu(enum bpf_map_type map_type)
 {
 	switch (map_type) {
 	case BPF_MAP_TYPE_PERCPU_ARRAY:
 	case BPF_MAP_TYPE_PERCPU_HASH:
 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
 		return true;
 	default:
 		return false;

@@ -3756,7 +3757,7 @@ static inline int bpf_map_check_flags(struct bpf_map *map, u64 flags, bool check
 	if (!(flags & BPF_F_CPU) && flags >> 32)
 		return -EINVAL;
 
-	if ((flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) && !bpf_map_support_cpu_flags(map->map_type))
+	if ((flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) && !bpf_map_is_percpu(map->map_type))
 		return -EINVAL;
 
 	return 0;
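Taken together, the two checks above pin down the layout of the 64-bit flags word: the upper 32 bits carry a CPU index and are only permitted when BPF_F_CPU is set in the lower bits. In user-space terms (illustrative; target_cpu is a placeholder):

/* bits  0..31: regular flags (BPF_ANY, BPF_EXIST, BPF_F_CPU, ...)
 * bits 32..63: target CPU index, valid only together with BPF_F_CPU
 */
__u64 flags = BPF_F_CPU | ((__u64)target_cpu << 32);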

kernel/bpf/local_storage.c

Lines changed: 36 additions & 11 deletions
@@ -180,50 +180,64 @@ static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
 }
 
 int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
-				   void *value)
+				   void *value, u64 map_flags)
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	int cpu, off = 0, err;
 	u32 size;
 
+	err = bpf_map_check_cpu_flags(map_flags, false);
+	if (err)
+		return err;
+
 	rcu_read_lock();
 	storage = cgroup_storage_lookup(map, key, false);
 	if (!storage) {
-		rcu_read_unlock();
-		return -ENOENT;
+		err = -ENOENT;
+		goto unlock;
 	}
 
 	/* per_cpu areas are zero-filled and bpf programs can only
 	 * access 'value_size' of them, so copying rounded areas
 	 * will not leak any kernel data
 	 */
 	size = round_up(_map->value_size, 8);
+	if (map_flags & BPF_F_CPU) {
+		cpu = map_flags >> 32;
+		bpf_long_memcpy(value, per_cpu_ptr(storage->percpu_buf, cpu), size);
+		goto unlock;
+	}
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
 				per_cpu_ptr(storage->percpu_buf, cpu), size);
 		off += size;
 	}
+unlock:
 	rcu_read_unlock();
-	return 0;
+	return err;
 }
 
 int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 				     void *value, u64 map_flags)
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	int cpu, off = 0, err;
 	u32 size;
 
-	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
+	if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
 		return -EINVAL;
 
+	err = bpf_map_check_cpu_flags(map_flags, true);
+	if (err)
+		return err;
+
 	rcu_read_lock();
 	storage = cgroup_storage_lookup(map, key, false);
 	if (!storage) {
-		rcu_read_unlock();
-		return -ENOENT;
+		err = -ENOENT;
+		goto unlock;
 	}
 
 	/* the user space will provide round_up(value_size, 8) bytes that
@@ -233,13 +247,24 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 	 * so no kernel data leaks possible
 	 */
 	size = round_up(_map->value_size, 8);
+	if (map_flags & BPF_F_CPU) {
+		cpu = map_flags >> 32;
+		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu), value, size);
+		goto unlock;
+	}
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
 				value + off, size);
-		off += size;
+		/* same user-provided value is used if
+		 * BPF_F_ALL_CPUS is specified, otherwise value is
+		 * an array of per-cpu values.
+		 */
+		if (!(map_flags & BPF_F_ALL_CPUS))
+			off += size;
 	}
+unlock:
 	rcu_read_unlock();
-	return 0;
+	return err;
 }
 
 static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
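Since the BPF_F_CPU lookup path above copies exactly one CPU's slot, the caller supplies a single round_up(value_size, 8) buffer rather than a per-CPU array. A hedged user-space counterpart using libbpf's bpf_map_lookup_elem_flags() (map_fd and key are placeholders; the value type is whatever the map defines):

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Read one CPU's storage slot; the CPU index travels in the upper
 * 32 bits of the flags argument, next to BPF_F_CPU.
 */
static int get_one_cpu(int map_fd, struct bpf_cgroup_storage_key *key,
		       __u32 cpu, __u64 *value)
{
	return bpf_map_lookup_elem_flags(map_fd, key, value,
					 BPF_F_CPU | ((__u64)cpu << 32));
}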

kernel/bpf/syscall.c

Lines changed: 4 additions & 8 deletions
@@ -133,13 +133,9 @@ bool bpf_map_write_active(const struct bpf_map *map)
 
 static u32 bpf_map_value_size(const struct bpf_map *map, u64 flags)
 {
-	if (bpf_map_support_cpu_flags(map->map_type) && (flags & (BPF_F_CPU | BPF_F_ALL_CPUS)))
-		return round_up(map->value_size, 8);
-	else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-		 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
-		 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
-		 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
-		return round_up(map->value_size, 8) * num_possible_cpus();
+	if (bpf_map_is_percpu(map->map_type))
+		return flags & (BPF_F_CPU | BPF_F_ALL_CPUS) ? round_up(map->value_size, 8) :
+		       round_up(map->value_size, 8) * num_possible_cpus();
 	else if (IS_FD_MAP(map))
 		return sizeof(u32);
 	else

@@ -318,7 +314,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value, flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
-		err = bpf_percpu_cgroup_storage_copy(map, key, value);
+		err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 		err = bpf_stackmap_copy(map, key, value);
 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
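The simplified bpf_map_value_size() also dictates how large the user-space buffer must be: one round_up(value_size, 8) chunk when BPF_F_CPU or BPF_F_ALL_CPUS is set, and num_possible_cpus() such chunks otherwise. A sketch of the matching allocation, assuming libbpf's libbpf_num_possible_cpus() helper and the flag definitions from this series:

#include <bpf/libbpf.h>
#include <linux/bpf.h>
#include <stdlib.h>

/* Mirror the kernel's bpf_map_value_size() sizing for per-CPU maps. */
static void *alloc_value_buf(__u32 value_size, __u64 flags)
{
	size_t sz = (value_size + 7) & ~(size_t)7;	/* round_up(value_size, 8) */

	if (!(flags & (BPF_F_CPU | BPF_F_ALL_CPUS)))
		sz *= libbpf_num_possible_cpus();	/* full per-CPU array */
	return calloc(1, sz);
}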
