Commit 885b92f

Asphaltt authored and Kernel Patches Daemon committed
bpf: Add BPF_F_CPU and BPF_F_ALL_CPUS flags support for percpu_hash and lru_percpu_hash maps
Introduce BPF_F_ALL_CPUS flag support for percpu_hash and lru_percpu_hash
maps to allow updating values for all CPUs with a single value for both
update_elem and update_batch APIs.

Introduce BPF_F_CPU flag support for percpu_hash and lru_percpu_hash maps
to allow:

* update value for specified CPU for both update_elem and update_batch APIs.
* lookup value for specified CPU for both lookup_elem and lookup_batch APIs.

The BPF_F_CPU flag is passed via:

* map_flags along with embedded cpu info.
* elem_flags along with embedded cpu info.

Signed-off-by: Leon Hwang <[email protected]>
1 parent 36aaf5b commit 885b92f
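
For context, a minimal userspace sketch of what the new flags enable, using
libbpf's existing bpf_map_update_elem() and bpf_map_lookup_elem_flags()
wrappers. The flag names come from this series' UAPI changes; embedding the
target CPU in the upper 32 bits of the flags is an assumption based on the
commit message's "embedded cpu info", and the 8-byte map value is made up
for the example:

#include <stdio.h>
#include <bpf/bpf.h>

/* Sketch only: assumes headers carrying this series' BPF_F_CPU and
 * BPF_F_ALL_CPUS definitions, and a percpu_hash map with u64 values.
 */
static int demo(int map_fd)
{
	__u32 key = 0, cpu = 2;
	__u64 val = 42, out;
	int err;

	/* Replicate one value to every CPU's slot in a single call. */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);
	if (err)
		return err;

	/* Read back only CPU 2's copy; the buffer holds a single value,
	 * not value_size * nr_cpus bytes.
	 */
	err = bpf_map_lookup_elem_flags(map_fd, &key, &out,
					BPF_F_CPU | ((__u64)cpu << 32));
	if (!err)
		printf("cpu%u sees %llu\n", cpu, (unsigned long long)out);
	return err;
}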

File tree

2 files changed: +33, -10 lines

* include/linux/bpf.h
* kernel/bpf/hashtab.c


include/linux/bpf.h

Lines changed: 2 additions & 0 deletions
@@ -3756,6 +3756,8 @@ static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
 {
 	switch (map_type) {
 	case BPF_MAP_TYPE_PERCPU_ARRAY:
+	case BPF_MAP_TYPE_PERCPU_HASH:
+	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
 		return true;
 	default:
 		return false;

kernel/bpf/hashtab.c

Lines changed: 31 additions & 10 deletions
@@ -1259,9 +1259,15 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
 	u32 key_size, hash;
 	int ret;
 
-	if (unlikely(map_flags > BPF_EXIST))
-		/* unknown flags */
-		return -EINVAL;
+	if (percpu) {
+		ret = bpf_map_check_cpu_flags(map_flags, true);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		if (unlikely(map_flags > BPF_EXIST))
+			/* unknown flags */
+			return -EINVAL;
+	}
 
 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
 		     !rcu_read_lock_bh_held());
@@ -1322,9 +1328,9 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	u32 key_size, hash;
 	int ret;
 
-	if (unlikely(map_flags > BPF_EXIST))
-		/* unknown flags */
-		return -EINVAL;
+	ret = bpf_map_check_cpu_flags(map_flags, true);
+	if (unlikely(ret))
+		return ret;
 
 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
 		     !rcu_read_lock_bh_held());
@@ -1689,9 +1695,19 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	int ret = 0;
 
 	elem_map_flags = attr->batch.elem_flags;
-	if ((elem_map_flags & ~BPF_F_LOCK) ||
-	    ((elem_map_flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
-		return -EINVAL;
+	if (!do_delete && is_percpu) {
+		ret = bpf_map_check_lookup_batch_flags(map, elem_map_flags);
+		if (ret)
+			return ret;
+		ret = bpf_map_check_cpu_flags(elem_map_flags, false);
+		if (ret)
+			return ret;
+	} else {
+		if ((elem_map_flags & ~BPF_F_LOCK) ||
+		    ((elem_map_flags & BPF_F_LOCK) &&
+		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
+			return -EINVAL;
+	}
 
 	map_flags = attr->batch.flags;
 	if (map_flags)
@@ -2355,8 +2371,13 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value, u64 map_flags)
 {
 	struct htab_elem *l;
 	void __percpu *pptr;
-	int ret = -ENOENT;
 	u32 size;
+	int ret;
+
+	ret = bpf_map_check_cpu_flags(map_flags, false);
+	if (unlikely(ret))
+		return ret;
+	ret = -ENOENT;
 
 	/* per_cpu areas are zero-filled and bpf programs can only
 	 * access 'value_size' of them, so copying rounded areas
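
On the batch side, elem_flags can now carry BPF_F_CPU for percpu hash
lookups as well. A hedged libbpf sketch: bpf_map_lookup_batch() and
bpf_map_batch_opts are existing libbpf API, while the cpu-in-upper-bits
encoding is the same assumption as above:

#include <bpf/bpf.h>

/* Sketch: batch-dump a single CPU's values from a percpu_hash map.
 * With BPF_F_CPU in elem_flags, each value slot is one value wide
 * instead of value_size * nr_cpus.
 */
static int dump_one_cpu(int map_fd, __u32 cpu, __u32 *keys, __u64 *vals,
			__u32 *count)
{
	__u32 out_batch;
	LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = BPF_F_CPU | ((__u64)cpu << 32),
	);

	/* NULL in_batch starts iteration from the beginning of the map. */
	return bpf_map_lookup_batch(map_fd, NULL, &out_batch,
				    keys, vals, count, &opts);
}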
