@@ -131,12 +131,14 @@ bool bpf_map_write_active(const struct bpf_map *map)
 	return atomic64_read(&map->writecnt) != 0;
 }
 
-static u32 bpf_map_value_size(const struct bpf_map *map)
-{
-	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
-	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
-	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+static u32 bpf_map_value_size(const struct bpf_map *map, u64 flags)
+{
+	if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS))
+		return round_up(map->value_size, 8);
+	else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+		 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+		 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
 		return round_up(map->value_size, 8) * num_possible_cpus();
 	else if (IS_FD_MAP(map))
 		return sizeof(u32);
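
Side note: with the change above, a caller that passes BPF_F_CPU or BPF_F_ALL_CPUS only needs room for one 8-byte-rounded value rather than one per possible CPU. A minimal userspace sketch of that sizing rule, assuming the new flags are exported through the UAPI header once this change lands (libbpf_num_possible_cpus() is an existing libbpf helper; the wrapper itself is illustrative):

#include <stddef.h>
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */
#include <linux/bpf.h>		/* BPF_F_CPU, BPF_F_ALL_CPUS (added by this change) */

/* Mirror of the kernel sizing rule: one slot when the new flags are set,
 * num_possible_cpus() slots otherwise for per-CPU maps. */
static size_t percpu_value_buf_size(__u32 value_size, __u64 flags)
{
	size_t slot = ((size_t)value_size + 7) & ~(size_t)7;	/* round_up(value_size, 8) */

	if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS))
		return slot;
	return slot * libbpf_num_possible_cpus();
}
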
@@ -1676,15 +1678,15 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
 		return -EPERM;
 
-	err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK);
+	err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK | BPF_F_CPU);
 	if (err)
 		return err;
 
 	key = __bpf_copy_key(ukey, map->key_size);
 	if (IS_ERR(key))
 		return PTR_ERR(key);
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->flags);
 
 	err = -ENOMEM;
 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
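
For context, a hedged sketch of a single-CPU lookup through the existing libbpf call bpf_map_lookup_elem_flags(); how the target CPU is encoded next to BPF_F_CPU is not shown in this hunk, so the upper-32-bits encoding below is an assumption:

#include <bpf/bpf.h>		/* bpf_map_lookup_elem_flags() */
#include <linux/bpf.h>		/* BPF_F_CPU (added by this change) */

/* Read a single CPU's copy of a per-CPU map value; the value buffer only
 * needs round_up(value_size, 8) bytes.
 * ASSUMPTION: the target CPU rides in the upper 32 bits of flags. */
static int lookup_one_cpu(int map_fd, const void *key, void *value, __u32 cpu)
{
	__u64 flags = BPF_F_CPU | ((__u64)cpu << 32);	/* assumed encoding */

	return bpf_map_lookup_elem_flags(map_fd, key, value, flags);
}
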
@@ -1751,7 +1753,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 		goto err_put;
 	}
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->flags);
 	value = kvmemdup_bpfptr(uvalue, value_size);
 	if (IS_ERR(value)) {
 		err = PTR_ERR(value);
@@ -1947,11 +1949,12 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
 	void *key, *value;
 	int err = 0;
 
-	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
+	err = bpf_map_check_op_flags(map, attr->batch.elem_flags,
+				     BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS);
 	if (err)
 		return err;
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->batch.elem_flags);
 
 	max_count = attr->batch.count;
 	if (!max_count)
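
Likewise, a hedged sketch of a batched update that broadcasts one value per key to every CPU via BPF_F_ALL_CPUS in batch.elem_flags; bpf_map_update_batch() and LIBBPF_OPTS() are existing libbpf APIs, the wrapper itself is illustrative:

#include <bpf/bpf.h>		/* bpf_map_update_batch(), LIBBPF_OPTS() */
#include <linux/bpf.h>		/* BPF_F_ALL_CPUS (added by this change) */

/* Update many keys at once, each with a single round_up(value_size, 8)-sized
 * value that the kernel copies to every CPU, instead of supplying
 * num_possible_cpus() copies per key. */
static int update_batch_all_cpus(int map_fd, const void *keys, const void *values,
				 __u32 *count)
{
	LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = BPF_F_ALL_CPUS,
	);

	return bpf_map_update_batch(map_fd, keys, values, count, &opts);
}
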
@@ -2006,11 +2009,11 @@ int generic_map_lookup_batch(struct bpf_map *map,
 	u32 value_size, cp, max_count;
 	int err;
 
-	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK);
+	err = bpf_map_check_op_flags(map, attr->batch.elem_flags, BPF_F_LOCK | BPF_F_CPU);
 	if (err)
 		return err;
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, attr->batch.elem_flags);
 
 	max_count = attr->batch.count;
 	if (!max_count)
@@ -2132,7 +2135,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
 		goto err_put;
 	}
 
-	value_size = bpf_map_value_size(map);
+	value_size = bpf_map_value_size(map, 0);
 
 	err = -ENOMEM;
 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);