@@ -133,12 +133,14 @@ bool bpf_map_write_active(const struct bpf_map *map)
133133 return atomic64_read (& map -> writecnt ) != 0 ;
134134}
135135
136- static u32 bpf_map_value_size (const struct bpf_map * map )
137- {
138- if (map -> map_type == BPF_MAP_TYPE_PERCPU_HASH ||
139- map -> map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
140- map -> map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
141- map -> map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE )
136+ static u32 bpf_map_value_size (const struct bpf_map * map , u64 flags )
137+ {
138+ if (flags & (BPF_F_CPU | BPF_F_ALL_CPUS ))
139+ return map -> value_size ;
140+ else if (map -> map_type == BPF_MAP_TYPE_PERCPU_HASH ||
141+ map -> map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
142+ map -> map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
143+ map -> map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE )
142144 return round_up (map -> value_size , 8 ) * num_possible_cpus ();
143145 else if (IS_FD_MAP (map ))
144146 return sizeof (u32 );
@@ -1732,15 +1734,15 @@ static int map_lookup_elem(union bpf_attr *attr)
17321734 if (!(map_get_sys_perms (map , f ) & FMODE_CAN_READ ))
17331735 return - EPERM ;
17341736
1735- err = bpf_map_check_op_flags (map , attr -> flags , BPF_F_LOCK );
1737+ err = bpf_map_check_op_flags (map , attr -> flags , BPF_F_LOCK | BPF_F_CPU );
17361738 if (err )
17371739 return err ;
17381740
17391741 key = __bpf_copy_key (ukey , map -> key_size );
17401742 if (IS_ERR (key ))
17411743 return PTR_ERR (key );
17421744
1743- value_size = bpf_map_value_size (map );
1745+ value_size = bpf_map_value_size (map , attr -> flags );
17441746
17451747 err = - ENOMEM ;
17461748 value = kvmalloc (value_size , GFP_USER | __GFP_NOWARN );
@@ -1807,7 +1809,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
18071809 goto err_put ;
18081810 }
18091811
1810- value_size = bpf_map_value_size (map );
1812+ value_size = bpf_map_value_size (map , attr -> flags );
18111813 value = kvmemdup_bpfptr (uvalue , value_size );
18121814 if (IS_ERR (value )) {
18131815 err = PTR_ERR (value );
@@ -2003,11 +2005,12 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
20032005 void * key , * value ;
20042006 int err = 0 ;
20052007
2006- err = bpf_map_check_op_flags (map , attr -> batch .elem_flags , BPF_F_LOCK );
2008+ err = bpf_map_check_op_flags (map , attr -> batch .elem_flags ,
2009+ BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS );
20072010 if (err )
20082011 return err ;
20092012
2010- value_size = bpf_map_value_size (map );
2013+ value_size = bpf_map_value_size (map , attr -> batch . elem_flags );
20112014
20122015 max_count = attr -> batch .count ;
20132016 if (!max_count )
@@ -2062,11 +2065,11 @@ int generic_map_lookup_batch(struct bpf_map *map,
20622065 u32 value_size , cp , max_count ;
20632066 int err ;
20642067
2065- err = bpf_map_check_op_flags (map , attr -> batch .elem_flags , BPF_F_LOCK );
2068+ err = bpf_map_check_op_flags (map , attr -> batch .elem_flags , BPF_F_LOCK | BPF_F_CPU );
20662069 if (err )
20672070 return err ;
20682071
2069- value_size = bpf_map_value_size (map );
2072+ value_size = bpf_map_value_size (map , attr -> batch . elem_flags );
20702073
20712074 max_count = attr -> batch .count ;
20722075 if (!max_count )
@@ -2188,7 +2191,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
21882191 goto err_put ;
21892192 }
21902193
2191- value_size = bpf_map_value_size (map );
2194+ value_size = bpf_map_value_size (map , 0 );
21922195
21932196 err = - ENOMEM ;
21942197 value = kvmalloc (value_size , GFP_USER | __GFP_NOWARN );