 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
 #include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,231 @@ static void test_failure(void) {
 	RUN_TESTS(percpu_alloc_fail);
 }
 
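+/*
+ * Exercise BPF_F_CPU and BPF_F_ALL_CPUS on a per-cpu map: verify that the
+ * flags are rejected when combined or when the cpu index in the upper 32
+ * bits is out of range, then check that single-element and (optionally)
+ * batched update/lookup operate on one specified cpu or on all cpus.
+ */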
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
+					u32 max_entries, bool test_batch)
+{
+	int i, j, cpu, map_fd, value_size, nr_cpus, err;
+	u64 *values = NULL, batch = 0, flags;
+	const u64 value = 0xDEADC0DE;
+	size_t value_sz = sizeof(u64);
+	u32 count = max_entries;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	value_size = value_sz * nr_cpus;
+	values = calloc(max_entries, value_size);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		goto out;
+	memset(values, 0, value_size * max_entries);
+
+	map_fd = bpf_map__fd(map);
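+	/* BPF_F_CPU and BPF_F_ALL_CPUS are mutually exclusive */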
+	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags err"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem err"))
+		goto out;
+
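+	/* the cpu index lives in the upper 32 bits of flags; nr_cpus is out of range */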
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear value on all cpus */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS;
+		for (i = 0; i < max_entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+				goto out;
+		}
+
+		/* update value on specified cpu */
+		for (i = 0; i < max_entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* lookup then check value on each cpu */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+							   value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
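+	/* the same flag encoding applies to batch operations via batch_opts.elem_flags */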
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, max_entries * value_size);
+
+		/* clear values across all CPUs */
+		batch_opts.elem_flags = BPF_F_ALL_CPUS;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+			goto out;
+
+		/* update values on specified CPU */
+		for (i = 0; i < max_entries; i++)
+			values[i] = value;
+
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* lookup values on specified CPU */
+		memset(values, 0, max_entries * value_sz);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++)
+			if (!ASSERT_EQ(values[i], value,
+				       "bpf_map_lookup_batch value on specified cpu"))
+				goto out;
+
+		/* lookup values from all CPUs */
+		batch_opts.elem_flags = 0;
+		memset(values, 0, max_entries * value_size);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++) {
+			for (j = 0; j < nr_cpus; j++) {
+				if (!ASSERT_EQ(values[i * nr_cpus + j], j != cpu ? 0 : value,
+					       "bpf_map_lookup_batch value all_cpus"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	if (values)
+		free(values);
+}
+
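+/*
+ * Open the percpu_alloc_array skeleton, switch its "percpu" map to the
+ * requested per-cpu map type, populate keys 0..max_entries-1 and run the
+ * cpu-flag checks including the batch variants.
+ */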
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	int *keys = NULL, i, err;
+	struct bpf_map *map;
+	u32 max_entries;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+		return;
+
+	map = skel->maps.percpu;
+	bpf_map__set_type(map, map_type);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	max_entries = bpf_map__max_entries(map);
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		goto out;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries, true);
+out:
+	if (keys)
+		free(keys);
+	percpu_alloc_array__destroy(skel);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
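+/*
+ * Attach a cgroup egress program so the per-cpu cgroup storage map has an
+ * entry to look up, then run the cpu-flag checks on that single entry
+ * (batch operations are skipped for this map).
+ */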
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct bpf_cgroup_storage_key key;
+	struct percpu_alloc_array *skel;
+	int cgroup = -1, prog_fd, err;
+	struct bpf_map *map;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		return;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup"))
+		goto out;
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
 void test_percpu_alloc(void)
 {
 	if (test__start_subtest("array"))
@@ -125,4 +351,12 @@ void test_percpu_alloc(void)
 		test_cgrp_local_storage();
 	if (test__start_subtest("failure_tests"))
 		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
 }