 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
 #include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,328 @@ static void test_failure(void) {
 	RUN_TESTS(percpu_alloc_fail);
 }
 
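+/*
+ * Exercise BPF_F_CPU and BPF_F_ALL_CPUS on per-CPU maps. The target CPU is
+ * carried in the upper 32 bits of the flags argument; BPF_F_CPU restricts an
+ * update/lookup to that single CPU, while BPF_F_ALL_CPUS spreads one value
+ * to every CPU on update and is rejected for lookups.
+ */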
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
+					u32 max_entries, bool test_batch)
+{
+	size_t value_sz = sizeof(u32), value_sz_total;
+	u32 *values = NULL, *values_percpu = NULL;
+	int i, j, cpu, map_fd, nr_cpus, err;
+	const u32 value = 0xDEADC0DE;
+	u64 batch = 0, flags;
+	void *values_row;
+	u32 count, v;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	values = calloc(max_entries, value_sz * nr_cpus);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	values_percpu = calloc(max_entries, roundup(value_sz, 8) * nr_cpus);
+	if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
+		free(values);
+		return;
+	}
+	memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * max_entries);
+
+	value_sz_total = value_sz * nr_cpus * max_entries;
+	memset(values, 0, value_sz_total);
+
+	map_fd = bpf_map__fd(map);
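+	/* Mutually exclusive or unsupported flag combinations must be rejected. */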
+	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu|all_cpus"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem cpu|all_cpus"))
+		goto out;
+
+	flags = BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags all_cpus"))
+		goto out;
+
+	flags = BPF_F_LOCK | BPF_F_CPU;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags BPF_F_LOCK"))
+		goto out;
+
+	flags = BPF_F_LOCK | BPF_F_ALL_CPUS;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem BPF_F_LOCK"))
+		goto out;
+
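+	/* A CPU index >= nr_cpus must be rejected with -ERANGE. */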
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
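+	/* Seed all entries with zeroed per-CPU values. */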
+	flags = BPF_ANY;
+	for (i = 0; i < max_entries; i++) {
+		err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values_percpu,
+					   roundup(value_sz, 8) * nr_cpus, flags);
+		if (!ASSERT_OK(err, "bpf_map__update_elem init"))
+			goto out;
+	}
+
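+	/*
+	 * For each CPU: zero all per-CPU copies, write the test value on
+	 * that CPU only, then verify every CPU reads back the expected copy.
+	 */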
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear value on all cpus */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS | BPF_EXIST;
+		for (i = 0; i < max_entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+				goto out;
+		}
+
+		/* update value on specified cpu */
+		for (i = 0; i < max_entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU | BPF_EXIST;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* lookup then check value on CPUs */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+							   value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
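+	/* Repeat the coverage with the batch API. */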
+	count = max_entries;
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, value_sz_total);
+
+		/* clear values across all CPUs */
+		count = max_entries;
+		batch_opts.elem_flags = BPF_F_ALL_CPUS | BPF_EXIST;
+		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+			goto out;
+
+		/* update values on specified CPU */
+		for (i = 0; i < max_entries; i++)
+			values[i] = value;
+
+		count = max_entries;
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU | BPF_EXIST;
+		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* lookup values on specified CPU */
+		count = max_entries;
+		memset(values, 0, max_entries * value_sz);
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++)
+			if (!ASSERT_EQ(values[i], value,
+				       "bpf_map_lookup_batch value on specified cpu"))
+				goto out;
+
+		/* lookup values from all CPUs */
+		batch = 0;
+		count = max_entries;
+		batch_opts.elem_flags = 0;
+		memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * max_entries);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values_percpu, &count,
+					   &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++) {
+			values_row = (void *)values_percpu +
+				     roundup(value_sz, 8) * i * nr_cpus;
+			for (j = 0; j < nr_cpus; j++) {
+				v = *(u32 *)(values_row + roundup(value_sz, 8) * j);
+				if (!ASSERT_EQ(v, j != cpu ? 0 : value,
+					       "bpf_map_lookup_batch value all_cpus"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	free(values_percpu);
+	free(values);
+}
+
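+/*
+ * Reuse the percpu_alloc_array skeleton and retype its "percpu" map so the
+ * same helper covers every per-CPU map type.
+ */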
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type, u32 max_entries)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	struct bpf_map *map;
+	int *keys, i, err;
+
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		return;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open")) {
+		free(keys);
+		return;
+	}
+
+	map = skel->maps.percpu;
+	bpf_map__set_type(map, map_type);
+	bpf_map__set_max_entries(map, max_entries);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries, true);
+out:
+	percpu_alloc_array__destroy(skel);
+	free(keys);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY, 2);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH, 2);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	int nr_cpus = libbpf_num_possible_cpus();
+
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH, nr_cpus);
+}
+
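+/*
+ * Per-CPU cgroup storage only exists once a program is attached to a cgroup,
+ * so set up a cgroup and attach cgroup_egress before fetching the storage
+ * key. Batch operations are skipped for this map type.
+ */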
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct percpu_alloc_array *skel = NULL;
+	struct bpf_cgroup_storage_key key;
+	int cgroup, prog_fd, err;
+	struct bpf_map *map;
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "setup_cgroup_environment"))
+		return;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup")) {
+		cleanup_cgroup_environment();
+		return;
+	}
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
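+/* Maps that are not per-CPU must reject BPF_F_CPU and BPF_F_ALL_CPUS. */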
+static void test_map_op_cpu_flag(enum bpf_map_type map_type)
+{
+	u32 max_entries = 1, count = max_entries;
+	u64 flags, batch = 0, val = 0;
+	int err, map_fd, key = 0;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	map_fd = bpf_map_create(map_type, "test_cpu_flag", sizeof(int), sizeof(u64), max_entries,
+				NULL);
+	if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
+		return;
+
+	flags = BPF_F_ALL_CPUS;
+	err = bpf_map_update_elem(map_fd, &key, &val, flags);
+	ASSERT_ERR(err, "bpf_map_update_elem all_cpus");
+
+	batch_opts.elem_flags = BPF_F_ALL_CPUS;
+	err = bpf_map_update_batch(map_fd, &key, &val, &count, &batch_opts);
+	ASSERT_ERR(err, "bpf_map_update_batch all_cpus");
+
+	flags = BPF_F_CPU;
+	err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);
+	ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu");
+
+	batch_opts.elem_flags = BPF_F_CPU;
+	err = bpf_map_lookup_batch(map_fd, NULL, &batch, &key, &val, &count, &batch_opts);
+	ASSERT_ERR(err, "bpf_map_lookup_batch cpu");
+
+	close(map_fd);
+}
+
+static void test_array_cpu_flag(void)
+{
+	test_map_op_cpu_flag(BPF_MAP_TYPE_ARRAY);
+}
+
+static void test_hash_cpu_flag(void)
+{
+	test_map_op_cpu_flag(BPF_MAP_TYPE_HASH);
+}
+
 void test_percpu_alloc(void)
 {
 	if (test__start_subtest("array"))
@@ -125,4 +448,16 @@ void test_percpu_alloc(void)
 		test_cgrp_local_storage();
 	if (test__start_subtest("failure_tests"))
 		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
+	if (test__start_subtest("cpu_flag_array"))
+		test_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_hash"))
+		test_hash_cpu_flag();
 }