// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include "cgroup_helpers.h"
#include "percpu_alloc_array.skel.h"
#include "percpu_alloc_cgrp_local_storage.skel.h"
#include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,321 @@ static void test_failure(void) {
	RUN_TESTS(percpu_alloc_fail);
}

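+/* Exercise BPF_F_CPU and BPF_F_ALL_CPUS on a percpu map: invalid flag
+ * combinations and out-of-range cpu indexes must fail, and an update or
+ * lookup with BPF_F_CPU must touch only the selected cpu's value. Batch
+ * variants are covered when test_batch is set.
+ */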
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz, u32 entries,
+					int nr_cpus, bool test_batch)
+{
+	size_t value_sz = sizeof(u32), value_sz_cpus, value_sz_total;
+	u32 *values = NULL, *values_percpu = NULL;
+	const u32 value = 0xDEADC0DE;
+	int i, j, cpu, map_fd, err;
+	u64 batch = 0, flags;
+	void *values_row;
+	u32 count, v;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	value_sz_cpus = value_sz * nr_cpus;
+	values = calloc(entries, value_sz_cpus);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	values_percpu = calloc(entries, roundup(value_sz, 8) * nr_cpus);
+	if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
+		free(values);
+		return;
+	}
+
+	value_sz_total = value_sz_cpus * entries;
+	memset(values, 0, value_sz_total);
+
+	map_fd = bpf_map__fd(map);
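+
+	/* invalid flag combinations and out-of-range cpu index must fail */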
+	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu|all_cpus"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem cpu|all_cpus"))
+		goto out;
+
+	flags = BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags all_cpus"))
+		goto out;
+
+	flags = BPF_F_LOCK | BPF_F_CPU;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags BPF_F_LOCK"))
+		goto out;
+
+	flags = BPF_F_LOCK | BPF_F_ALL_CPUS;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem BPF_F_LOCK"))
+		goto out;
+
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear value on all cpus */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS;
+		for (i = 0; i < entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+				goto out;
+		}
+
+		/* update value on specified cpu */
+		for (i = 0; i < entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* lookup then check value on CPUs */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+							   value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
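+	/* out-of-range cpu index must be rejected for batch ops too */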
+	count = entries;
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, value_sz_total);
+
+		/* clear values across all CPUs */
+		count = entries;
+		batch_opts.elem_flags = BPF_F_ALL_CPUS;
+		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+			goto out;
+
+		/* update values on specified CPU */
+		for (i = 0; i < entries; i++)
+			values[i] = value;
+
+		count = entries;
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* lookup values on specified CPU */
+		batch = 0;
+		count = entries;
+		memset(values, 0, entries * value_sz);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < entries; i++)
+			if (!ASSERT_EQ(values[i], value,
+				       "bpf_map_lookup_batch value on specified cpu"))
+				goto out;
+
+		/* lookup values from all CPUs */
+		batch = 0;
+		count = entries;
+		batch_opts.elem_flags = 0;
+		memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * entries);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values_percpu, &count,
+					   &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+			goto out;
+
+		for (i = 0; i < entries; i++) {
+			values_row = (void *)values_percpu +
+				     roundup(value_sz, 8) * i * nr_cpus;
+			for (j = 0; j < nr_cpus; j++) {
+				v = *(u32 *)(values_row + roundup(value_sz, 8) * j);
+				if (!ASSERT_EQ(v, j != cpu ? 0 : value,
+					       "bpf_map_lookup_batch value all_cpus"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	free(values_percpu);
+	free(values);
+}
+
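+/* Exercise the cpu flags on a percpu map built from the percpu_alloc_array
+ * skeleton; the map is retyped and resized to nr_cpus + 1 entries before load.
+ */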
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	int *keys, nr_cpus, i, err;
+	struct bpf_map *map;
+	u32 max_entries;
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	max_entries = nr_cpus + 1;
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		return;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open")) {
+		free(keys);
+		return;
+	}
+
+	map = skel->maps.percpu;
+	bpf_map__set_type(map, map_type);
+	bpf_map__set_max_entries(map, max_entries);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries - 1, nr_cpus, true);
+out:
+	percpu_alloc_array__destroy(skel);
+	free(keys);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
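+/* Same cpu-flag coverage for percpu cgroup storage: attach a cgroup egress
+ * program to obtain a storage key, then run the single-element checks only
+ * (batch ops are not exercised for this map type).
+ */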
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct percpu_alloc_array *skel = NULL;
+	struct bpf_cgroup_storage_key key;
+	int cgroup, prog_fd, nr_cpus, err;
+	struct bpf_map *map;
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "setup_cgroup_environment"))
+		return;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup")) {
+		cleanup_cgroup_environment();
+		return;
+	}
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, nr_cpus, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
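+/* Non-percpu array/hash maps must reject BPF_F_CPU and BPF_F_ALL_CPUS for
+ * both single-element and batch operations.
+ */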
+static void test_map_op_cpu_flag(enum bpf_map_type map_type)
+{
+	u32 max_entries = 1, count = max_entries;
+	u64 flags, batch = 0, val = 0;
+	int err, map_fd, key = 0;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	map_fd = bpf_map_create(map_type, "test_cpu_flag", sizeof(int), sizeof(u64), max_entries,
+				NULL);
+	if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
+		return;
+
+	flags = BPF_F_ALL_CPUS;
+	err = bpf_map_update_elem(map_fd, &key, &val, flags);
+	ASSERT_ERR(err, "bpf_map_update_elem all_cpus");
+
+	batch_opts.elem_flags = BPF_F_ALL_CPUS;
+	err = bpf_map_update_batch(map_fd, &key, &val, &count, &batch_opts);
+	ASSERT_ERR(err, "bpf_map_update_batch all_cpus");
+
+	flags = BPF_F_CPU;
+	err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);
+	ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu");
+
+	batch_opts.elem_flags = BPF_F_CPU;
+	err = bpf_map_lookup_batch(map_fd, NULL, &batch, &key, &val, &count, &batch_opts);
+	ASSERT_ERR(err, "bpf_map_lookup_batch cpu");
+
+	close(map_fd);
+}
+
+static void test_array_cpu_flag(void)
+{
+	test_map_op_cpu_flag(BPF_MAP_TYPE_ARRAY);
+}
+
+static void test_hash_cpu_flag(void)
+{
+	test_map_op_cpu_flag(BPF_MAP_TYPE_HASH);
+}
+
void test_percpu_alloc(void)
{
	if (test__start_subtest("array"))
@@ -125,4 +441,16 @@ void test_percpu_alloc(void)
		test_cgrp_local_storage();
	if (test__start_subtest("failure_tests"))
		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
+	if (test__start_subtest("cpu_flag_array"))
+		test_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_hash"))
+		test_hash_cpu_flag();
}