 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
 #include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,245 @@ static void test_failure(void) {
 	RUN_TESTS(percpu_alloc_fail);
 }
 
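+/* Exercise per-cpu map lookup/update with the BPF_F_CPU and BPF_F_ALL_CPUS
+ * flags: invalid flag combinations, out-of-range cpu indices, per-element
+ * ops, and (when test_batch is set) the batch ops.
+ */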
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
+					u32 max_entries, bool test_batch)
+{
+	size_t value_sz = sizeof(u32), value_sz_cpus, value_sz_total;
+	u32 *values = NULL, *values_percpu = NULL;
+	int i, j, cpu, map_fd, nr_cpus, err;
+	const u32 value = 0xDEADC0DE;
+	u32 count = max_entries, v;
+	u64 batch = 0, flags;
+	void *values_row;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	value_sz_cpus = value_sz * nr_cpus;
+	values = calloc(max_entries, value_sz_cpus);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	values_percpu = calloc(max_entries, roundup(value_sz, 8) * nr_cpus);
+	if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
+		free(values);
+		return;
+	}
+
+	value_sz_total = value_sz_cpus * max_entries;
+	memset(values, 0, value_sz_total);
+
+	map_fd = bpf_map__fd(map);
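+	/* BPF_F_CPU and BPF_F_ALL_CPUS must not be combined */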
+	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags err"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem err"))
+		goto out;
+
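+	/* a cpu index >= nr_cpus must be rejected with -ERANGE */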
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
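+	/* per-element ops: write on one cpu, confirm the others stay zero */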
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear the value on all cpus */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS;
+		for (i = 0; i < max_entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+				goto out;
+		}
+
+		/* update the value on the specified cpu */
+		for (i = 0; i < max_entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* lookup and check the value on every cpu */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+							   value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
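+	/* batch update with an out-of-range cpu index must also fail */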
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
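+	/* batch ops: BPF_F_ALL_CPUS to clear, BPF_F_CPU to write/read one cpu */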
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, value_sz_total);
+
+		/* clear values across all cpus */
+		batch_opts.elem_flags = BPF_F_ALL_CPUS;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+			goto out;
+
+		/* update values on the specified cpu */
+		for (i = 0; i < max_entries; i++)
+			values[i] = value;
+
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* lookup values on the specified cpu */
+		memset(values, 0, max_entries * value_sz);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++)
+			if (!ASSERT_EQ(values[i], value,
+				       "bpf_map_lookup_batch value on specified cpu"))
+				goto out;
+
+		/* lookup values from all cpus */
+		batch_opts.elem_flags = 0;
+		memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * max_entries);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values_percpu, &count,
+					   &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++) {
+			values_row = (void *)values_percpu +
+				     roundup(value_sz, 8) * i * nr_cpus;
+			for (j = 0; j < nr_cpus; j++) {
+				v = *(u32 *)(values_row + roundup(value_sz, 8) * j);
+				if (!ASSERT_EQ(v, j != cpu ? 0 : value,
+					       "bpf_map_lookup_batch value all_cpus"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	free(values_percpu);
+	free(values);
+}
+
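+/* Retype the "percpu" map of percpu_alloc_array to the requested per-cpu map
+ * type, load the skeleton, and run the BPF_F_CPU/BPF_F_ALL_CPUS checks,
+ * including the batch variants.
+ */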
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	int *keys = NULL, i, err;
+	struct bpf_map *map;
+	u32 max_entries;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+		return;
+
+	map = skel->maps.percpu;
+	bpf_map__set_type(map, map_type);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	max_entries = bpf_map__max_entries(map);
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		goto out;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries, true);
+out:
+	if (keys)
+		free(keys);
+	percpu_alloc_array__destroy(skel);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
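+/* Attach cgroup_egress to a fresh cgroup so percpu_cgroup_storage gets an
+ * entry, then run the BPF_F_CPU/BPF_F_ALL_CPUS checks on that entry (batch
+ * variants are skipped by passing test_batch = false).
+ */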
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct bpf_cgroup_storage_key key;
+	struct percpu_alloc_array *skel;
+	int cgroup = -1, prog_fd, err;
+	struct bpf_map *map;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		return;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup"))
+		goto out;
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
 void test_percpu_alloc(void)
 {
 	if (test__start_subtest("array"))
@@ -125,4 +365,12 @@ void test_percpu_alloc(void)
 		test_cgrp_local_storage();
 	if (test__start_subtest("failure_tests"))
 		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
 }