 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
 #include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,230 @@ static void test_failure(void) {
 	RUN_TESTS(percpu_alloc_fail);
 }
 
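+/*
+ * Shared helper for the BPF_F_CPU / BPF_F_ALL_CPUS subtests: when BPF_F_CPU
+ * is set, the target CPU index is carried in the upper 32 bits of the flags;
+ * BPF_F_ALL_CPUS applies a single value to every CPU's slot on update.
+ * Passing both flags together, or a CPU index >= nr_cpus, must be rejected.
+ */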
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
+					u32 max_entries, bool test_batch)
+{
+	int i, j, cpu, map_fd, value_size, nr_cpus, err;
+	u64 *values = NULL, batch = 0, flags;
+	const u64 value = 0xDEADC0DE;
+	size_t value_sz = sizeof(u64);
+	u32 count;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	value_size = value_sz * nr_cpus;
+	values = calloc(max_entries, value_size);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		goto out;
+	memset(values, 0, value_size * max_entries);
+
+	map_fd = bpf_map__fd(map);
+	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags err"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem err"))
+		goto out;
+
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear value on all cpus */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS;
+		for (i = 0; i < max_entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+				goto out;
+		}
+
+		/* update value on specified cpu */
+		for (i = 0; i < max_entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* lookup then check value on CPUs */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+							   value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, max_entries * value_size);
+
+		/* clear values across all CPUs */
+		batch_opts.elem_flags = BPF_F_ALL_CPUS;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+			goto out;
+
+		/* update values on specified CPU */
+		for (i = 0; i < max_entries; i++)
+			values[i] = value;
+
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* lookup values on specified CPU; count is an in/out parameter
+		 * for batch lookups, so tell the kernel how many entries the
+		 * buffers can hold before each call.
+		 */
+		memset(values, 0, max_entries * value_sz);
+		count = max_entries;
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++)
+			if (!ASSERT_EQ(values[i], value, "value on specified cpu"))
+				goto out;
+
+		/* lookup values from all CPUs */
+		batch_opts.elem_flags = 0;
+		memset(values, 0, max_entries * value_size);
+		count = max_entries;
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++) {
+			for (j = 0; j < nr_cpus; j++) {
+				if (!ASSERT_EQ(values[i * nr_cpus + j], j != cpu ? 0 : value,
+					       "value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	if (values)
+		free(values);
+}
+
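+/*
+ * Run the per-CPU flag checks against the percpu_alloc_array skeleton,
+ * overriding the map type so the same coverage applies to percpu array,
+ * percpu hash and LRU percpu hash maps.
+ */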
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	int *keys = NULL, i, err;
+	struct bpf_map *map;
+	u32 max_entries;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+		return;
+
+	map = skel->maps.percpu;
+	bpf_map__set_type(map, map_type);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	max_entries = bpf_map__max_entries(map);
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		goto out;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries, true);
+out:
+	if (keys)
+		free(keys);
+	percpu_alloc_array__destroy(skel);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
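+/*
+ * Percpu cgroup storage gets its entry only once a program using the map is
+ * attached to a cgroup, so attach an egress program first, fetch the entry's
+ * key with bpf_map_get_next_key(), then run the single-element flag checks
+ * (batch operations are skipped for this map).
+ */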
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct bpf_cgroup_storage_key key;
+	struct percpu_alloc_array *skel;
+	int cgroup = -1, prog_fd, err;
+	struct bpf_map *map;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		return;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup"))
+		goto out;
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
 void test_percpu_alloc(void)
 {
 	if (test__start_subtest("array"))
@@ -125,4 +350,12 @@ void test_percpu_alloc(void)
 		test_cgrp_local_storage();
 	if (test__start_subtest("failure_tests"))
 		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
 }
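
For reference, a minimal user-space sketch of the flag encoding these tests exercise. It assumes a hypothetical `map_fd` for an already-loaded per-CPU map with u64 values, and a kernel plus libbpf that support BPF_F_CPU and BPF_F_ALL_CPUS:

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Broadcast one value to every CPU for key 0, then read back CPU 2's copy. */
static int cpu_flag_sketch(int map_fd)
{
	__u32 key = 0;
	__u64 val = 0xDEADC0DE;
	int err;

	/* BPF_F_ALL_CPUS: a single value-sized buffer updates all CPUs. */
	err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);
	if (err)
		return err;

	/* BPF_F_CPU: the target CPU index goes in the upper 32 bits of flags. */
	return bpf_map_lookup_elem_flags(map_fd, &key, &val,
					 (__u64)2 << 32 | BPF_F_CPU);
}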