@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
 #include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,221 @@ static void test_failure(void) {
 	RUN_TESTS(percpu_alloc_fail);
 }
 
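+/* Exercise BPF_F_CPU and BPF_F_ALL_CPUS against a percpu map: out-of-range
+ * CPU numbers must fail with -ERANGE, a single-CPU update must be invisible
+ * on every other CPU, and (optionally) the batch ops must behave the same.
+ */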
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
+					u32 max_entries, bool test_batch)
+{
+	int i, j, cpu, map_fd, value_size, nr_cpus, err;
+	u64 *values = NULL, batch = 0, flags;
+	const u64 value = 0xDEADC0DE;
+	size_t value_sz = sizeof(u64);
+	u32 count;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	/* calloc() already zero-fills, so no extra memset is needed */
+	value_size = value_sz * nr_cpus;
+	values = calloc(max_entries, value_size);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		goto out;
+	map_fd = bpf_map__fd(map);
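+	/* BPF_F_CPU carries the target CPU in the upper 32 bits of flags;
+	 * cpu == nr_cpus is one past the last valid CPU, so every API below
+	 * must reject it with -ERANGE.
+	 */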
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
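+	/* For each CPU: zero the value everywhere with BPF_F_ALL_CPUS, write a
+	 * sentinel on that CPU alone with BPF_F_CPU, then verify all other
+	 * CPUs still read zero.
+	 */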
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear the value on all CPUs */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS;
+		for (i = 0; i < max_entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all cpus"))
+				goto out;
+		}
+
+		/* update the value on the specified CPU only */
+		for (i = 0; i < max_entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* look up and check the value on every CPU */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz,
+							   values, value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
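+	/* Batch ops take the same flags through batch_opts.elem_flags and must
+	 * apply the same CPU-range validation.
+	 */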
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, max_entries * value_size);
+
+		/* clear values across all CPUs */
+		batch_opts.elem_flags = BPF_F_ALL_CPUS;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all cpus"))
+			goto out;
+
+		/* update values on the specified CPU */
+		for (i = 0; i < max_entries; i++)
+			values[i] = value;
+
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* look up values on the specified CPU; count is in/out for
+		 * bpf_map_lookup_batch() and must be initialized
+		 */
+		count = max_entries;
+		memset(values, 0, max_entries * value_sz);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++)
+			if (!ASSERT_EQ(values[i], value, "value on specified cpu"))
+				goto out;
+
+		/* look up values from all CPUs: elem_flags == 0 selects the
+		 * traditional layout of nr_cpus values per entry
+		 */
+		count = max_entries;
+		batch_opts.elem_flags = 0;
+		memset(values, 0, max_entries * value_size);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all cpus"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++) {
+			for (j = 0; j < nr_cpus; j++) {
+				if (!ASSERT_EQ(values[i * nr_cpus + j], j != cpu ? 0 : value,
+					       "value on each cpu"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	free(values);
+}
+
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	int *keys = NULL, i, err;
+	struct bpf_map *map;
+	u32 max_entries;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+		return;
+
+	map = skel->maps.percpu;
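+	/* retype before load so one skeleton covers array, hash and LRU hash */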
+	bpf_map__set_type(map, map_type);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	max_entries = bpf_map__max_entries(map);
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		goto out;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries, true);
+out:
+	free(keys);
+	percpu_alloc_array__destroy(skel);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
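+/* percpu cgroup storage: attaching an egress program makes the kernel create
+ * the storage element, whose key is then fetched with bpf_map_get_next_key().
+ */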
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct bpf_cgroup_storage_key key;
+	struct percpu_alloc_array *skel;
+	int cgroup = -1, prog_fd, err;
+	struct bpf_map *map;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		return;
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "setup_cgroup_environment"))
+		goto out;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup"))
+		goto out;
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
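+	/* the storage element now exists; grab its attach-generated key */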
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
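+	/* single key, and batch ops are skipped for cgroup storage */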
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
 void test_percpu_alloc(void)
 {
 	if (test__start_subtest("array"))
@@ -125,4 +341,12 @@ void test_percpu_alloc(void)
 		test_cgrp_local_storage();
 	if (test__start_subtest("failure_tests"))
 		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
 }