 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 #include "percpu_alloc_array.skel.h"
 #include "percpu_alloc_cgrp_local_storage.skel.h"
 #include "percpu_alloc_fail.skel.h"
@@ -115,6 +116,305 @@ static void test_failure(void) {
 	RUN_TESTS(percpu_alloc_fail);
 }
 
+static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
+					u32 max_entries, bool test_batch)
+{
+	size_t value_sz = sizeof(u32), value_sz_cpus, value_sz_total;
+	u32 *values = NULL, *values_percpu = NULL;
+	int i, j, cpu, map_fd, nr_cpus, err;
+	const u32 value = 0xDEADC0DE;
+	u32 count = max_entries, v;
+	u64 batch = 0, flags;
+	void *values_row;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	nr_cpus = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	value_sz_cpus = value_sz * nr_cpus;
+	values = calloc(max_entries, value_sz_cpus);
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	values_percpu = calloc(max_entries, roundup(value_sz, 8) * nr_cpus);
+	if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
+		free(values);
+		return;
+	}
+
+	value_sz_total = value_sz_cpus * max_entries;
+	memset(values, 0, value_sz_total);
+
+	map_fd = bpf_map__fd(map);
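+
+	/* Invalid flag combinations, BPF_F_LOCK together with the per-CPU
+	 * flags, and an out-of-range CPU number must all be rejected.
+	 */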
+	flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu|all_cpus"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem cpu|all_cpus"))
+		goto out;
+
+	flags = BPF_F_ALL_CPUS;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags all_cpus"))
+		goto out;
+
+	flags = BPF_F_LOCK | BPF_F_CPU;
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags BPF_F_LOCK"))
+		goto out;
+
+	flags = BPF_F_LOCK | BPF_F_ALL_CPUS;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_ERR(err, "bpf_map_update_elem BPF_F_LOCK"))
+		goto out;
+
+	flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_elem(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
+		goto out;
+
+	err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
+		goto out;
+
+	err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
+		goto out;
+
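+	/* For each CPU: zero every entry on all CPUs, write the test value on
+	 * one CPU only, then confirm lookups see it on that CPU and zero on
+	 * every other CPU.
+	 */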
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		/* clear value on all cpus */
+		values[0] = 0;
+		flags = BPF_F_ALL_CPUS;
+		for (i = 0; i < max_entries; i++) {
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
+				goto out;
+		}
+
+		/* update value on specified cpu */
+		for (i = 0; i < max_entries; i++) {
+			values[0] = value;
+			flags = (u64)cpu << 32 | BPF_F_CPU;
+			err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
+						   value_sz, flags);
+			if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
+				goto out;
+
+			/* lookup then check value on CPUs */
+			for (j = 0; j < nr_cpus; j++) {
+				flags = (u64)j << 32 | BPF_F_CPU;
+				err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
+							   value_sz, flags);
+				if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
+					goto out;
+				if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
+					       "bpf_map__lookup_elem value on specified cpu"))
+					goto out;
+			}
+		}
+	}
+
+	if (!test_batch)
+		goto out;
+
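+	/* Batch variants: an out-of-range CPU in elem_flags must fail with
+	 * -ERANGE, then per-CPU batch update/lookup is checked the same way
+	 * as the single-element ops above.
+	 */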
+	batch_opts.elem_flags = (u64)nr_cpus << 32 | BPF_F_CPU;
+	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+	if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_batch -ERANGE"))
+		goto out;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		memset(values, 0, value_sz_total);
+
+		/* clear values across all CPUs */
+		batch_opts.elem_flags = BPF_F_ALL_CPUS;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
+			goto out;
+
+		/* update values on specified CPU */
+		for (i = 0; i < max_entries; i++)
+			values[i] = value;
+
+		batch_opts.elem_flags = (u64)cpu << 32 | BPF_F_CPU;
+		err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &batch_opts);
+		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
+			goto out;
+
+		/* lookup values on specified CPU */
+		memset(values, 0, max_entries * value_sz);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++)
+			if (!ASSERT_EQ(values[i], value,
+				       "bpf_map_lookup_batch value on specified cpu"))
+				goto out;
+
+		/* lookup values from all CPUs */
+		batch = 0;
+		count = max_entries;
+		batch_opts.elem_flags = 0;
+		memset(values_percpu, 0, roundup(value_sz, 8) * nr_cpus * max_entries);
+		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values_percpu, &count,
+					   &batch_opts);
+		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
+			goto out;
+
+		for (i = 0; i < max_entries; i++) {
+			values_row = (void *)values_percpu +
+				     roundup(value_sz, 8) * i * nr_cpus;
+			for (j = 0; j < nr_cpus; j++) {
+				v = *(u32 *)(values_row + roundup(value_sz, 8) * j);
+				if (!ASSERT_EQ(v, j != cpu ? 0 : value,
+					       "bpf_map_lookup_batch value all_cpus"))
+					goto out;
+			}
+		}
+	}
+
+out:
+	free(values_percpu);
+	free(values);
+}
+
+static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
+{
+	struct percpu_alloc_array *skel;
+	size_t key_sz = sizeof(int);
+	int *keys = NULL, i, err;
+	struct bpf_map *map;
+	u32 max_entries;
+
+	skel = percpu_alloc_array__open();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open"))
+		return;
+
+	map = skel->maps.percpu;
+	bpf_map__set_type(map, map_type);
+
+	err = percpu_alloc_array__load(skel);
+	if (!ASSERT_OK(err, "percpu_alloc_array__load"))
+		goto out;
+
+	max_entries = bpf_map__max_entries(map);
+	keys = calloc(max_entries, key_sz);
+	if (!ASSERT_OK_PTR(keys, "calloc keys"))
+		goto out;
+
+	for (i = 0; i < max_entries; i++)
+		keys[i] = i;
+
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries, true);
+out:
+	free(keys);
+	percpu_alloc_array__destroy(skel);
+}
+
+static void test_percpu_array_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_ARRAY);
+}
+
+static void test_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_PERCPU_HASH);
+}
+
+static void test_lru_percpu_hash_cpu_flag(void)
+{
+	test_percpu_map_cpu_flag(BPF_MAP_TYPE_LRU_PERCPU_HASH);
+}
+
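+/* The percpu cgroup storage entry is reachable once the egress program is
+ * attached to the cgroup, so its key is fetched via bpf_map_get_next_key();
+ * batch ops are skipped for this map type (test_batch == false).
+ */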
+static void test_percpu_cgroup_storage_cpu_flag(void)
+{
+	struct percpu_alloc_array *skel = NULL;
+	struct bpf_cgroup_storage_key key;
+	int cgroup, prog_fd, err;
+	struct bpf_map *map;
+
+	cgroup = create_and_get_cgroup("/cg_percpu");
+	if (!ASSERT_GE(cgroup, 0, "create_and_get_cgroup")) {
+		cleanup_cgroup_environment();
+		return;
+	}
+
+	err = join_cgroup("/cg_percpu");
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto out;
+
+	skel = percpu_alloc_array__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "percpu_alloc_array__open_and_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(skel->progs.cgroup_egress);
+	err = bpf_prog_attach(prog_fd, cgroup, BPF_CGROUP_INET_EGRESS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	map = skel->maps.percpu_cgroup_storage;
+	err = bpf_map_get_next_key(bpf_map__fd(map), NULL, &key);
+	if (!ASSERT_OK(err, "bpf_map_get_next_key"))
+		goto out;
+
+	test_percpu_map_op_cpu_flag(map, &key, sizeof(key), 1, false);
+out:
+	bpf_prog_detach2(-1, cgroup, BPF_CGROUP_INET_EGRESS);
+	close(cgroup);
+	cleanup_cgroup_environment();
+	percpu_alloc_array__destroy(skel);
+}
+
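+/* BPF_F_CPU and BPF_F_ALL_CPUS apply only to per-CPU maps; plain array and
+ * hash maps must reject them for both single-element and batch operations.
+ */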
+static void test_map_op_cpu_flag(enum bpf_map_type map_type)
+{
+	u32 max_entries = 1, count = max_entries;
+	u64 flags, batch = 0, val = 0;
+	int err, map_fd, key = 0;
+	LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
+
+	map_fd = bpf_map_create(map_type, "test_cpu_flag", sizeof(int), sizeof(u64), max_entries,
+				NULL);
+	if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
+		return;
+
+	flags = BPF_F_ALL_CPUS;
+	err = bpf_map_update_elem(map_fd, &key, &val, flags);
+	ASSERT_ERR(err, "bpf_map_update_elem all_cpus");
+
+	batch_opts.elem_flags = BPF_F_ALL_CPUS;
+	err = bpf_map_update_batch(map_fd, &key, &val, &count, &batch_opts);
+	ASSERT_ERR(err, "bpf_map_update_batch all_cpus");
+
+	flags = BPF_F_CPU;
+	err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);
+	ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu");
+
+	batch_opts.elem_flags = BPF_F_CPU;
+	err = bpf_map_lookup_batch(map_fd, NULL, &batch, &key, &val, &count, &batch_opts);
+	ASSERT_ERR(err, "bpf_map_lookup_batch cpu");
+
+	close(map_fd);
+}
+
+static void test_array_cpu_flag(void)
+{
+	test_map_op_cpu_flag(BPF_MAP_TYPE_ARRAY);
+}
+
+static void test_hash_cpu_flag(void)
+{
+	test_map_op_cpu_flag(BPF_MAP_TYPE_HASH);
+}
+
 void test_percpu_alloc(void)
 {
 	if (test__start_subtest("array"))
@@ -125,4 +425,16 @@ void test_percpu_alloc(void)
 		test_cgrp_local_storage();
 	if (test__start_subtest("failure_tests"))
 		test_failure();
+	if (test__start_subtest("cpu_flag_percpu_array"))
+		test_percpu_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_hash"))
+		test_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_lru_percpu_hash"))
+		test_lru_percpu_hash_cpu_flag();
+	if (test__start_subtest("cpu_flag_percpu_cgroup_storage"))
+		test_percpu_cgroup_storage_cpu_flag();
+	if (test__start_subtest("cpu_flag_array"))
+		test_array_cpu_flag();
+	if (test__start_subtest("cpu_flag_hash"))
+		test_hash_cpu_flag();
 }