@@ -103,6 +103,7 @@ struct test {
 
 #define VLAN_HLEN 4
 
+static __u32 duration;
 struct test tests[] = {
         {
                 .name = "ipv4",
@@ -474,11 +475,87 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
         return 0;
 }
 
+static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
+{
+        int i, err, keys_fd;
+
+        keys_fd = bpf_map__fd(keys);
+        if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
+                return;
+
+        for (i = 0; i < ARRAY_SIZE(tests); i++) {
+                /* Keep in sync with 'flags' from eth_get_headlen. */
+                __u32 eth_get_headlen_flags =
+                        BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
+                struct bpf_prog_test_run_attr tattr = {};
+                struct bpf_flow_keys flow_keys = {};
+                __u32 key = (__u32)(tests[i].keys.sport) << 16 |
+                            tests[i].keys.dport;
+
+                /* For skb-less case we can't pass input flags; run
+                 * only the tests that have a matching set of flags.
+                 */
+
+                if (tests[i].flags != eth_get_headlen_flags)
+                        continue;
+
+                err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
+                CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
+
+                err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
+                CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
+
+                CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
+                CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+
+                err = bpf_map_delete_elem(keys_fd, &key);
+                CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
+        }
+}
+
+static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
+{
+        int err, prog_fd;
+
+        prog_fd = bpf_program__fd(skel->progs._dissect);
+        if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
+                return;
+
+        err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+        if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
+                return;
+
+        run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+
+        err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
+        CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
+}
+
+static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+{
+        struct bpf_link *link;
+        int err, net_fd;
+
+        net_fd = open("/proc/self/ns/net", O_RDONLY);
+        if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
+                return;
+
+        link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
+        if (CHECK(IS_ERR(link), "attach_netns", "err %ld\n", PTR_ERR(link)))
+                goto out_close;
+
+        run_tests_skb_less(tap_fd, skel->maps.last_dissection);
+
+        err = bpf_link__destroy(link);
+        CHECK(err, "bpf_link__destroy", "err %d\n", err);
+out_close:
+        close(net_fd);
+}
+
 void test_flow_dissector(void)
 {
         int i, err, prog_fd, keys_fd = -1, tap_fd;
         struct bpf_flow *skel;
-        __u32 duration = 0;
 
         skel = bpf_flow__open_and_load();
         if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
@@ -526,45 +603,17 @@ void test_flow_dissector(void)
          * via BPF map in this case.
          */
 
-        err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
-        CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);
-
         tap_fd = create_tap("tap0");
         CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
         err = ifup("tap0");
         CHECK(err, "ifup", "err %d errno %d\n", err, errno);
 
-        for (i = 0; i < ARRAY_SIZE(tests); i++) {
-                /* Keep in sync with 'flags' from eth_get_headlen. */
-                __u32 eth_get_headlen_flags =
-                        BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
-                struct bpf_prog_test_run_attr tattr = {};
-                struct bpf_flow_keys flow_keys = {};
-                __u32 key = (__u32)(tests[i].keys.sport) << 16 |
-                            tests[i].keys.dport;
-
-                /* For skb-less case we can't pass input flags; run
-                 * only the tests that have a matching set of flags.
-                 */
-
-                if (tests[i].flags != eth_get_headlen_flags)
-                        continue;
-
-                err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
-                CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
-
-                err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
-                CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
-
-                CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
-                CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
-
-                err = bpf_map_delete_elem(keys_fd, &key);
-                CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
-        }
+        /* Test direct prog attachment */
+        test_skb_less_prog_attach(skel, tap_fd);
+        /* Test indirect prog attachment via link */
+        test_skb_less_link_create(skel, tap_fd);
 
         close(tap_fd);
-        bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
 out_destroy_skel:
         bpf_flow__destroy(skel);
 }
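
As context for the link-based path that test_skb_less_link_create() exercises above, the sketch below shows the same libbpf calls in a minimal standalone form, without the CHECK() test harness. It assumes the bpf_flow skeleton header (bpf_flow.skel.h) used by this test; the wrapper name attach_flow_dissector_once() is purely illustrative and not part of the patch.

/* Illustrative sketch only -- not part of this patch. */
#include <fcntl.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include "bpf_flow.skel.h"	/* skeleton generated by bpftool */

static int attach_flow_dissector_once(void)
{
	struct bpf_flow *skel;
	struct bpf_link *link;
	int net_fd, err = -1;

	skel = bpf_flow__open_and_load();
	if (!skel)
		return -1;

	/* The flow dissector hook is per network namespace, so the link
	 * is created against an fd for the target namespace.
	 */
	net_fd = open("/proc/self/ns/net", O_RDONLY);
	if (net_fd < 0)
		goto out_skel;

	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
	if (libbpf_get_error(link))
		goto out_close;

	/* ... send traffic through a tap device and read the results
	 * back from skel->maps.last_dissection here ...
	 */

	err = bpf_link__destroy(link);	/* also detaches the dissector */
out_close:
	close(net_fd);
out_skel:
	bpf_flow__destroy(skel);
	return err;
}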