@@ -680,3 +680,233 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp)
680
680
bpf_ringbuf_discard_dynptr (& ptr_buf , 0 );
681
681
return XDP_DROP ;
682
682
}
683
+
684
/* Address of the buffer in the test process's user memory; read through
 * the *_user dynptr kfuncs below (set by the userspace side of the test —
 * presumably before the programs run; confirm against the runner). */
void *user_ptr;
/* Contains the copy of the data pointed by user_ptr.
 * Size 384 to make it not fit into a single kernel chunk when copying
 * but less than the maximum bpf stack size (512).
 */
char expected_str[384];
/* Copy lengths exercised on each loop iteration, covering empty, tiny and
 * chunk-boundary sizes. First entry is a placeholder — NOTE(review):
 * presumably overwritten by the userspace runner; verify. */
__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257};

/* Common signature of the probe_read/copy_from_user dynptr kfuncs and of
 * the local wrappers, so each test helper can take the reader function as
 * a callback parameter. */
typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off,
				    u32 size, const void *unsafe_ptr);
694
+
695
+ /* Returns the offset just before the end of the maximum sized xdp fragment.
696
+ * Any write larger than 32 bytes will be split between 2 fragments.
697
+ */
698
+ __u32 xdp_near_frag_end_offset (void )
699
+ {
700
+ const __u32 headroom = 256 ;
701
+ const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof (struct skb_shared_info );
702
+
703
+ /* 32 bytes before the approximate end of the fragment */
704
+ return max_frag_size - 32 ;
705
+ }
706
+
707
+ /* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks
708
+ * of type bpf_read_dynptr_fn_t to prevent compiler from generating
709
+ * indirect calls that make program fail to load with "unknown opcode" error.
710
+ */
711
/* Core check for the non-string read kfuncs: for each length in test_len[],
 * copy from @ptr into a ringbuf-backed dynptr via @bpf_read_dynptr_fn, read
 * the data back and compare it against expected_str.
 *
 * @ptr: source address (user or kernel memory, depending on the callback).
 * @bpf_read_dynptr_fn: callback that fills the dynptr from @ptr.
 *
 * Outcome is reported via the global 'err' (stays 0 on success, becomes
 * non-zero on any helper failure or data mismatch).
 */
static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	char buf[sizeof(expected_str)];
	struct bpf_dynptr ptr_buf;
	int i;

	/* Gate on the expected process (pid is a global — presumably set by
	 * the userspace runner; confirm). */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		/* 'err ?:' chaining turns every later step into a no-op once
		 * any earlier step has failed. */
		err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr);
		if (len > sizeof(buf))
			break;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);

		if (err || bpf_memcmp(expected_str, buf, len))
			err = 1;

		/* Reset buffer and dynptr */
		__builtin_memset(buf, 0, sizeof(buf));
		err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0);
	}
	bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
}
739
+
740
/* String-kfunc variant of test_dynptr_probe(): the callback returns the
 * number of bytes copied (compared against the requested length below)
 * rather than an error code, and the copied data must be NUL-terminated.
 * Failures are reported via the global 'err'.
 */
static __always_inline void test_dynptr_probe_str(void *ptr,
						  bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	char buf[sizeof(expected_str)];
	struct bpf_dynptr ptr_buf;
	__u32 cnt, i;

	/* Only run for the test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	/* NOTE(review): return value is ignored here, unlike in
	 * test_dynptr_probe(); a failed reserve would only surface
	 * indirectly through the reads below — confirm this is intended. */
	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr);
		if (cnt != len)
			err = 1;

		if (len > sizeof(buf))
			continue;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
		/* Zero-length copies have no content or terminator to check. */
		if (!len)
			continue;
		/* First len-1 bytes must match and the copy must end in NUL. */
		if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
			err = 1;
	}
	bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
}
769
+
770
/* Same check as test_dynptr_probe() but against an xdp dynptr, reading and
 * writing at an offset near the end of the first fragment so that larger
 * lengths straddle two fragments. Failures are reported via the global
 * 'err'.
 */
static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr,
						  bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	struct bpf_dynptr ptr_xdp;
	char buf[sizeof(expected_str)];
	__u32 off, i;

	/* Only run for the test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	off = xdp_near_frag_end_offset();
	/* No early return on failure (unlike test_dynptr_probe_str_xdp());
	 * the 'err ?:' chains below make each step a no-op instead. */
	err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
		if (len > sizeof(buf))
			continue;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
		if (err || bpf_memcmp(expected_str, buf, len))
			err = 1;
		/* Reset buffer and dynptr */
		__builtin_memset(buf, 0, sizeof(buf));
		err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
	}
	/* NOTE(review): no discard here — xdp dynptrs presumably need no
	 * explicit release, unlike the ringbuf-backed ones; confirm. */
}
797
+
798
/* String-kfunc variant of test_dynptr_probe_xdp(): checks the copied byte
 * count against the requested length, verifies the data is NUL-terminated,
 * and exercises an offset near a fragment boundary. Failures are reported
 * via the global 'err'.
 */
static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr,
						      bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
{
	struct bpf_dynptr ptr_xdp;
	char buf[sizeof(expected_str)];
	__u32 cnt, off, i;

	/* Only run for the test process. */
	if (bpf_get_current_pid_tgid() >> 32 != pid)
		return;

	off = xdp_near_frag_end_offset();
	err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
	if (err)
		return;

	bpf_for(i, 0, ARRAY_SIZE(test_len)) {
		__u32 len = test_len[i];

		/* The string callback returns the copied byte count. */
		cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
		if (cnt != len)
			err = 1;

		if (len > sizeof(buf))
			continue;
		err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);

		/* Zero-length copies have no content or terminator to check. */
		if (!len)
			continue;
		/* First len-1 bytes must match and the copy must end in NUL. */
		if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
			err = 1;

		/* Reset buffer and dynptr for the next iteration. */
		__builtin_memset(buf, 0, sizeof(buf));
		err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
	}
}
833
+
834
+ SEC ("xdp" )
835
+ int test_probe_read_user_dynptr (struct xdp_md * xdp )
836
+ {
837
+ test_dynptr_probe (user_ptr , bpf_probe_read_user_dynptr );
838
+ if (!err )
839
+ test_dynptr_probe_xdp (xdp , user_ptr , bpf_probe_read_user_dynptr );
840
+ return XDP_PASS ;
841
+ }
842
+
843
+ SEC ("xdp" )
844
+ int test_probe_read_kernel_dynptr (struct xdp_md * xdp )
845
+ {
846
+ test_dynptr_probe (expected_str , bpf_probe_read_kernel_dynptr );
847
+ if (!err )
848
+ test_dynptr_probe_xdp (xdp , expected_str , bpf_probe_read_kernel_dynptr );
849
+ return XDP_PASS ;
850
+ }
851
+
852
+ SEC ("xdp" )
853
+ int test_probe_read_user_str_dynptr (struct xdp_md * xdp )
854
+ {
855
+ test_dynptr_probe_str (user_ptr , bpf_probe_read_user_str_dynptr );
856
+ if (!err )
857
+ test_dynptr_probe_str_xdp (xdp , user_ptr , bpf_probe_read_user_str_dynptr );
858
+ return XDP_PASS ;
859
+ }
860
+
861
+ SEC ("xdp" )
862
+ int test_probe_read_kernel_str_dynptr (struct xdp_md * xdp )
863
+ {
864
+ test_dynptr_probe_str (expected_str , bpf_probe_read_kernel_str_dynptr );
865
+ if (!err )
866
+ test_dynptr_probe_str_xdp (xdp , expected_str , bpf_probe_read_kernel_str_dynptr );
867
+ return XDP_PASS ;
868
+ }
869
+
870
/* Sleepable fentry on sys_nanosleep — fentry.s allows the sleepable
 * copy_from_user kfunc to be used here. Only the ringbuf-dynptr path is
 * exercised; no xdp context is available from this hook. */
SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
int test_copy_from_user_dynptr(void *ctx)
{
	test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr);
	return 0;
}
876
+
877
/* Sleepable fentry on sys_nanosleep exercising the string variant of the
 * copy_from_user dynptr kfunc (ringbuf path only). */
SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
int test_copy_from_user_str_dynptr(void *ctx)
{
	test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr);
	return 0;
}
883
+
884
+ static int bpf_copy_data_from_user_task (struct bpf_dynptr * dptr , u32 off ,
885
+ u32 size , const void * unsafe_ptr )
886
+ {
887
+ struct task_struct * task = bpf_get_current_task_btf ();
888
+
889
+ return bpf_copy_from_user_task_dynptr (dptr , off , size , unsafe_ptr , task );
890
+ }
891
+
892
+ static int bpf_copy_data_from_user_task_str (struct bpf_dynptr * dptr , u32 off ,
893
+ u32 size , const void * unsafe_ptr )
894
+ {
895
+ struct task_struct * task = bpf_get_current_task_btf ();
896
+
897
+ return bpf_copy_from_user_task_str_dynptr (dptr , off , size , unsafe_ptr , task );
898
+ }
899
+
900
/* Sleepable fentry on sys_nanosleep exercising the per-task
 * copy_from_user dynptr kfunc through the local callback wrapper
 * (ringbuf path only). */
SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
int test_copy_from_user_task_dynptr(void *ctx)
{
	test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task);
	return 0;
}
906
+
907
/* Sleepable fentry on sys_nanosleep exercising the per-task string
 * copy_from_user dynptr kfunc through the local callback wrapper
 * (ringbuf path only). */
SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
int test_copy_from_user_task_str_dynptr(void *ctx)
{
	test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str);
	return 0;
}
0 commit comments