@@ -681,6 +681,164 @@ int test_dynptr_copy_xdp(struct xdp_md *xdp)
 	return XDP_DROP;
 }
 
+char memset_zero_data[] = "data to be zeroed";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero(void *ctx)
+{
+	__u32 data_sz = sizeof(memset_zero_data);
+	char zeroes[32] = {'\0'};
+	struct bpf_dynptr ptr;
+
+	err = bpf_dynptr_from_mem(memset_zero_data, data_sz, 0, &ptr);
+	err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, 0);
+	err = err ?: bpf_memcmp(zeroes, memset_zero_data, data_sz);
+
+	return 0;
+}
+
+#define DYNPTR_MEMSET_VAL 42
+
+char memset_notzero_data[] = "data to be overwritten";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_notzero(void *ctx)
+{
+	u32 data_sz = sizeof(memset_notzero_data);
+	struct bpf_dynptr ptr;
+	char expected[32];
+
+	__builtin_memset(expected, DYNPTR_MEMSET_VAL, data_sz);
+
+	err = bpf_dynptr_from_mem(memset_notzero_data, data_sz, 0, &ptr);
+	err = err ?: bpf_dynptr_memset(&ptr, 0, data_sz, DYNPTR_MEMSET_VAL);
+	err = err ?: bpf_memcmp(expected, memset_notzero_data, data_sz);
+
+	return 0;
+}
+
+char memset_zero_offset_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_offset(void *ctx)
+{
+	char expected[] = "data to \0\0\0\0eroed partially";
+	__u32 data_sz = sizeof(memset_zero_offset_data);
+	struct bpf_dynptr ptr;
+
+	err = bpf_dynptr_from_mem(memset_zero_offset_data, data_sz, 0, &ptr);
+	err = err ?: bpf_dynptr_memset(&ptr, 8, 4, 0);
+	err = err ?: bpf_memcmp(expected, memset_zero_offset_data, data_sz);
+
+	return 0;
+}
+
+char memset_zero_adjusted_data[] = "data to be zeroed partially";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_zero_adjusted(void *ctx)
+{
+	char expected[] = "data\0\0\0\0be zeroed partially";
+	__u32 data_sz = sizeof(memset_zero_adjusted_data);
+	struct bpf_dynptr ptr;
+
+	err = bpf_dynptr_from_mem(memset_zero_adjusted_data, data_sz, 0, &ptr);
+	err = err ?: bpf_dynptr_adjust(&ptr, 4, 8);
+	err = err ?: bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+	err = err ?: bpf_memcmp(expected, memset_zero_adjusted_data, data_sz);
+
+	return 0;
+}
+
+char memset_overflow_data[] = "memset overflow data";
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow(void *ctx)
+{
+	__u32 data_sz = sizeof(memset_overflow_data);
+	struct bpf_dynptr ptr;
+	int ret;
+
+	err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+	ret = bpf_dynptr_memset(&ptr, 0, data_sz + 1, 0);
+	if (ret != -E2BIG)
+		err = 1;
+
+	return 0;
+}
+
+SEC("?tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_memset_overflow_offset(void *ctx)
+{
+	__u32 data_sz = sizeof(memset_overflow_data);
+	struct bpf_dynptr ptr;
+	int ret;
+
+	err = bpf_dynptr_from_mem(memset_overflow_data, data_sz, 0, &ptr);
+	ret = bpf_dynptr_memset(&ptr, 1, data_sz, 0);
+	if (ret != -E2BIG)
+		err = 1;
+
+	return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_memset_readonly(struct __sk_buff *skb)
+{
+	struct bpf_dynptr ptr;
+	int ret;
+
+	err = bpf_dynptr_from_skb(skb, 0, &ptr);
+
+	/* cgroup skbs are read only, memset should fail */
+	ret = bpf_dynptr_memset(&ptr, 0, bpf_dynptr_size(&ptr), 0);
+	if (ret != -EINVAL)
+		err = 1;
+
+	return 0;
+}
+
+#define min_t(type, x, y) ({	\
+	type __x = (x);		\
+	type __y = (y);		\
+	__x < __y ? __x : __y; })
+
+SEC("xdp")
+int test_dynptr_memset_xdp_chunks(struct xdp_md *xdp)
+{
+	u32 data_sz, chunk_sz, offset = 0;
+	const int max_chunks = 200;
+	struct bpf_dynptr ptr_xdp;
+	char expected_buf[32];
+	char buf[32];
+	int i;
+
+	__builtin_memset(expected_buf, DYNPTR_MEMSET_VAL, sizeof(expected_buf));
+
+	/* ptr_xdp is backed by non-contiguous memory */
+	bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+	data_sz = bpf_dynptr_size(&ptr_xdp);
+
+	err = bpf_dynptr_memset(&ptr_xdp, 0, data_sz, DYNPTR_MEMSET_VAL);
+	if (err)
+		goto out;
+
+	bpf_for(i, 0, max_chunks) {
+		offset = i * sizeof(buf);
+		if (offset >= data_sz)
+			goto out;
+		chunk_sz = min_t(u32, sizeof(buf), data_sz - offset);
+		err = bpf_dynptr_read(&buf, chunk_sz, &ptr_xdp, offset, 0);
+		if (err)
+			goto out;
+		err = bpf_memcmp(buf, expected_buf, sizeof(buf));
+		if (err)
+			goto out;
+	}
+out:
+	return XDP_DROP;
+}
+
 void *user_ptr;
 /* Contains the copy of the data pointed by user_ptr.
  * Size 384 to make it not fit into a single kernel chunk when copying