@@ -910,6 +910,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	/* cb is allowed */
 
 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
+			   offsetof(struct __sk_buff, data_end)))
+		return -EINVAL;
+
+	/* data_end is allowed, but not copied to skb */
+
+	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, data_end),
 			   offsetof(struct __sk_buff, tstamp)))
 		return -EINVAL;
 
@@ -984,9 +990,12 @@ static struct proto bpf_dummy_proto = {
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr)
 {
-	bool is_l2 = false, is_direct_pkt_access = false;
+	bool is_l2 = false, is_direct_pkt_access = false, is_lwt = false;
+	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct net *net = current->nsproxy->net_ns;
 	struct net_device *dev = net->loopback_dev;
+	u32 headroom = NET_SKB_PAD + NET_IP_ALIGN;
+	u32 linear_sz = kattr->test.data_size_in;
 	u32 size = kattr->test.data_size_in;
 	u32 repeat = kattr->test.repeat;
 	struct __sk_buff *ctx = NULL;
@@ -1007,11 +1016,14 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	switch (prog->type) {
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
+		is_direct_pkt_access = true;
 		is_l2 = true;
-		fallthrough;
+		break;
 	case BPF_PROG_TYPE_LWT_IN:
 	case BPF_PROG_TYPE_LWT_OUT:
 	case BPF_PROG_TYPE_LWT_XMIT:
+		is_lwt = true;
+		fallthrough;
 	case BPF_PROG_TYPE_CGROUP_SKB:
 		is_direct_pkt_access = true;
 		break;
@@ -1023,9 +1035,24 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	data = bpf_test_init(kattr, kattr->test.data_size_in,
-			     size, NET_SKB_PAD + NET_IP_ALIGN,
-			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (ctx) {
+		if (ctx->data_end > kattr->test.data_size_in || ctx->data || ctx->data_meta) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (ctx->data_end) {
+			/* Non-linear LWT test_run is unsupported for now. */
+			if (is_lwt) {
+				ret = -EINVAL;
+				goto out;
+			}
+			linear_sz = max(ETH_HLEN, ctx->data_end);
+		}
+	}
+
+	linear_sz = min_t(u32, linear_sz, PAGE_SIZE - headroom - tailroom);
+
+	data = bpf_test_init(kattr, linear_sz, linear_sz, headroom, tailroom);
 	if (IS_ERR(data)) {
 		ret = PTR_ERR(data);
 		data = NULL;
@@ -1044,12 +1071,49 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 		ret = -ENOMEM;
 		goto out;
 	}
+
 	skb->sk = sk;
 
 	data = NULL; /* data released via kfree_skb */
 
 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-	__skb_put(skb, size);
+	__skb_put(skb, linear_sz);
+
+	if (unlikely(kattr->test.data_size_in > linear_sz)) {
+		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+		struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+		size = linear_sz;
+		while (size < kattr->test.data_size_in) {
+			struct page *page;
+			u32 data_len;
+
+			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			data_len = min_t(u32, kattr->test.data_size_in - size,
+					 PAGE_SIZE);
+			skb_fill_page_desc(skb, sinfo->nr_frags, page, 0, data_len);
+
+			if (copy_from_user(page_address(page), data_in + size,
+					   data_len)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			skb->data_len += data_len;
+			skb->truesize += PAGE_SIZE;
+			skb->len += data_len;
+			size += data_len;
+		}
+	}
 
 	if (ctx && ctx->ifindex > 1) {
 		dev = dev_get_by_index(net, ctx->ifindex);
@@ -1130,9 +1194,11 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	convert_skb_to___skb(skb, ctx);
 
 	size = skb->len;
-	/* bpf program can never convert linear skb to non-linear */
-	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
+	if (skb_is_nonlinear(skb)) {
+		/* bpf program can never convert linear skb to non-linear */
+		WARN_ON_ONCE(linear_sz == size);
 		size = skb_headlen(skb);
+	}
 	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
 			      duration);
 	if (!ret)
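
For reference, a minimal userspace sketch (not part of this commit) of how the new non-linear path could be exercised through libbpf's bpf_prog_test_run_opts(): setting ctx.data_end caps the linear area, so any input beyond it is copied into page fragments by the loop added above. The program fd, payload size, and data_end value below are illustrative assumptions, not values taken from the patch.

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Sketch only: prog_fd is assumed to refer to an already-loaded
 * SCHED_CLS program; error handling is omitted for brevity.
 */
static int run_nonlinear_test(int prog_fd)
{
	unsigned char pkt[9000] = { 0 };	/* input larger than one page */
	struct __sk_buff ctx = {
		.data_end = 1024,		/* request a 1024-byte linear area */
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
	);

	/* Bytes past the (clamped) linear size end up in skb frags. */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}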