@@ -447,7 +447,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 
 static int bpf_test_finish(const union bpf_attr *kattr,
			    union bpf_attr __user *uattr, const void *data,
-			   struct skb_shared_info *sinfo, u32 size,
+			   struct skb_shared_info *sinfo, u32 size, u32 frag_size,
			    u32 retval, u32 duration)
 {
 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
@@ -464,7 +464,7 @@ static int bpf_test_finish(const union bpf_attr *kattr,
 	}
 
 	if (data_out) {
-		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
+		int len = sinfo ? copy_size - frag_size : copy_size;
 
 		if (len < 0) {
 			err = -ENOSPC;
@@ -910,6 +910,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	/* cb is allowed */
 
 	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
+			   offsetof(struct __sk_buff, data_end)))
+		return -EINVAL;
+
+	/* data_end is allowed, but not copied to skb */
+
+	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, data_end),
 			   offsetof(struct __sk_buff, tstamp)))
 		return -EINVAL;
 
@@ -984,34 +990,39 @@ static struct proto bpf_dummy_proto = {
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			   union bpf_attr __user *uattr)
 {
-	bool is_l2 = false, is_direct_pkt_access = false;
+	bool is_l2 = false, is_direct_pkt_access = false, is_lwt = false;
+	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct net *net = current->nsproxy->net_ns;
 	struct net_device *dev = net->loopback_dev;
-	u32 size = kattr->test.data_size_in;
+	u32 headroom = NET_SKB_PAD + NET_IP_ALIGN;
+	u32 linear_sz = kattr->test.data_size_in;
 	u32 repeat = kattr->test.repeat;
 	struct __sk_buff *ctx = NULL;
 	struct sk_buff *skb = NULL;
 	struct sock *sk = NULL;
 	u32 retval, duration;
 	int hh_len = ETH_HLEN;
-	void *data;
+	void *data = NULL;
 	int ret;
 
 	if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
 	    kattr->test.cpu || kattr->test.batch_size)
 		return -EINVAL;
 
-	if (size < ETH_HLEN)
+	if (kattr->test.data_size_in < ETH_HLEN)
 		return -EINVAL;
 
 	switch (prog->type) {
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
+		is_direct_pkt_access = true;
 		is_l2 = true;
-		fallthrough;
+		break;
 	case BPF_PROG_TYPE_LWT_IN:
 	case BPF_PROG_TYPE_LWT_OUT:
 	case BPF_PROG_TYPE_LWT_XMIT:
+		is_lwt = true;
+		fallthrough;
 	case BPF_PROG_TYPE_CGROUP_SKB:
 		is_direct_pkt_access = true;
 		break;
@@ -1023,9 +1034,24 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	data = bpf_test_init(kattr, kattr->test.data_size_in,
-			     size, NET_SKB_PAD + NET_IP_ALIGN,
-			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (ctx) {
+		if (ctx->data_end > kattr->test.data_size_in || ctx->data || ctx->data_meta) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (ctx->data_end) {
+			/* Non-linear LWT test_run is unsupported for now. */
+			if (is_lwt) {
+				ret = -EINVAL;
+				goto out;
+			}
+			linear_sz = max(ETH_HLEN, ctx->data_end);
+		}
+	}
+
+	linear_sz = min_t(u32, linear_sz, PAGE_SIZE - headroom - tailroom);
+
+	data = bpf_test_init(kattr, linear_sz, linear_sz, headroom, tailroom);
 	if (IS_ERR(data)) {
 		ret = PTR_ERR(data);
 		data = NULL;
@@ -1049,7 +1075,43 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	data = NULL; /* data released via kfree_skb */
 
 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-	__skb_put(skb, size);
+	__skb_put(skb, linear_sz);
+
+	if (unlikely(kattr->test.data_size_in > linear_sz)) {
+		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+		struct skb_shared_info *sinfo = skb_shinfo(skb);
+		u32 copied = linear_sz;
+
+		while (copied < kattr->test.data_size_in) {
+			struct page *page;
+			u32 data_len;
+
+			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			data_len = min_t(u32, kattr->test.data_size_in - copied,
+					 PAGE_SIZE);
+			skb_fill_page_desc(skb, sinfo->nr_frags, page, 0, data_len);
+
+			if (copy_from_user(page_address(page), data_in + copied,
+					   data_len)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			skb->data_len += data_len;
+			skb->truesize += PAGE_SIZE;
+			skb->len += data_len;
+			copied += data_len;
+		}
+	}
 
10541116 if (ctx && ctx -> ifindex > 1 ) {
10551117 dev = dev_get_by_index (net , ctx -> ifindex );
@@ -1129,12 +1191,11 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 
 	convert_skb_to___skb(skb, ctx);
 
-	size = skb->len;
-	/* bpf program can never convert linear skb to non-linear */
-	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
-		size = skb_headlen(skb);
-	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
-			      duration);
+	if (skb_is_nonlinear(skb))
+		/* bpf program can never convert linear skb to non-linear */
+		WARN_ON_ONCE(linear_sz == kattr->test.data_size_in);
+	ret = bpf_test_finish(kattr, uattr, skb->data, skb_shinfo(skb), skb->len,
+			      skb->data_len, retval, duration);
 	if (!ret)
 		ret = bpf_ctx_finish(kattr, uattr, ctx,
 				     sizeof(struct __sk_buff));
@@ -1342,7 +1403,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 		goto out;
 
 	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
-	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
+	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size, sinfo->xdp_frags_size,
 			      retval, duration);
 	if (!ret)
 		ret = bpf_ctx_finish(kattr, uattr, ctx,
@@ -1433,7 +1494,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 		goto out;
 
 	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
-			      sizeof(flow_keys), retval, duration);
+			      sizeof(flow_keys), 0, retval, duration);
 	if (!ret)
 		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
 				     sizeof(struct bpf_flow_keys));
@@ -1534,7 +1595,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
 	}
 
-	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
+	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, 0, retval, duration);
 	if (!ret)
 		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
 
@@ -1734,7 +1795,7 @@ int bpf_prog_test_run_nf(struct bpf_prog *prog,
 	if (ret)
 		goto out;
 
-	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
+	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, 0, retval, duration);
 
 out:
 	kfree(user_ctx);
0 commit comments