Skip to content

Commit 95b8ebe

Browse files
pchaigno and guidosarducci
authored and committed
bpf: Craft non-linear skbs in BPF_PROG_TEST_RUN
This patch adds support for crafting non-linear skbs in BPF test runs for tc programs. The size of the linear area is given by ctx->data_end, with a minimum of ETH_HLEN always pulled in the linear area. If ctx or ctx->data_end are null, a linear skb is used. This is particularly useful to test support for non-linear skbs in large codebases such as Cilium. We've had multiple bugs in the past few years where we were missing calls to bpf_skb_pull_data(). This support in BPF_PROG_TEST_RUN would allow us to automatically cover this case in our BPF tests. LWT program types are currently excluded in this patch. Allowing non-linear skbs for these programs would require a bit more care because they are able to call helpers (ex., bpf_clone_redirect, bpf_redirect) that themselves call eth_type_trans(). eth_type_trans() assumes there are at least ETH_HLEN bytes in the linear area. That may not be true for LWT programs as we already pulled the L2 header via the eth_type_trans() call in bpf_prog_test_run_skb(). In addition to the selftests introduced later in the series, this patch was tested by enabling non-linear skbs for all tc selftests programs and checking test failures were expected. Suggested-by: Daniel Borkmann <[email protected]> Signed-off-by: Paul Chaignon <[email protected]> Signed-off-by: Martin KaFai Lau <[email protected]> Tested-by: [email protected] Link: https://patch.msgid.link/5694d4d1af31bddf974afcb1bbb1e28b8998dcd0.1760037899.git.paul.chaignon@gmail.com
1 parent 95ff6ed commit 95b8ebe

File tree

1 file changed

+82
-21
lines changed

1 file changed

+82
-21
lines changed

net/bpf/test_run.c

Lines changed: 82 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -436,7 +436,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
436436

437437
static int bpf_test_finish(const union bpf_attr *kattr,
438438
union bpf_attr __user *uattr, const void *data,
439-
struct skb_shared_info *sinfo, u32 size,
439+
struct skb_shared_info *sinfo, u32 size, u32 frag_size,
440440
u32 retval, u32 duration)
441441
{
442442
void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
@@ -453,7 +453,7 @@ static int bpf_test_finish(const union bpf_attr *kattr,
453453
}
454454

455455
if (data_out) {
456-
int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
456+
int len = sinfo ? copy_size - frag_size : copy_size;
457457

458458
if (len < 0) {
459459
err = -ENOSPC;
@@ -899,6 +899,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
899899
/* cb is allowed */
900900

901901
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
902+
offsetof(struct __sk_buff, data_end)))
903+
return -EINVAL;
904+
905+
/* data_end is allowed, but not copied to skb */
906+
907+
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, data_end),
902908
offsetof(struct __sk_buff, tstamp)))
903909
return -EINVAL;
904910

@@ -973,34 +979,39 @@ static struct proto bpf_dummy_proto = {
973979
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
974980
union bpf_attr __user *uattr)
975981
{
976-
bool is_l2 = false, is_direct_pkt_access = false;
982+
bool is_l2 = false, is_direct_pkt_access = false, is_lwt = false;
983+
u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
977984
struct net *net = current->nsproxy->net_ns;
978985
struct net_device *dev = net->loopback_dev;
979-
u32 size = kattr->test.data_size_in;
986+
u32 headroom = NET_SKB_PAD + NET_IP_ALIGN;
987+
u32 linear_sz = kattr->test.data_size_in;
980988
u32 repeat = kattr->test.repeat;
981989
struct __sk_buff *ctx = NULL;
982990
struct sk_buff *skb = NULL;
983991
struct sock *sk = NULL;
984992
u32 retval, duration;
985993
int hh_len = ETH_HLEN;
986-
void *data;
994+
void *data = NULL;
987995
int ret;
988996

989997
if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
990998
kattr->test.cpu || kattr->test.batch_size)
991999
return -EINVAL;
9921000

993-
if (size < ETH_HLEN)
1001+
if (kattr->test.data_size_in < ETH_HLEN)
9941002
return -EINVAL;
9951003

9961004
switch (prog->type) {
9971005
case BPF_PROG_TYPE_SCHED_CLS:
9981006
case BPF_PROG_TYPE_SCHED_ACT:
1007+
is_direct_pkt_access = true;
9991008
is_l2 = true;
1000-
fallthrough;
1009+
break;
10011010
case BPF_PROG_TYPE_LWT_IN:
10021011
case BPF_PROG_TYPE_LWT_OUT:
10031012
case BPF_PROG_TYPE_LWT_XMIT:
1013+
is_lwt = true;
1014+
fallthrough;
10041015
case BPF_PROG_TYPE_CGROUP_SKB:
10051016
is_direct_pkt_access = true;
10061017
break;
@@ -1012,9 +1023,24 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
10121023
if (IS_ERR(ctx))
10131024
return PTR_ERR(ctx);
10141025

1015-
data = bpf_test_init(kattr, kattr->test.data_size_in,
1016-
size, NET_SKB_PAD + NET_IP_ALIGN,
1017-
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1026+
if (ctx) {
1027+
if (ctx->data_end > kattr->test.data_size_in || ctx->data || ctx->data_meta) {
1028+
ret = -EINVAL;
1029+
goto out;
1030+
}
1031+
if (ctx->data_end) {
1032+
/* Non-linear LWT test_run is unsupported for now. */
1033+
if (is_lwt) {
1034+
ret = -EINVAL;
1035+
goto out;
1036+
}
1037+
linear_sz = max(ETH_HLEN, ctx->data_end);
1038+
}
1039+
}
1040+
1041+
linear_sz = min_t(u32, linear_sz, PAGE_SIZE - headroom - tailroom);
1042+
1043+
data = bpf_test_init(kattr, linear_sz, linear_sz, headroom, tailroom);
10181044
if (IS_ERR(data)) {
10191045
ret = PTR_ERR(data);
10201046
data = NULL;
@@ -1038,7 +1064,43 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
10381064
data = NULL; /* data released via kfree_skb */
10391065

10401066
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1041-
__skb_put(skb, size);
1067+
__skb_put(skb, linear_sz);
1068+
1069+
if (unlikely(kattr->test.data_size_in > linear_sz)) {
1070+
void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1071+
struct skb_shared_info *sinfo = skb_shinfo(skb);
1072+
u32 copied = linear_sz;
1073+
1074+
while (copied < kattr->test.data_size_in) {
1075+
struct page *page;
1076+
u32 data_len;
1077+
1078+
if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1079+
ret = -ENOMEM;
1080+
goto out;
1081+
}
1082+
1083+
page = alloc_page(GFP_KERNEL);
1084+
if (!page) {
1085+
ret = -ENOMEM;
1086+
goto out;
1087+
}
1088+
1089+
data_len = min_t(u32, kattr->test.data_size_in - copied,
1090+
PAGE_SIZE);
1091+
skb_fill_page_desc(skb, sinfo->nr_frags, page, 0, data_len);
1092+
1093+
if (copy_from_user(page_address(page), data_in + copied,
1094+
data_len)) {
1095+
ret = -EFAULT;
1096+
goto out;
1097+
}
1098+
skb->data_len += data_len;
1099+
skb->truesize += PAGE_SIZE;
1100+
skb->len += data_len;
1101+
copied += data_len;
1102+
}
1103+
}
10421104

10431105
if (ctx && ctx->ifindex > 1) {
10441106
dev = dev_get_by_index(net, ctx->ifindex);
@@ -1118,12 +1180,11 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
11181180

11191181
convert_skb_to___skb(skb, ctx);
11201182

1121-
size = skb->len;
1122-
/* bpf program can never convert linear skb to non-linear */
1123-
if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1124-
size = skb_headlen(skb);
1125-
ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1126-
duration);
1183+
if (skb_is_nonlinear(skb))
1184+
/* bpf program can never convert linear skb to non-linear */
1185+
WARN_ON_ONCE(linear_sz == kattr->test.data_size_in);
1186+
ret = bpf_test_finish(kattr, uattr, skb->data, skb_shinfo(skb), skb->len,
1187+
skb->data_len, retval, duration);
11271188
if (!ret)
11281189
ret = bpf_ctx_finish(kattr, uattr, ctx,
11291190
sizeof(struct __sk_buff));
@@ -1331,7 +1392,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
13311392
goto out;
13321393

13331394
size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1334-
ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1395+
ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size, sinfo->xdp_frags_size,
13351396
retval, duration);
13361397
if (!ret)
13371398
ret = bpf_ctx_finish(kattr, uattr, ctx,
@@ -1422,7 +1483,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
14221483
goto out;
14231484

14241485
ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1425-
sizeof(flow_keys), retval, duration);
1486+
sizeof(flow_keys), 0, retval, duration);
14261487
if (!ret)
14271488
ret = bpf_ctx_finish(kattr, uattr, user_ctx,
14281489
sizeof(struct bpf_flow_keys));
@@ -1523,7 +1584,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
15231584
user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
15241585
}
15251586

1526-
ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1587+
ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, 0, retval, duration);
15271588
if (!ret)
15281589
ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
15291590

@@ -1723,7 +1784,7 @@ int bpf_prog_test_run_nf(struct bpf_prog *prog,
17231784
if (ret)
17241785
goto out;
17251786

1726-
ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1787+
ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, 0, retval, duration);
17271788

17281789
out:
17291790
kfree(user_ctx);

0 commit comments

Comments (0)