
Commit 4c7b9cc

bpf: Support pulling non-linear xdp data
Add a kfunc, bpf_xdp_pull_data(), to support pulling data from xdp fragments for direct packet access. Similar to bpf_skb_pull_data(), bpf_xdp_pull_data() makes the first len bytes of data directly readable and writable in bpf programs. If len is larger than the linear data size, data in the fragments is copied into the linear region when there is still space available there, which is subject to the driver implementation.

A use case of the kfunc is to decapsulate headers residing in xdp fragments. It is possible for a NIC driver to place headers in xdp fragments. To keep using direct packet access for parsing and decapsulating the headers, users can pull the headers into the linear data area by calling bpf_xdp_pull_data() and then use bpf_xdp_adjust_head() to pop the header.

An unused argument, flags, is reserved for future extension (e.g., tossing the data instead of copying it to the linear data area).

Signed-off-by: Amery Hung <[email protected]>
1 parent f4b7b13 commit 4c7b9cc
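For illustration only (not part of this commit), a minimal bpf-side sketch of the decapsulation flow described above follows. The outer-header length OUTER_HDR_LEN, the program name, and the extern __ksym declaration are assumptions made for this sketch, not code from the kernel tree:

/*
 * Sketch only, not part of this commit. It illustrates the decapsulation
 * use case from the commit message: pull the outer header into the linear
 * area, parse it with direct packet access, then pop it with
 * bpf_xdp_adjust_head(). OUTER_HDR_LEN and the names below are assumptions.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define OUTER_HDR_LEN 16	/* assumed size of a custom outer header */

/* kfunc added by this commit, declared as a kernel symbol for BPF use */
extern int bpf_xdp_pull_data(struct xdp_md *xdp, __u32 len, __u64 flags) __ksym;

SEC("xdp.frags")	/* frags-aware program; libbpf sets BPF_F_XDP_HAS_FRAGS */
int decap_outer_hdr(struct xdp_md *ctx)
{
	void *data, *data_end;

	/* Make at least the first OUTER_HDR_LEN bytes directly accessible,
	 * even if the driver placed them in an xdp fragment. The reserved
	 * flags argument is passed as 0.
	 */
	if (bpf_xdp_pull_data(ctx, OUTER_HDR_LEN, 0))
		return XDP_DROP;

	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;
	if (data + OUTER_HDR_LEN > data_end)
		return XDP_DROP;

	/* ... parse the outer header here with direct packet access ... */

	/* Pop the outer header now that it sits in the linear area. */
	if (bpf_xdp_adjust_head(ctx, OUTER_HDR_LEN))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

The frags-aware section name is used because the headers being pulled may live in xdp fragments, which only multi-buffer-enabled programs receive.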

1 file changed: +52 −0 lines changed


net/core/filter.c

Lines changed: 52 additions & 0 deletions
@@ -12211,6 +12211,57 @@ __bpf_kfunc int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops,
 	return 0;
 }
 
+__bpf_kfunc int bpf_xdp_pull_data(struct xdp_md *x, u32 len, u64 flags)
+{
+	struct xdp_buff *xdp = (struct xdp_buff *)x;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+	void *data_end, *data_hard_end = xdp_data_hard_end(xdp);
+	int i, delta, buff_len, n_frags_free = 0, len_free = 0;
+
+	buff_len = xdp_get_buff_len(xdp);
+
+	if (unlikely(len > buff_len))
+		return -EINVAL;
+
+	if (!len)
+		len = xdp_get_buff_len(xdp);
+
+	data_end = xdp->data + len;
+	delta = data_end - xdp->data_end;
+
+	if (delta <= 0)
+		return 0;
+
+	if (unlikely(data_end > data_hard_end))
+		return -EINVAL;
+
+	for (i = 0; i < sinfo->nr_frags && delta; i++) {
+		skb_frag_t *frag = &sinfo->frags[i];
+		u32 shrink = min_t(u32, delta, skb_frag_size(frag));
+
+		memcpy(xdp->data_end + len_free, skb_frag_address(frag), shrink);
+
+		len_free += shrink;
+		delta -= shrink;
+		if (bpf_xdp_shrink_data(xdp, frag, shrink, false))
+			n_frags_free++;
+	}
+
+	for (i = 0; i < sinfo->nr_frags - n_frags_free; i++) {
+		memcpy(&sinfo->frags[i], &sinfo->frags[i + n_frags_free],
+		       sizeof(skb_frag_t));
+	}
+
+	sinfo->nr_frags -= n_frags_free;
+	sinfo->xdp_frags_size -= len_free;
+	xdp->data_end = data_end;
+
+	if (unlikely(!sinfo->nr_frags))
+		xdp_buff_clear_frags_flag(xdp);
+
+	return 0;
+}
+
 __bpf_kfunc_end_defs();
 
 int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
@@ -12238,6 +12289,7 @@ BTF_KFUNCS_END(bpf_kfunc_check_set_skb_meta)
 
 BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
 BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
+BTF_ID_FLAGS(func, bpf_xdp_pull_data)
 BTF_KFUNCS_END(bpf_kfunc_check_set_xdp)
 
 BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
