Skip to content

Commit 560d958

Browse files
alobakin authored and kuba-moo committed
xsk: add generic XSk &xdp_buff -> skb conversion
Same as with converting &xdp_buff to skb on Rx, the code which allocates a new skb and copies the XSk frame there is identical across the drivers, so make it generic. This includes copying all the frags if they are present in the original buff. System percpu page_pools greatly improve XDP_PASS performance on XSk: instead of page_alloc() + page_free(), the net core recycles the same pages, so the only overhead left is memcpy()s. When the Page Pool is not compiled in, the whole function is a return-NULL (but it always gets selected when eBPF is enabled). Note that the passed buff gets freed if the conversion is done w/o any error, assuming you don't need this buffer after you convert it to an skb. Reviewed-by: Maciej Fijalkowski <[email protected]> Signed-off-by: Alexander Lobakin <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 51205f8 commit 560d958

File tree

2 files changed

+113
-0
lines changed

2 files changed

+113
-0
lines changed

include/net/xdp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -337,6 +337,7 @@ void xdp_warn(const char *msg, const char *func, const int line);
337337
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
338338

339339
struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp);
340+
struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp);
340341
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
341342
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
342343
struct sk_buff *skb,

net/core/xdp.c

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -684,6 +684,118 @@ struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
684684
}
685685
EXPORT_SYMBOL_GPL(xdp_build_skb_from_buff);
686686

687+
/**
688+
* xdp_copy_frags_from_zc - copy frags from XSk buff to skb
689+
* @skb: skb to copy frags to
690+
* @xdp: XSk &xdp_buff from which the frags will be copied
691+
* @pp: &page_pool backing page allocation, if available
692+
*
693+
* Copy all frags from XSk &xdp_buff to the skb to pass it up the stack.
694+
* Allocate a new buffer for each frag, copy it and attach to the skb.
695+
*
696+
* Return: true on success, false on netmem allocation fail.
697+
*/
698+
static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
699+
const struct xdp_buff *xdp,
700+
struct page_pool *pp)
701+
{
702+
struct skb_shared_info *sinfo = skb_shinfo(skb);
703+
const struct skb_shared_info *xinfo;
704+
u32 nr_frags, tsize = 0;
705+
bool pfmemalloc = false;
706+
707+
xinfo = xdp_get_shared_info_from_buff(xdp);
708+
nr_frags = xinfo->nr_frags;
709+
710+
for (u32 i = 0; i < nr_frags; i++) {
711+
u32 len = skb_frag_size(&xinfo->frags[i]);
712+
u32 offset, truesize = len;
713+
netmem_ref netmem;
714+
715+
netmem = page_pool_dev_alloc_netmem(pp, &offset, &truesize);
716+
if (unlikely(!netmem)) {
717+
sinfo->nr_frags = i;
718+
return false;
719+
}
720+
721+
memcpy(__netmem_address(netmem),
722+
__netmem_address(xinfo->frags[i].netmem),
723+
LARGEST_ALIGN(len));
724+
__skb_fill_netmem_desc_noacc(sinfo, i, netmem, offset, len);
725+
726+
tsize += truesize;
727+
pfmemalloc |= netmem_is_pfmemalloc(netmem);
728+
}
729+
730+
xdp_update_skb_shared_info(skb, nr_frags, xinfo->xdp_frags_size,
731+
tsize, pfmemalloc);
732+
733+
return true;
734+
}
735+
736+
/**
737+
* xdp_build_skb_from_zc - create an skb from XSk &xdp_buff
738+
* @xdp: source XSk buff
739+
*
740+
* Similar to xdp_build_skb_from_buff(), but for XSk frames. Allocate an skb
741+
* head, new buffer for the head, copy the data and initialize the skb fields.
742+
* If there are frags, allocate new buffers for them and copy.
743+
* Buffers are allocated from the system percpu pools to try recycling them.
744+
* If new skb was built successfully, @xdp is returned to XSk pool's freelist.
745+
* On error, it remains untouched and the caller must take care of this.
746+
*
747+
* Return: new &sk_buff on success, %NULL on error.
748+
*/
749+
struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
750+
{
751+
struct page_pool *pp = this_cpu_read(system_page_pool);
752+
const struct xdp_rxq_info *rxq = xdp->rxq;
753+
u32 len = xdp->data_end - xdp->data_meta;
754+
u32 truesize = xdp->frame_sz;
755+
struct sk_buff *skb;
756+
int metalen;
757+
void *data;
758+
759+
if (!IS_ENABLED(CONFIG_PAGE_POOL))
760+
return NULL;
761+
762+
data = page_pool_dev_alloc_va(pp, &truesize);
763+
if (unlikely(!data))
764+
return NULL;
765+
766+
skb = napi_build_skb(data, truesize);
767+
if (unlikely(!skb)) {
768+
page_pool_free_va(pp, data, true);
769+
return NULL;
770+
}
771+
772+
skb_mark_for_recycle(skb);
773+
skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
774+
775+
memcpy(__skb_put(skb, len), xdp->data_meta, LARGEST_ALIGN(len));
776+
777+
metalen = xdp->data - xdp->data_meta;
778+
if (metalen > 0) {
779+
skb_metadata_set(skb, metalen);
780+
__skb_pull(skb, metalen);
781+
}
782+
783+
skb_record_rx_queue(skb, rxq->queue_index);
784+
785+
if (unlikely(xdp_buff_has_frags(xdp)) &&
786+
unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
787+
napi_consume_skb(skb, true);
788+
return NULL;
789+
}
790+
791+
xsk_buff_free(xdp);
792+
793+
skb->protocol = eth_type_trans(skb, rxq->dev);
794+
795+
return skb;
796+
}
797+
EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);
798+
687799
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
688800
struct sk_buff *skb,
689801
struct net_device *dev)

0 commit comments

Comments
 (0)