
Commit 09d2b31

fengidri authored and kuba-moo committed
virtio_net: xsk: bind/unbind xsk for rx
This patch implements the logic for binding/unbinding an xsk pool to/from an rq.

Signed-off-by: Xuan Zhuo <[email protected]>
Acked-by: Jason Wang <[email protected]>
Acked-by: Michael S. Tsirkin <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 5db4810 commit 09d2b31

1 file changed: +134 -0 lines changed
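Background for reading the diff below: the driver receives these bind/unbind requests through its ndo_bpf hook. When userspace binds an AF_XDP socket to a queue, the AF_XDP core (xp_assign_dev() in net/xdp/xsk_buff_pool.c) issues the XDP_SETUP_XSK_POOL command roughly as sketched here; this is an abridged paraphrase for orientation, not the verbatim source:

        struct netdev_bpf bpf = {};

        bpf.command = XDP_SETUP_XSK_POOL;
        bpf.xsk.pool = pool;            /* NULL when unbinding */
        bpf.xsk.queue_id = queue_id;

        err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);

On unbind (socket close), the same command arrives with a NULL pool, which is how virtnet_xsk_pool_setup() in the diff distinguishes enable from disable.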

drivers/net/virtio_net.c

@@ -25,6 +25,7 @@
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
 #include <net/netdev_queues.h>
+#include <net/xdp_sock_drv.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -348,6 +349,11 @@ struct receive_queue {
 
         /* Record the last dma info to free after new pages is allocated. */
         struct virtnet_rq_dma *last_dma;
+
+        struct xsk_buff_pool *xsk_pool;
+
+        /* xdp rxq used by xsk */
+        struct xdp_rxq_info xsk_rxq_info;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -5026,6 +5032,132 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
         return virtnet_set_guest_offloads(vi, offloads);
 }
 
+static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
+                                    struct xsk_buff_pool *pool)
+{
+        int err, qindex;
+
+        qindex = rq - vi->rq;
+
+        if (pool) {
+                err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
+                if (err < 0)
+                        return err;
+
+                err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
+                                                 MEM_TYPE_XSK_BUFF_POOL, NULL);
+                if (err < 0)
+                        goto unreg;
+
+                xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
+        }
+
+        virtnet_rx_pause(vi, rq);
+
+        err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+        if (err) {
+                netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+                pool = NULL;
+        }
+
+        rq->xsk_pool = pool;
+
+        virtnet_rx_resume(vi, rq);
+
+        if (pool)
+                return 0;
+
+unreg:
+        xdp_rxq_info_unreg(&rq->xsk_rxq_info);
+        return err;
+}
+
+static int virtnet_xsk_pool_enable(struct net_device *dev,
+                                   struct xsk_buff_pool *pool,
+                                   u16 qid)
+{
+        struct virtnet_info *vi = netdev_priv(dev);
+        struct receive_queue *rq;
+        struct device *dma_dev;
+        struct send_queue *sq;
+        int err;
+
+        if (vi->hdr_len > xsk_pool_get_headroom(pool))
+                return -EINVAL;
+
+        /* In big_packets mode, xdp cannot work, so there is no need to
+         * initialize xsk for this rq.
+         */
+        if (vi->big_packets && !vi->mergeable_rx_bufs)
+                return -ENOENT;
+
+        if (qid >= vi->curr_queue_pairs)
+                return -EINVAL;
+
+        sq = &vi->sq[qid];
+        rq = &vi->rq[qid];
+
+        /* xsk assumes that tx and rx share the same dma device: af-xdp may
+         * receive a buffer on the rx side and reuse that same buffer for a
+         * send on the tx side, so the dma devs of sq and rq must be the same.
+         *
+         * But vq->dma_dev allows each vq to have its own dma dev, so check
+         * that the dma devs of rq and sq really are the same device.
+         */
+        if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
+                return -EINVAL;
+
+        dma_dev = virtqueue_dma_dev(rq->vq);
+        if (!dma_dev)
+                return -EINVAL;
+
+        err = xsk_pool_dma_map(pool, dma_dev, 0);
+        if (err)
+                goto err_xsk_map;
+
+        err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
+        if (err)
+                goto err_rq;
+
+        return 0;
+
+err_rq:
+        xsk_pool_dma_unmap(pool, 0);
+err_xsk_map:
+        return err;
+}
+
+static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+{
+        struct virtnet_info *vi = netdev_priv(dev);
+        struct xsk_buff_pool *pool;
+        struct receive_queue *rq;
+        int err;
+
+        if (qid >= vi->curr_queue_pairs)
+                return -EINVAL;
+
+        rq = &vi->rq[qid];
+
+        pool = rq->xsk_pool;
+
+        err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+
+        xsk_pool_dma_unmap(pool, 0);
+
+        return err;
+}
+
+static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+        if (xdp->xsk.pool)
+                return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
+                                               xdp->xsk.queue_id);
+        else
+                return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
+}
+
 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                            struct netlink_ext_ack *extack)
 {
@@ -5151,6 +5283,8 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
         switch (xdp->command) {
         case XDP_SETUP_PROG:
                 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
+        case XDP_SETUP_XSK_POOL:
+                return virtnet_xsk_pool_setup(dev, xdp);
         default:
                 return -EINVAL;
         }
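For completeness, a minimal userspace sequence that exercises the new path: creating an AF_XDP socket, registering a UMEM, creating the rings, and binding to a queue drives the core to build an xsk_buff_pool and call virtnet_xsk_pool_enable(); closing the socket unwinds through virtnet_xsk_pool_disable(). This is a hedged sketch, assuming an interface named eth0 and queue 0, with most error handling trimmed and no ring mmap/fill logic (which a real application needs before traffic flows):

#include <linux/if_xdp.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_XDP
#define SOL_XDP 283             /* older libcs may not define this */
#endif

#define NUM_FRAMES 4096
#define FRAME_SIZE 2048

int main(void)
{
        struct xdp_umem_reg reg;
        struct sockaddr_xdp sxdp;
        int fd, ring_sz = 2048;
        void *umem;

        fd = socket(AF_XDP, SOCK_RAW, 0);
        if (fd < 0) {
                perror("socket(AF_XDP)");
                return 1;
        }

        /* Page-aligned buffer area that will back the xsk_buff_pool. */
        umem = mmap(NULL, (size_t)NUM_FRAMES * FRAME_SIZE,
                    PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        memset(&reg, 0, sizeof(reg));
        reg.addr = (unsigned long long)umem;
        reg.len = (unsigned long long)NUM_FRAMES * FRAME_SIZE;
        reg.chunk_size = FRAME_SIZE;
        reg.headroom = 0;
        setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg));

        /* Rings must be created before bind(). */
        setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
        setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
        setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));

        /* bind() is what reaches virtnet_xsk_pool_enable() for this queue.
         * XDP_ZEROCOPY requests the zero-copy path this series enables;
         * omit the flag to let the kernel fall back to copy mode. */
        memset(&sxdp, 0, sizeof(sxdp));
        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_ifindex = if_nametoindex("eth0");     /* assumed iface */
        sxdp.sxdp_queue_id = 0;
        sxdp.sxdp_flags = XDP_ZEROCOPY;
        if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp))) {
                perror("bind(AF_XDP)");
                return 1;
        }

        /* close() unbinds the pool: the core calls ndo_bpf with a NULL
         * pool, landing in virtnet_xsk_pool_disable(). */
        close(fd);
        return 0;
}

Whether the enable succeeds on a given queue also depends on the dma-dev check in the diff: because the same umem chunk can be received on the rq and then retransmitted on the sq, virtnet_xsk_pool_enable() refuses to bind when virtqueue_dma_dev() differs between the two virtqueues.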
