Skip to content

Commit 163943a

Browse files
alobakin authored and
Christoph Hellwig committed
xsk: use generic DMA sync shortcut instead of a custom one
XSk infra's been using its own DMA sync shortcut to try avoiding redundant function calls. Now that there is a generic one, remove the custom implementation and rely on the generic helpers.

xsk_buff_dma_sync_for_cpu() doesn't need the second argument anymore; remove it.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 4321de4 commit 163943a

File tree

13 files changed

+21
-51
lines changed

13 files changed

+21
-51
lines changed

drivers/net/ethernet/engleder/tsnep_main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1587,7 +1587,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
15871587
length = __le32_to_cpu(entry->desc_wb->properties) &
15881588
TSNEP_DESC_LENGTH_MASK;
15891589
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
1590-
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
1590+
xsk_buff_dma_sync_for_cpu(entry->xdp);
15911591

15921592
/* RX metadata with timestamps is in front of actual data,
15931593
* subtract metadata size to get length of actual data and

drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
5555
xdp_set_data_meta_invalid(xdp_buff);
5656
xdp_buff->rxq = &ch->xdp_rxq;
5757

58-
xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
58+
xsk_buff_dma_sync_for_cpu(xdp_buff);
5959
xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
6060

6161
/* xdp.data pointer may have changed */

drivers/net/ethernet/intel/i40e/i40e_xsk.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -483,7 +483,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
483483

484484
bi = *i40e_rx_bi(rx_ring, next_to_process);
485485
xsk_buff_set_size(bi, size);
486-
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
486+
xsk_buff_dma_sync_for_cpu(bi);
487487

488488
if (!first)
489489
first = bi;

drivers/net/ethernet/intel/ice/ice_xsk.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -879,7 +879,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
879879
ICE_RX_FLX_DESC_PKT_LEN_M;
880880

881881
xsk_buff_set_size(xdp, size);
882-
xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
882+
xsk_buff_dma_sync_for_cpu(xdp);
883883

884884
if (!first) {
885885
first = xdp;

drivers/net/ethernet/intel/igc/igc_main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2813,7 +2813,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
28132813
}
28142814

28152815
bi->xdp->data_end = bi->xdp->data + size;
2816-
xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2816+
xsk_buff_dma_sync_for_cpu(bi->xdp);
28172817

28182818
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
28192819
switch (res) {

drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -304,7 +304,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
304304
}
305305

306306
bi->xdp->data_end = bi->xdp->data + size;
307-
xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
307+
xsk_buff_dma_sync_for_cpu(bi->xdp);
308308
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
309309

310310
if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
270270
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
271271
mxbuf->cqe = cqe;
272272
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
273-
xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
273+
xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
274274
net_prefetch(mxbuf->xdp.data);
275275

276276
/* Possible flows:
@@ -319,7 +319,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
319319
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
320320
mxbuf->cqe = cqe;
321321
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
322-
xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
322+
xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
323323
net_prefetch(mxbuf->xdp.data);
324324

325325
prog = rcu_dereference(rq->xdp_prog);

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -917,7 +917,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
917917

918918
if (!rq->xsk_pool) {
919919
count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
920-
} else if (likely(!rq->xsk_pool->dma_need_sync)) {
920+
} else if (likely(!dma_dev_need_sync(rq->pdev))) {
921921
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
922922
count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
923923
} else {

drivers/net/ethernet/netronome/nfp/nfd3/xsk.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
184184
xrxbuf->xdp->data += meta_len;
185185
xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
186186
xdp_set_data_meta_invalid(xrxbuf->xdp);
187-
xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
187+
xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);
188188
net_prefetch(xrxbuf->xdp->data);
189189

190190
if (meta_len) {

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5352,7 +5352,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
53525352

53535353
/* RX buffer is good and fit into a XSK pool buffer */
53545354
buf->xdp->data_end = buf->xdp->data + buf1_len;
5355-
xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5355+
xsk_buff_dma_sync_for_cpu(buf->xdp);
53565356

53575357
prog = READ_ONCE(priv->xdp_prog);
53585358
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

0 commit comments

Comments
 (0)