Skip to content

Commit 0b7cfa4

Browse files
ayalevin authored and Saeed Mahameed committed
net/mlx5e: Fix page DMA map/unmap attributes
Driver initiates DMA sync, hence it may skip CPU sync. Add DMA_ATTR_SKIP_CPU_SYNC as input attribute both to dma_map_page and dma_unmap_page to avoid redundant sync with the CPU. When forcing the device to work with SWIOTLB, the extra sync might cause data corruption: the driver unmaps the whole page while the hardware used just a part of the bounce buffer, so syncing overrides the entire page with a bounce buffer that only partially contains real data.

Fixes: bc77b24 ("net/mlx5e: Add fragmented memory support for RX multi packet WQE")
Fixes: db05815 ("net/mlx5e: Add XSK zero-copy support")
Signed-off-by: Aya Levin <[email protected]>
Reviewed-by: Gal Pressman <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
1 parent 36595d8 commit 0b7cfa4

File tree

2 files changed

+6
-5
lines changed

2 files changed

+6
-5
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
1111
{
1212
struct device *dev = mlx5_core_dma_dev(priv->mdev);
1313

14-
return xsk_pool_dma_map(pool, dev, 0);
14+
return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
1515
}
1616

1717
static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
1818
struct xsk_buff_pool *pool)
1919
{
20-
return xsk_pool_dma_unmap(pool, 0);
20+
return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
2121
}
2222

2323
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -278,8 +278,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
278278
if (unlikely(!dma_info->page))
279279
return -ENOMEM;
280280

281-
dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
282-
PAGE_SIZE, rq->buff.map_dir);
281+
dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
282+
rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
283283
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
284284
page_pool_recycle_direct(rq->page_pool, dma_info->page);
285285
dma_info->page = NULL;
@@ -300,7 +300,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
300300

301301
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
302302
{
303-
dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
303+
dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
304+
DMA_ATTR_SKIP_CPU_SYNC);
304305
}
305306

306307
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,

0 commit comments

Comments (0)