Skip to content

Commit 15f73e6

Browse files
Amit Cohen authored and kuba-moo committed
mlxsw: pci: Sync Rx buffers for CPU
When an Rx packet is received, drivers should sync the pages for the CPU, to ensure the CPU reads the data written by the device and not stale data from its cache. Add the missing sync call in the Rx path; sync the actual length of data for each fragment.

Cc: Jiri Pirko <[email protected]>
Fixes: b5b60bb ("mlxsw: pci: Use page pool for Rx buffers allocation")
Signed-off-by: Amit Cohen <[email protected]>
Reviewed-by: Ido Schimmel <[email protected]>
Signed-off-by: Petr Machata <[email protected]>
Link: https://patch.msgid.link/461486fac91755ca4e04c2068c102250026dcd0b.1729866134.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 0a66e55 commit 15f73e6

File tree

1 file changed

+15
-7
lines changed
  • drivers/net/ethernet/mellanox/mlxsw

1 file changed

+15
-7
lines changed

drivers/net/ethernet/mellanox/mlxsw/pci.c

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -389,27 +389,34 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
389389
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
390390
}
391391

392-
static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
392+
static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
393+
struct page *pages[],
393394
u16 byte_count)
394395
{
396+
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
395397
unsigned int linear_data_size;
398+
struct page_pool *page_pool;
396399
struct sk_buff *skb;
397400
int page_index = 0;
398401
bool linear_only;
399402
void *data;
400403

404+
linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
405+
linear_data_size = linear_only ? byte_count :
406+
PAGE_SIZE -
407+
MLXSW_PCI_RX_BUF_SW_OVERHEAD;
408+
409+
page_pool = cq->u.cq.page_pool;
410+
page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
411+
MLXSW_PCI_SKB_HEADROOM, linear_data_size);
412+
401413
data = page_address(pages[page_index]);
402414
net_prefetch(data);
403415

404416
skb = napi_build_skb(data, PAGE_SIZE);
405417
if (unlikely(!skb))
406418
return ERR_PTR(-ENOMEM);
407419

408-
linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
409-
linear_data_size = linear_only ? byte_count :
410-
PAGE_SIZE -
411-
MLXSW_PCI_RX_BUF_SW_OVERHEAD;
412-
413420
skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
414421
skb_put(skb, linear_data_size);
415422

@@ -425,6 +432,7 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
425432

426433
page = pages[page_index];
427434
frag_size = min(byte_count, PAGE_SIZE);
435+
page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
428436
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
429437
page, 0, frag_size, PAGE_SIZE);
430438
byte_count -= frag_size;
@@ -760,7 +768,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
760768
if (err)
761769
goto out;
762770

763-
skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
771+
skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
764772
if (IS_ERR(skb)) {
765773
dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
766774
mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);

0 commit comments

Comments
 (0)