Skip to content

Commit dd1a385

Browse files
Kevin Groeneveld authored and opsiff committed
net: fec: handle page_pool_dev_alloc_pages error
[ Upstream commit 001ba09 ] The fec_enet_update_cbd function calls page_pool_dev_alloc_pages but did not handle the case when it returned NULL. There was a WARN_ON(!new_page) but it would still proceed to use the NULL pointer and then crash. This case does seem somewhat rare but when the system is under memory pressure it can happen. One case where I can duplicate this with some frequency is when writing over a smbd share to a SATA HDD attached to an imx6q. Setting /proc/sys/vm/min_free_kbytes to higher values also seems to solve the problem for my test case. But it still seems wrong that the fec driver ignores the memory allocation error and can crash. This commit handles the allocation error by dropping the current packet. Fixes: 95698ff ("net: fec: using page pool to manage RX buffers") Signed-off-by: Kevin Groeneveld <[email protected]> Reviewed-by: Jacob Keller <[email protected]> Reviewed-by: Wei Fang <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]> Signed-off-by: Sasha Levin <[email protected]> (cherry picked from commit 341f43053ff9bf50fd27b0b1eebad64659476b7d)
1 parent 4a4a25b commit dd1a385

File tree

1 file changed

+14
-5
lines changed

1 file changed

+14
-5
lines changed

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1572,19 +1572,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
15721572
fec_enet_tx_queue(ndev, i, budget);
15731573
}
15741574

1575-
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
1575+
static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
15761576
struct bufdesc *bdp, int index)
15771577
{
15781578
struct page *new_page;
15791579
dma_addr_t phys_addr;
15801580

15811581
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
1582-
WARN_ON(!new_page);
1583-
rxq->rx_skb_info[index].page = new_page;
1582+
if (unlikely(!new_page))
1583+
return -ENOMEM;
15841584

1585+
rxq->rx_skb_info[index].page = new_page;
15851586
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
15861587
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
15871588
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
1589+
1590+
return 0;
15881591
}
15891592

15901593
static u32
@@ -1679,6 +1682,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
16791682
int cpu = smp_processor_id();
16801683
struct xdp_buff xdp;
16811684
struct page *page;
1685+
__fec32 cbd_bufaddr;
16821686
u32 sub_len = 4;
16831687

16841688
#if !defined(CONFIG_M5272)
@@ -1743,12 +1747,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
17431747

17441748
index = fec_enet_get_bd_index(bdp, &rxq->bd);
17451749
page = rxq->rx_skb_info[index].page;
1750+
cbd_bufaddr = bdp->cbd_bufaddr;
1751+
if (fec_enet_update_cbd(rxq, bdp, index)) {
1752+
ndev->stats.rx_dropped++;
1753+
goto rx_processing_done;
1754+
}
1755+
17461756
dma_sync_single_for_cpu(&fep->pdev->dev,
1747-
fec32_to_cpu(bdp->cbd_bufaddr),
1757+
fec32_to_cpu(cbd_bufaddr),
17481758
pkt_len,
17491759
DMA_FROM_DEVICE);
17501760
prefetch(page_address(page));
1751-
fec_enet_update_cbd(rxq, bdp, index);
17521761

17531762
if (xdp_prog) {
17541763
xdp_buff_clear_frags_flag(&xdp);

0 commit comments

Comments (0)