
Commit 001ba09

Kevin Groeneveld authored and kuba-moo committed
net: fec: handle page_pool_dev_alloc_pages error
The fec_enet_update_cbd function calls page_pool_dev_alloc_pages but did not handle the case when it returned NULL. There was a WARN_ON(!new_page), but the code would still proceed to use the NULL pointer and then crash.

This case does seem somewhat rare, but it can happen when the system is under memory pressure. One case where I can duplicate it with some frequency is writing over an smbd share to a SATA HDD attached to an imx6q. Setting /proc/sys/vm/min_free_kbytes to a higher value also seems to solve the problem for my test case, but it still seems wrong that the fec driver ignores the memory allocation error and can crash.

This commit handles the allocation error by dropping the current packet.

Fixes: 95698ff ("net: fec: using page pool to manage RX buffers")
Signed-off-by: Kevin Groeneveld <[email protected]>
Reviewed-by: Jacob Keller <[email protected]>
Reviewed-by: Wei Fang <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent f0d0277 · commit 001ba09
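As an illustration of the pattern the commit describes (refill the RX slot before handing the old buffer up the stack, and drop the packet when the refill allocation fails), here is a minimal user-space C sketch. All names here (rx_slot, alloc_rx_buf, update_slot, rx_one) are invented stand-ins for the driver's page-pool and descriptor handling, not the driver code itself.

#include <stdio.h>
#include <stdlib.h>

struct rx_slot {
	void *buf;
};

static unsigned long rx_dropped;

/* Stand-in for page_pool_dev_alloc_pages(): may return NULL under
 * memory pressure. */
static void *alloc_rx_buf(void)
{
	return malloc(2048);
}

/* Stand-in for the fixed fec_enet_update_cbd(): refill the slot with a
 * fresh buffer, or report failure without touching the slot. */
static int update_slot(struct rx_slot *slot)
{
	void *new_buf = alloc_rx_buf();

	if (!new_buf)
		return -1;	/* caller drops the packet */
	slot->buf = new_buf;	/* old buffer now belongs to the caller */
	return 0;
}

/* Stand-in for the RX path: snapshot the old buffer, refill first, and
 * deliver the frame only if the refill succeeded. */
static void rx_one(struct rx_slot *slot)
{
	void *pkt = slot->buf;	/* buffer that holds the received frame */

	if (update_slot(slot)) {
		rx_dropped++;	/* no replacement: old buffer stays in the ring */
		return;
	}
	printf("delivering packet from buffer %p\n", pkt);
	free(pkt);		/* stand-in for passing it up the stack */
}

int main(void)
{
	struct rx_slot slot = { .buf = alloc_rx_buf() };

	if (!slot.buf)
		return 1;
	rx_one(&slot);
	free(slot.buf);
	return 0;
}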

File tree: 1 file changed (+14, -5 lines)


drivers/net/ethernet/freescale/fec_main.c

Lines changed: 14 additions & 5 deletions
@@ -1591,19 +1591,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
 		fec_enet_tx_queue(ndev, i, budget);
 }
 
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 				struct bufdesc *bdp, int index)
 {
 	struct page *new_page;
 	dma_addr_t phys_addr;
 
 	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
-	WARN_ON(!new_page);
-	rxq->rx_skb_info[index].page = new_page;
+	if (unlikely(!new_page))
+		return -ENOMEM;
 
+	rxq->rx_skb_info[index].page = new_page;
 	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
 	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+	return 0;
 }
 
 static u32
@@ -1698,6 +1701,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	int cpu = smp_processor_id();
 	struct xdp_buff xdp;
 	struct page *page;
+	__fec32 cbd_bufaddr;
 	u32 sub_len = 4;
 
 #if !defined(CONFIG_M5272)
@@ -1766,12 +1770,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
 		page = rxq->rx_skb_info[index].page;
+		cbd_bufaddr = bdp->cbd_bufaddr;
+		if (fec_enet_update_cbd(rxq, bdp, index)) {
+			ndev->stats.rx_dropped++;
+			goto rx_processing_done;
+		}
+
 		dma_sync_single_for_cpu(&fep->pdev->dev,
-					fec32_to_cpu(bdp->cbd_bufaddr),
+					fec32_to_cpu(cbd_bufaddr),
					pkt_len,
					DMA_FROM_DEVICE);
 		prefetch(page_address(page));
-		fec_enet_update_cbd(rxq, bdp, index);
 
 		if (xdp_prog) {
 			xdp_buff_clear_frags_flag(&xdp);
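The subtle part of the last hunk is the ordering: fec_enet_update_cbd() overwrites bdp->cbd_bufaddr with the DMA address of the replacement page, so the old address must be captured first for dma_sync_single_for_cpu() to sync the buffer that actually holds the received frame. Below is the same caller-side path as an annotated excerpt; the comments are added here for explanation and are not part of the commit.

	cbd_bufaddr = bdp->cbd_bufaddr;		/* DMA address of the page holding this frame */
	if (fec_enet_update_cbd(rxq, bdp, index)) {
		ndev->stats.rx_dropped++;	/* no replacement page: count the drop */
		goto rx_processing_done;	/* descriptor still points at the old page,
						 * which is re-armed for the next receive */
	}

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(cbd_bufaddr),	/* sync the old buffer, not the refill */
				pkt_len,
				DMA_FROM_DEVICE);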
