@@ -553,6 +553,14 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
 			if (!va)
 				goto error;
+
+			page = virt_to_head_page(va);
+			/* Check if the frag falls back to single page */
+			if (compound_order(page) <
+			    get_order(mpc->rxbpre_alloc_size)) {
+				put_page(page);
+				goto error;
+			}
 		} else {
 			page = dev_alloc_page();
 			if (!page)
@@ -563,7 +571,6 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
-
 		if (dma_mapping_error(dev, da)) {
 			put_page(virt_to_head_page(va));
 			goto error;
@@ -1505,6 +1512,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 		if (!va)
 			return NULL;
+
+		page = virt_to_head_page(va);
+		/* Check if the frag falls back to single page */
+		if (compound_order(page) < get_order(rxq->alloc_size)) {
+			put_page(page);
+			return NULL;
+		}
 	} else {
 		page = dev_alloc_page();
 		if (!page)
@@ -1515,7 +1529,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
 			     DMA_FROM_DEVICE);
-
 	if (dma_mapping_error(dev, *da)) {
 		put_page(virt_to_head_page(va));
 		return NULL;
@@ -1525,14 +1538,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 }
 
 /* Allocate frag for rx buffer, and save the old buf */
-static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq,
-			      struct mana_recv_buf_oob *rxoob, void **old_buf)
+static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
+			       struct mana_recv_buf_oob *rxoob, void **old_buf)
 {
 	dma_addr_t da;
 	void *va;
 
 	va = mana_get_rxfrag(rxq, dev, &da, true);
-
 	if (!va)
 		return;
 
@@ -1597,7 +1609,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	rxbuf_oob = &rxq->rx_oobs[curr];
 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-	mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);
+	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
 
 	/* Unsuccessful refill will have old_buf == NULL.
 	 * In this case, mana_rx_skb() will drop the packet.
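
Background on the added check: netdev_alloc_frag()/napi_alloc_frag() may fall back to a single order-0 page when a higher-order page cannot be allocated, so a request larger than PAGE_SIZE can come back backed by a smaller page. Comparing compound_order() of the head page with get_order() of the requested size detects that fallback, and the buffer is released instead of being used and overrun. Below is a minimal sketch of the pattern in isolation, assuming kernel context; the helper name mana_alloc_rx_frag_checked is hypothetical and not part of the driver.

#include <linux/mm.h>      /* virt_to_head_page(), compound_order(), put_page(), get_order() */
#include <linux/skbuff.h>  /* netdev_alloc_frag() */

/* Hypothetical helper: allocate a frag of @alloc_size and reject it if the
 * page-frag cache fell back to a page smaller than the requested size.
 */
static void *mana_alloc_rx_frag_checked(unsigned int alloc_size)
{
	struct page *page;
	void *va;

	va = netdev_alloc_frag(alloc_size);
	if (!va)
		return NULL;

	/* Under memory pressure the frag cache may hand out a piece of an
	 * order-0 page; a buffer larger than PAGE_SIZE would then overrun
	 * it, so release the page and report failure instead.
	 */
	page = virt_to_head_page(va);
	if (compound_order(page) < get_order(alloc_size)) {
		put_page(page);
		return NULL;
	}

	return va;
}

The caller treats a NULL return exactly like an ordinary allocation failure, which is why the driver's refill path can simply keep the old buffer and let mana_rx_skb() drop the packet.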