Skip to content

Commit 7cdaa4c

Browse files
wkzdavem330
authored and committed
net: ethernet: fec: prevent tx starvation under high rx load
In the ISR, we poll the event register for the queues in need of service and then enter polled mode. After this point, the event register will never be read again until we exit polled mode. In a scenario where a UDP flow is routed back out through the same interface, i.e. "router-on-a-stick", we'll typically only see an rx queue event initially. Once we start to process the incoming flow we'll be locked in polled mode, but we'll never clean the tx rings since that event is never caught. Eventually the netdev watchdog will trip, causing all buffers to be dropped and then the process starts over again. Rework the NAPI poll to keep trying to consume the entire budget as long as new events are coming in, making sure to service all rx/tx queues, in priority order, on each pass. Fixes: 4d494cd ("net: fec: change data structure to support multiqueue") Signed-off-by: Tobias Waldekranz <[email protected]> Tested-by: Fugang Duan <[email protected]> Reviewed-by: Fugang Duan <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 28b18e4 commit 7cdaa4c

File tree

2 files changed

+31
-68
lines changed

2 files changed

+31
-68
lines changed

drivers/net/ethernet/freescale/fec.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -525,11 +525,6 @@ struct fec_enet_private {
525525
unsigned int total_tx_ring_size;
526526
unsigned int total_rx_ring_size;
527527

528-
unsigned long work_tx;
529-
unsigned long work_rx;
530-
unsigned long work_ts;
531-
unsigned long work_mdio;
532-
533528
struct platform_device *pdev;
534529

535530
int dev_id;

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 31 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
7575

7676
#define DRIVER_NAME "fec"
7777

78-
#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
79-
8078
/* Pause frame feild and FIFO threshold */
8179
#define FEC_ENET_FCE (1 << 5)
8280
#define FEC_ENET_RSEM_V 0x84
@@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
12481246

12491247
fep = netdev_priv(ndev);
12501248

1251-
queue_id = FEC_ENET_GET_QUQUE(queue_id);
1252-
12531249
txq = fep->tx_queue[queue_id];
12541250
/* get next bdp of dirty_tx */
12551251
nq = netdev_get_tx_queue(ndev, queue_id);
@@ -1340,17 +1336,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
13401336
writel(0, txq->bd.reg_desc_active);
13411337
}
13421338

1343-
static void
1344-
fec_enet_tx(struct net_device *ndev)
1339+
static void fec_enet_tx(struct net_device *ndev)
13451340
{
13461341
struct fec_enet_private *fep = netdev_priv(ndev);
1347-
u16 queue_id;
1348-
/* First process class A queue, then Class B and Best Effort queue */
1349-
for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
1350-
clear_bit(queue_id, &fep->work_tx);
1351-
fec_enet_tx_queue(ndev, queue_id);
1352-
}
1353-
return;
1342+
int i;
1343+
1344+
/* Make sure that AVB queues are processed first. */
1345+
for (i = fep->num_tx_queues - 1; i >= 0; i--)
1346+
fec_enet_tx_queue(ndev, i);
13541347
}
13551348

13561349
static int
@@ -1426,7 +1419,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
14261419
#ifdef CONFIG_M532x
14271420
flush_cache_all();
14281421
#endif
1429-
queue_id = FEC_ENET_GET_QUQUE(queue_id);
14301422
rxq = fep->rx_queue[queue_id];
14311423

14321424
/* First, grab all of the stats for the incoming packet.
@@ -1550,6 +1542,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
15501542
htons(ETH_P_8021Q),
15511543
vlan_tag);
15521544

1545+
skb_record_rx_queue(skb, queue_id);
15531546
napi_gro_receive(&fep->napi, skb);
15541547

15551548
if (is_copybreak) {
@@ -1595,67 +1588,40 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
15951588
return pkt_received;
15961589
}
15971590

1598-
static int
1599-
fec_enet_rx(struct net_device *ndev, int budget)
1591+
static int fec_enet_rx(struct net_device *ndev, int budget)
16001592
{
1601-
int pkt_received = 0;
1602-
u16 queue_id;
16031593
struct fec_enet_private *fep = netdev_priv(ndev);
1594+
int i, done = 0;
16041595

1605-
for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1606-
int ret;
1607-
1608-
ret = fec_enet_rx_queue(ndev,
1609-
budget - pkt_received, queue_id);
1596+
/* Make sure that AVB queues are processed first. */
1597+
for (i = fep->num_rx_queues - 1; i >= 0; i--)
1598+
done += fec_enet_rx_queue(ndev, budget - done, i);
16101599

1611-
if (ret < budget - pkt_received)
1612-
clear_bit(queue_id, &fep->work_rx);
1613-
1614-
pkt_received += ret;
1615-
}
1616-
return pkt_received;
1600+
return done;
16171601
}
16181602

1619-
static bool
1620-
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
1603+
static bool fec_enet_collect_events(struct fec_enet_private *fep)
16211604
{
1622-
if (int_events == 0)
1623-
return false;
1605+
uint int_events;
1606+
1607+
int_events = readl(fep->hwp + FEC_IEVENT);
16241608

1625-
if (int_events & FEC_ENET_RXF_0)
1626-
fep->work_rx |= (1 << 2);
1627-
if (int_events & FEC_ENET_RXF_1)
1628-
fep->work_rx |= (1 << 0);
1629-
if (int_events & FEC_ENET_RXF_2)
1630-
fep->work_rx |= (1 << 1);
1609+
/* Don't clear MDIO events, we poll for those */
1610+
int_events &= ~FEC_ENET_MII;
16311611

1632-
if (int_events & FEC_ENET_TXF_0)
1633-
fep->work_tx |= (1 << 2);
1634-
if (int_events & FEC_ENET_TXF_1)
1635-
fep->work_tx |= (1 << 0);
1636-
if (int_events & FEC_ENET_TXF_2)
1637-
fep->work_tx |= (1 << 1);
1612+
writel(int_events, fep->hwp + FEC_IEVENT);
16381613

1639-
return true;
1614+
return int_events != 0;
16401615
}
16411616

16421617
static irqreturn_t
16431618
fec_enet_interrupt(int irq, void *dev_id)
16441619
{
16451620
struct net_device *ndev = dev_id;
16461621
struct fec_enet_private *fep = netdev_priv(ndev);
1647-
uint int_events;
16481622
irqreturn_t ret = IRQ_NONE;
16491623

1650-
int_events = readl(fep->hwp + FEC_IEVENT);
1651-
1652-
/* Don't clear MDIO events, we poll for those */
1653-
int_events &= ~FEC_ENET_MII;
1654-
1655-
writel(int_events, fep->hwp + FEC_IEVENT);
1656-
fec_enet_collect_events(fep, int_events);
1657-
1658-
if ((fep->work_tx || fep->work_rx) && fep->link) {
1624+
if (fec_enet_collect_events(fep) && fep->link) {
16591625
ret = IRQ_HANDLED;
16601626

16611627
if (napi_schedule_prep(&fep->napi)) {
@@ -1672,17 +1638,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
16721638
{
16731639
struct net_device *ndev = napi->dev;
16741640
struct fec_enet_private *fep = netdev_priv(ndev);
1675-
int pkts;
1641+
int done = 0;
16761642

1677-
pkts = fec_enet_rx(ndev, budget);
1678-
1679-
fec_enet_tx(ndev);
1643+
do {
1644+
done += fec_enet_rx(ndev, budget - done);
1645+
fec_enet_tx(ndev);
1646+
} while ((done < budget) && fec_enet_collect_events(fep));
16801647

1681-
if (pkts < budget) {
1682-
napi_complete_done(napi, pkts);
1648+
if (done < budget) {
1649+
napi_complete_done(napi, done);
16831650
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
16841651
}
1685-
return pkts;
1652+
1653+
return done;
16861654
}
16871655

16881656
/* ------------------------------------------------------------------------- */

0 commit comments

Comments
 (0)