
Commit c0dbbdf

Paolo Abeni authored and committed
Merge branch 'net-fec-fix-some-issues-of-ndo_xdp_xmit'
Wei Fang says:

====================
net: fec: fix some issues of ndo_xdp_xmit()

We encountered some issues when testing the ndo_xdp_xmit() interface of the fec driver on the i.MX8MP and i.MX93 platforms. These issues are easy to reproduce; the specific reproduction steps are as follows.

step1: The ethernet port of a board (board A) is connected to the EQOS port of the i.MX8MP/i.MX93, and the FEC port of the i.MX8MP/i.MX93 is connected to another ethernet port, such as a switch port.

step2: Board A uses the pktgen_sample03_burst_single_flow.sh script to generate and send packets to the i.MX8MP/i.MX93. The command is shown below.

./pktgen_sample03_burst_single_flow.sh -i eth0 -d 192.168.6.8 -m \
	56:bf:0d:68:b0:9e -s 1500

step3: The i.MX8MP/i.MX93 uses the xdp_redirect BPF program to redirect the XDP frames from the EQOS port to the FEC port. The command is shown below.

./xdp_redirect eth1 eth0

After a few moments, warning or error logs are printed to the console; for more details, please refer to the commit message of each patch.
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
2 parents 9d0aba9 + 84a1094 commit c0dbbdf

File tree

2 files changed (+127, -56 lines)


drivers/net/ethernet/freescale/fec.h

Lines changed: 15 additions & 2 deletions
@@ -355,7 +355,7 @@ struct bufdesc_ex {
 #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
 #define FEC_ENET_TX_FRSIZE 2048
 #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_SIZE 1024 /* Must be power of two */
 #define TX_RING_MOD_MASK 511 /* for this to work */
 
 #define BD_ENET_RX_INT 0x00800000
@@ -544,10 +544,23 @@ enum {
 	XDP_STATS_TOTAL,
 };
 
+enum fec_txbuf_type {
+	FEC_TXBUF_T_SKB,
+	FEC_TXBUF_T_XDP_NDO,
+};
+
+struct fec_tx_buffer {
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdp;
+	};
+	enum fec_txbuf_type type;
+};
+
 struct fec_enet_priv_tx_q {
 	struct bufdesc_prop bd;
 	unsigned char *tx_bounce[TX_RING_SIZE];
-	struct sk_buff *tx_skbuff[TX_RING_SIZE];
+	struct fec_tx_buffer tx_buf[TX_RING_SIZE];
 
 	unsigned short tx_stop_threshold;
 	unsigned short tx_wake_threshold;
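
The new struct fec_tx_buffer is the core of this series: each TX ring slot now records whether it holds an skb or an XDP frame, so every cleanup path can release the right kind of object. The following is a minimal, self-contained sketch of that tagged-union pattern, not the driver code itself; the demo_* names are illustrative stand-ins for struct sk_buff, struct xdp_frame and the kernel free helpers.

/* Minimal user-space sketch of a tagged TX-slot union (illustrative only;
 * demo_* names stand in for struct sk_buff, struct xdp_frame and the
 * free helpers used by the real driver).
 */
#include <stdio.h>
#include <stdlib.h>

enum demo_txbuf_type {
	DEMO_TXBUF_T_SKB,	/* slot holds a socket buffer */
	DEMO_TXBUF_T_XDP_NDO,	/* slot holds an XDP frame queued via ndo_xdp_xmit */
};

struct demo_tx_buffer {
	union {
		void *skb;	/* stand-in for struct sk_buff * */
		void *xdp;	/* stand-in for struct xdp_frame * */
	};
	enum demo_txbuf_type type;
};

/* Reclaim one completed slot, freeing whichever object it holds. */
static void demo_reclaim_slot(struct demo_tx_buffer *slot)
{
	if (slot->type == DEMO_TXBUF_T_SKB) {
		free(slot->skb);	/* driver: dev_kfree_skb_any() */
		slot->skb = NULL;
	} else {
		free(slot->xdp);	/* driver: xdp_return_frame() */
		slot->xdp = NULL;
		/* restore the default type so a reused slot is treated as skb */
		slot->type = DEMO_TXBUF_T_SKB;
	}
}

int main(void)
{
	struct demo_tx_buffer slot = {
		.xdp = malloc(64),
		.type = DEMO_TXBUF_T_XDP_NDO,
	};

	demo_reclaim_slot(&slot);
	printf("slot type after reclaim: %d\n", slot.type);	/* 0 == DEMO_TXBUF_T_SKB */
	return 0;
}

The driver's own paths in the diff below (fec_enet_bd_init, fec_enet_tx_queue and fec_enet_free_buffers) follow the same rule: unmap and free according to the slot type, then reset the slot to FEC_TXBUF_T_SKB so a recycled descriptor is interpreted correctly.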

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 112 additions & 54 deletions
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
 			fec16_to_cpu(bdp->cbd_sc),
 			fec32_to_cpu(bdp->cbd_bufaddr),
 			fec16_to_cpu(bdp->cbd_datlen),
-			txq->tx_skbuff[index]);
+			txq->tx_buf[index].skb);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	skb_tx_timestamp(skb);
 
-	/* Make sure the update to bdp and tx_skbuff are performed before
-	 * txq->bd.cur.
-	 */
+	/* Make sure the update to bdp is performed before txq->bd.cur. */
 	wmb();
 	txq->bd.cur = bdp;
 
@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
-			if (bdp->cbd_bufaddr &&
-			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
-			if (txq->tx_skbuff[i]) {
-				dev_kfree_skb_any(txq->tx_skbuff[i]);
-				txq->tx_skbuff[i] = NULL;
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				if (bdp->cbd_bufaddr &&
+				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+				if (txq->tx_buf[i].skb) {
+					dev_kfree_skb_any(txq->tx_buf[i].skb);
+					txq->tx_buf[i].skb = NULL;
+				}
+			} else {
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		}
@@ -1360,6 +1375,7 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct fec_enet_private *fep;
+	struct xdp_frame *xdpf;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
@@ -1387,16 +1403,31 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-		skb = txq->tx_skbuff[index];
-		txq->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 fec16_to_cpu(bdp->cbd_datlen),
-					 DMA_TO_DEVICE);
-		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb)
-			goto skb_done;
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			skb = txq->tx_buf[index].skb;
+			txq->tx_buf[index].skb = NULL;
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!skb)
+				goto tx_buf_done;
+		} else {
+			xdpf = txq->tx_buf[index].xdp;
+			if (bdp->cbd_bufaddr)
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!xdpf) {
+				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+				goto tx_buf_done;
+			}
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,21 +1446,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 				ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += skb->len;
-		}
-
-		/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-		 * are to time stamp the packet, so we still need to check time
-		 * stamping enabled flag.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-			     fep->hwts_tx_en) &&
-		    fep->bufdesc_ex) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-			skb_tstamp_tx(skb, &shhwtstamps);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+				ndev->stats.tx_bytes += skb->len;
+			else
+				ndev->stats.tx_bytes += xdpf->len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1438,10 +1459,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (status & BD_ENET_TX_DEF)
 			ndev->stats.collisions++;
 
-		/* Free the sk buffer associated with this last transmit */
-		dev_kfree_skb_any(skb);
-skb_done:
-		/* Make sure the update to bdp and tx_skbuff are performed
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+			 * are to time stamp the packet, so we still need to check time
+			 * stamping enabled flag.
+			 */
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+				     fep->hwts_tx_en) && fep->bufdesc_ex) {
+				struct skb_shared_hwtstamps shhwtstamps;
+				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+				skb_tstamp_tx(skb, &shhwtstamps);
+			}
+
+			/* Free the sk buffer associated with this last transmit */
+			dev_kfree_skb_any(skb);
+		} else {
+			xdp_return_frame(xdpf);
+
+			txq->tx_buf[index].xdp = NULL;
+			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		}
+
+tx_buf_done:
+		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
 		 */
 		wmb();
@@ -3249,9 +3292,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
-			skb = txq->tx_skbuff[i];
-			txq->tx_skbuff[i] = NULL;
-			dev_kfree_skb(skb);
+
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				skb = txq->tx_buf[i].skb;
+				txq->tx_buf[i].skb = NULL;
+				dev_kfree_skb(skb);
+			} else {
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			}
 		}
 	}
 }
@@ -3296,8 +3349,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
 
 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
-		txq->tx_wake_threshold =
-			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
 
 		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
 					txq->bd.ring_size * TSO_HEADER_SIZE,
@@ -3732,21 +3784,27 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 	if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
 		return -EOPNOTSUPP;
 
+	if (!bpf->prog)
+		xdp_features_clear_redirect_target(dev);
+
 	if (is_run) {
 		napi_disable(&fep->napi);
 		netif_tx_disable(dev);
 	}
 
 	old_prog = xchg(&fep->xdp_prog, bpf->prog);
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
 	fec_restart(dev);
 
 	if (is_run) {
 		napi_enable(&fep->napi);
 		netif_tx_start_all_queues(dev);
 	}
 
-	if (old_prog)
-		bpf_prog_put(old_prog);
+	if (bpf->prog)
+		xdp_features_set_redirect_target(dev, false);
 
 	return 0;
 
@@ -3778,7 +3836,7 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
-		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
 		return -EBUSY;
 	}
 
@@ -3811,7 +3869,8 @@
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	txq->tx_skbuff[index] = NULL;
+	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	txq->tx_buf[index].xdp = frame;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -4016,8 +4075,7 @@ static int fec_enet_init(struct net_device *ndev)
 
 	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
 		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
-				     NETDEV_XDP_ACT_REDIRECT |
-				     NETDEV_XDP_ACT_NDO_XMIT;
+				     NETDEV_XDP_ACT_REDIRECT;
 
 	fec_restart(ndev);
 