@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
 			fec16_to_cpu(bdp->cbd_sc),
 			fec32_to_cpu(bdp->cbd_bufaddr),
 			fec16_to_cpu(bdp->cbd_datlen),
-			txq->tx_skbuff[index]);
+			txq->tx_buf[index].skb);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	skb_tx_timestamp(skb);
 
-	/* Make sure the update to bdp and tx_skbuff are performed before
-	 * txq->bd.cur.
-	 */
+	/* Make sure the update to bdp is performed before txq->bd.cur. */
 	wmb();
 	txq->bd.cur = bdp;
 
@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
-			if (bdp->cbd_bufaddr &&
-			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
-			if (txq->tx_skbuff[i]) {
-				dev_kfree_skb_any(txq->tx_skbuff[i]);
-				txq->tx_skbuff[i] = NULL;
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				if (bdp->cbd_bufaddr &&
+				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+				if (txq->tx_buf[i].skb) {
+					dev_kfree_skb_any(txq->tx_buf[i].skb);
+					txq->tx_buf[i].skb = NULL;
+				}
+			} else {
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		}
@@ -1360,6 +1375,7 @@ static void
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
 	struct fec_enet_private *fep;
+	struct xdp_frame *xdpf;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
@@ -1387,16 +1403,31 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-		skb = txq->tx_skbuff[index];
-		txq->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 fec16_to_cpu(bdp->cbd_datlen),
-					 DMA_TO_DEVICE);
-		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb)
-			goto skb_done;
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			skb = txq->tx_buf[index].skb;
+			txq->tx_buf[index].skb = NULL;
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!skb)
+				goto tx_buf_done;
+		} else {
+			xdpf = txq->tx_buf[index].xdp;
+			if (bdp->cbd_bufaddr)
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!xdpf) {
+				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+				goto tx_buf_done;
+			}
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,21 +1446,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 			ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += skb->len;
-		}
-
-		/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-		 * are to time stamp the packet, so we still need to check time
-		 * stamping enabled flag.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-			     fep->hwts_tx_en) &&
-		    fep->bufdesc_ex) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-			skb_tstamp_tx(skb, &shhwtstamps);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+				ndev->stats.tx_bytes += skb->len;
+			else
+				ndev->stats.tx_bytes += xdpf->len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1438,10 +1459,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (status & BD_ENET_TX_DEF)
 			ndev->stats.collisions++;
 
-		/* Free the sk buffer associated with this last transmit */
-		dev_kfree_skb_any(skb);
-skb_done:
-		/* Make sure the update to bdp and tx_skbuff are performed
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+			 * are to time stamp the packet, so we still need to check time
+			 * stamping enabled flag.
+			 */
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+				     fep->hwts_tx_en) && fep->bufdesc_ex) {
+				struct skb_shared_hwtstamps shhwtstamps;
+				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+				skb_tstamp_tx(skb, &shhwtstamps);
+			}
+
+			/* Free the sk buffer associated with this last transmit */
+			dev_kfree_skb_any(skb);
+		} else {
+			xdp_return_frame(xdpf);
+
+			txq->tx_buf[index].xdp = NULL;
+			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		}
+
+tx_buf_done:
+		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
 		 */
 		wmb();
@@ -3249,9 +3292,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
-			skb = txq->tx_skbuff[i];
-			txq->tx_skbuff[i] = NULL;
-			dev_kfree_skb(skb);
+
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				skb = txq->tx_buf[i].skb;
+				txq->tx_buf[i].skb = NULL;
+				dev_kfree_skb(skb);
+			} else {
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			}
 		}
 	}
 }
@@ -3296,8 +3349,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
 
 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
-		txq->tx_wake_threshold =
-			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
 
 		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
 					txq->bd.ring_size * TSO_HEADER_SIZE,
@@ -3732,21 +3784,27 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
 			return -EOPNOTSUPP;
 
+		if (!bpf->prog)
+			xdp_features_clear_redirect_target(dev);
+
 		if (is_run) {
 			napi_disable(&fep->napi);
 			netif_tx_disable(dev);
 		}
 
 		old_prog = xchg(&fep->xdp_prog, bpf->prog);
+		if (old_prog)
+			bpf_prog_put(old_prog);
+
 		fec_restart(dev);
 
 		if (is_run) {
 			napi_enable(&fep->napi);
 			netif_tx_start_all_queues(dev);
 		}
 
-		if (old_prog)
-			bpf_prog_put(old_prog);
+		if (bpf->prog)
+			xdp_features_set_redirect_target(dev, false);
 
 		return 0;
 
@@ -3778,7 +3836,7 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
-		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
 		return -EBUSY;
 	}
 
@@ -3811,7 +3869,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	txq->tx_skbuff[index] = NULL;
+	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	txq->tx_buf[index].xdp = frame;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -4016,8 +4075,7 @@ static int fec_enet_init(struct net_device *ndev)
 
 	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
 		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
-				     NETDEV_XDP_ACT_REDIRECT |
-				     NETDEV_XDP_ACT_NDO_XMIT;
+				     NETDEV_XDP_ACT_REDIRECT;
 
 	fec_restart(ndev);
 
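Note: the hunks above read txq->tx_buf[i].type, .skb and .xdp together with the FEC_TXBUF_T_SKB / FEC_TXBUF_T_XDP_NDO values, but the fec.h change that introduces that per-descriptor entry is not part of this excerpt. A minimal sketch of what it presumably looks like, inferred only from the accesses in these hunks (the names fec_txbuf_type and fec_tx_buffer are assumptions here), is:

enum fec_txbuf_type {
	FEC_TXBUF_T_SKB,	/* default: slot holds a socket buffer */
	FEC_TXBUF_T_XDP_NDO,	/* slot holds an xdp_frame queued via ndo_xdp_xmit */
};

struct fec_tx_buffer {
	union {
		struct sk_buff *skb;	/* valid when type == FEC_TXBUF_T_SKB */
		struct xdp_frame *xdp;	/* valid when type == FEC_TXBUF_T_XDP_NDO */
	};
	enum fec_txbuf_type type;
};

With a union like this, the TX completion and cleanup paths must check type before touching skb or xdp, which is exactly what the reworked branches in fec_enet_tx_queue(), fec_enet_bd_init() and fec_enet_free_buffers() do.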