 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
 
 #include <asm/cacheflush.h>
 
@@ -422,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+			  struct fec_enet_priv_rx_q *rxq, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = size,
+		.nid = dev_to_node(&fep->pdev->dev),
+		.dev = &fep->pdev->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = FEC_ENET_XDP_HEADROOM,
+		.max_len = FEC_ENET_RX_FRSIZE,
+	};
+	int err;
+
+	rxq->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rxq->page_pool)) {
+		err = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+		return err;
+	}
+
+	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					 rxq->page_pool);
+	if (err)
+		goto err_unregister_rxq;
+
+	return 0;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+	page_pool_destroy(rxq->page_pool);
+	rxq->page_pool = NULL;
+	return err;
+}
+
 static struct bufdesc *
 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			     struct sk_buff *skb,
@@ -1450,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev)
 		fec_enet_tx_queue(ndev, i);
 }
 
-static int
+static int __maybe_unused
 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1470,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
 	return 0;
 }
 
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-			       struct bufdesc *bdp, u32 length, bool swap)
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+		   struct bufdesc *bdp, u32 length, bool swap)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct sk_buff *new_skb;
@@ -1496,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
 	return true;
 }
 
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+				struct bufdesc *bdp, int index)
+{
+	struct page *new_page;
+	dma_addr_t phys_addr;
+
+	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+	WARN_ON(!new_page);
+	rxq->rx_skb_info[index].page = new_page;
+
+	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
 /* During a receive, the bd_rx.cur points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1508,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned short status;
-	struct sk_buff *skb_new = NULL;
 	struct sk_buff *skb;
 	ushort	pkt_len;
 	__u8 *data;
@@ -1517,8 +1576,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	bool	vlan_packet_rcvd = false;
 	u16	vlan_tag;
 	int	index = 0;
-	bool	is_copybreak;
 	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+	struct page *page;
 
 #ifdef CONFIG_M532x
 	flush_cache_all();
@@ -1570,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		ndev->stats.rx_bytes += pkt_len;
 
 		index = fec_enet_get_bd_index(bdp, &rxq->bd);
-		skb = rxq->rx_skbuff[index];
+		page = rxq->rx_skb_info[index].page;
+		dma_sync_single_for_cpu(&fep->pdev->dev,
+					fec32_to_cpu(bdp->cbd_bufaddr),
+					pkt_len,
+					DMA_FROM_DEVICE);
+		prefetch(page_address(page));
+		fec_enet_update_cbd(rxq, bdp, index);
 
 		/* The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
-						  need_swap);
-		if (!is_copybreak) {
-			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
-			if (unlikely(!skb_new)) {
-				ndev->stats.rx_dropped++;
-				goto rx_processing_done;
-			}
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 FEC_ENET_RX_FRSIZE - fep->rx_align,
-					 DMA_FROM_DEVICE);
-		}
-
-		prefetch(skb->data - NET_IP_ALIGN);
+		skb = build_skb(page_address(page), PAGE_SIZE);
+		skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
 		skb_put(skb, pkt_len - 4);
+		skb_mark_for_recycle(skb);
 		data = skb->data;
 
-		if (!is_copybreak && need_swap)
+		if (need_swap)
 			swap_buffer(data, pkt_len);
 
 #if !defined(CONFIG_M5272)
@@ -1649,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		skb_record_rx_queue(skb, queue_id);
 		napi_gro_receive(&fep->napi, skb);
 
-		if (is_copybreak) {
-			dma_sync_single_for_device(&fep->pdev->dev,
-						   fec32_to_cpu(bdp->cbd_bufaddr),
-						   FEC_ENET_RX_FRSIZE - fep->rx_align,
-						   DMA_FROM_DEVICE);
-		} else {
-			rxq->rx_skbuff[index] = skb_new;
-			fec_enet_new_rxbdp(ndev, bdp, skb_new);
-		}
-
 rx_processing_done:
 		/* Clear the status flags for this buffer */
 		status &= ~BD_ENET_RX_STATS;
@@ -3002,26 +3045,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	unsigned int i;
 	struct sk_buff *skb;
-	struct bufdesc	*bdp;
 	struct fec_enet_priv_tx_q *txq;
 	struct fec_enet_priv_rx_q *rxq;
 	unsigned int q;
 
 	for (q = 0; q < fep->num_rx_queues; q++) {
 		rxq = fep->rx_queue[q];
-		bdp = rxq->bd.base;
-		for (i = 0; i < rxq->bd.ring_size; i++) {
-			skb = rxq->rx_skbuff[i];
-			rxq->rx_skbuff[i] = NULL;
-			if (skb) {
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 FEC_ENET_RX_FRSIZE - fep->rx_align,
-						 DMA_FROM_DEVICE);
-				dev_kfree_skb(skb);
-			}
-			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
-		}
+		for (i = 0; i < rxq->bd.ring_size; i++)
+			page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+
+		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+			xdp_rxq_info_unreg(&rxq->xdp_rxq);
+		page_pool_destroy(rxq->page_pool);
+		rxq->page_pool = NULL;
 	}
 
 	for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3111,24 +3147,31 @@ static int
 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	unsigned int i;
-	struct sk_buff *skb;
-	struct bufdesc	*bdp;
 	struct fec_enet_priv_rx_q *rxq;
+	dma_addr_t phys_addr;
+	struct bufdesc	*bdp;
+	struct page *page;
+	int i, err;
 
 	rxq = fep->rx_queue[queue];
 	bdp = rxq->bd.base;
+
+	err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+	if (err < 0) {
+		netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+		return err;
+	}
+
 	for (i = 0; i < rxq->bd.ring_size; i++) {
-		skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
-		if (!skb)
+		page = page_pool_dev_alloc_pages(rxq->page_pool);
+		if (!page)
 			goto err_alloc;
 
-		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
-			dev_kfree_skb(skb);
-			goto err_alloc;
-		}
+		phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+		bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
 
-		rxq->rx_skbuff[i] = skb;
+		rxq->rx_skb_info[i].page = page;
+		rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
 		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 
 		if (fep->bufdesc_ex) {
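
For reference, the sketch below condenses the page_pool buffer lifecycle that the patch follows: create one pool per RX queue, fill the ring descriptors with DMA addresses taken from the pool, and let skb_mark_for_recycle() return pages to the pool when the stack frees the skb (with PP_FLAG_DMA_SYNC_DEV, the pool also handles the device-direction DMA sync on recycle). This is an illustrative sketch only, not part of the patch or the FEC driver; names such as my_rxq, my_rxq_init(), my_rx_build_skb() and MY_HEADROOM are hypothetical, and the headroom value is assumed to mirror XDP_PACKET_HEADROOM (256 bytes).

/* Minimal page_pool RX sketch -- not driver code, names are hypothetical. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

#define MY_HEADROOM	256	/* assumed, mirrors XDP_PACKET_HEADROOM */

struct my_rxq {
	struct page_pool *pool;
};

static int my_rxq_init(struct my_rxq *rxq, struct device *dev, int ring_size)
{
	struct page_pool_params pp = {
		.order		= 0,			/* one page per buffer */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= ring_size,
		.nid		= dev_to_node(dev),
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= MY_HEADROOM,
		.max_len	= PAGE_SIZE - MY_HEADROOM,
	};

	rxq->pool = page_pool_create(&pp);
	if (IS_ERR(rxq->pool)) {
		int err = PTR_ERR(rxq->pool);

		rxq->pool = NULL;
		return err;
	}
	return 0;
}

/* Ring fill: the pool already mapped the page, so only the offset is added. */
static dma_addr_t my_rx_fill_one(struct my_rxq *rxq, struct page **pagep)
{
	struct page *page = page_pool_dev_alloc_pages(rxq->pool);

	if (!page)
		return 0;
	*pagep = page;
	return page_pool_get_dma_addr(page) + MY_HEADROOM;
}

/* RX completion: wrap the DMA page in an skb and hand it to the stack. */
static struct sk_buff *my_rx_build_skb(struct page *page, unsigned int len)
{
	struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

	if (!skb)
		return NULL;
	skb_reserve(skb, MY_HEADROOM);	/* skip the reserved headroom */
	skb_put(skb, len);
	skb_mark_for_recycle(skb);	/* page returns to the pool on kfree_skb */
	return skb;
}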
0 commit comments