@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
 			    struct prueth_rx_chn *rx_chn,
 			    int max_rflows)
 {
+	if (rx_chn->pg_pool) {
+		page_pool_destroy(rx_chn->pg_pool);
+		rx_chn->pg_pool = NULL;
+	}
+
 	if (rx_chn->desc_pool)
 		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -461,43 +466,36 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
 }
 EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
 
-int prueth_dma_rx_push(struct prueth_emac *emac,
-		       struct sk_buff *skb,
-		       struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+			      struct prueth_rx_chn *rx_chn,
+			      struct page *page, u32 buf_len)
 {
 	struct net_device *ndev = emac->ndev;
 	struct cppi5_host_desc_t *desc_rx;
-	u32 pkt_len = skb_tailroom(skb);
 	dma_addr_t desc_dma;
 	dma_addr_t buf_dma;
 	void **swdata;
 
+	buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
 	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
 	if (!desc_rx) {
 		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
 		return -ENOMEM;
 	}
 	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
 
-	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
-		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
-		return -EINVAL;
-	}
-
 	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
 			 PRUETH_NAV_PS_DATA_SIZE);
 	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
-	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
 
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	*swdata = skb;
+	*swdata = page;
 
-	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
					desc_rx, desc_dma);
 }
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
 
 u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
 {
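
With the pool created using PP_FLAG_DMA_MAP (see the prueth_create_page_pool() hunk further down), pages arrive pre-mapped, which is why prueth_dma_rx_push_mapped() above computes buf_dma arithmetically and drops the old dma_map_single()/dma_mapping_error() round trip. A minimal sketch of that pattern; rx_buf_get() and its parameters are illustrative names, not part of the driver:

#include <net/page_pool/helpers.h>

static int rx_buf_get(struct page_pool *pool, u32 headroom,
		      struct page **pagep, dma_addr_t *dmap)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;
	*pagep = page;
	/* the pool mapped the page once at allocation; only add the offset */
	*dmap = page_pool_get_dma_addr(page) + headroom;
	return 0;
}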
@@ -541,12 +539,16 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
 	u32 buf_dma_len, pkt_len, port_id = 0;
 	struct net_device *ndev = emac->ndev;
 	struct cppi5_host_desc_t *desc_rx;
-	struct sk_buff *skb, *new_skb;
 	dma_addr_t desc_dma, buf_dma;
+	struct page *page, *new_page;
+	struct page_pool *pool;
+	struct sk_buff *skb;
 	void **swdata;
 	u32 *psdata;
+	void *pa;
 	int ret;
 
+	pool = rx_chn->pg_pool;
 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
 	if (ret) {
 		if (ret != -ENODATA)
@@ -558,48 +560,61 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
 		return 0;
 
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	skb = *swdata;
-
-	psdata = cppi5_hdesc_get_psdata(desc_rx);
-	/* RX HW timestamp */
-	if (emac->rx_ts_enabled)
-		emac_rx_timestamp(emac, skb, psdata);
-
+	page = *swdata;
+	page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
 	/* firmware adds 4 CRC bytes, strip them */
 	pkt_len -= 4;
 	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
 
-	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	skb->dev = ndev;
-	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
 	/* if allocation fails we drop the packet but push the
-	 * descriptor back to the ring with old skb to prevent a stall
+	 * descriptor back to the ring with old page to prevent a stall
 	 */
-	if (!new_skb) {
+	new_page = page_pool_dev_alloc_pages(pool);
+	if (unlikely(!new_page)) {
+		new_page = page;
 		ndev->stats.rx_dropped++;
-		new_skb = skb;
-	} else {
-		/* send the filled skb up the n/w stack */
-		skb_put(skb, pkt_len);
-		if (emac->prueth->is_switch_mode)
-			skb->offload_fwd_mark = emac->offload_fwd_mark;
-		skb->protocol = eth_type_trans(skb, ndev);
-		napi_gro_receive(&emac->napi_rx, skb);
-		ndev->stats.rx_bytes += pkt_len;
-		ndev->stats.rx_packets++;
+		goto requeue;
+	}
+
+	/* prepare skb and send to n/w stack */
+	pa = page_address(page);
+	skb = napi_build_skb(pa, PAGE_SIZE);
+	if (!skb) {
+		ndev->stats.rx_dropped++;
+		page_pool_recycle_direct(pool, page);
+		goto requeue;
 	}
 
+	skb_reserve(skb, PRUETH_HEADROOM);
+	skb_put(skb, pkt_len);
+	skb->dev = ndev;
+
+	psdata = cppi5_hdesc_get_psdata(desc_rx);
+	/* RX HW timestamp */
+	if (emac->rx_ts_enabled)
+		emac_rx_timestamp(emac, skb, psdata);
+
+	if (emac->prueth->is_switch_mode)
+		skb->offload_fwd_mark = emac->offload_fwd_mark;
+	skb->protocol = eth_type_trans(skb, ndev);
+
+	skb_mark_for_recycle(skb);
+	napi_gro_receive(&emac->napi_rx, skb);
+	ndev->stats.rx_bytes += pkt_len;
+	ndev->stats.rx_packets++;
+
+requeue:
 	/* queue another RX DMA */
-	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+	ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+					PRUETH_MAX_PKT_SIZE);
 	if (WARN_ON(ret < 0)) {
-		dev_kfree_skb_any(new_skb);
+		page_pool_recycle_direct(pool, new_page);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	}
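
The rewritten emac_rx_packet() above builds the skb around the pool page instead of allocating a fresh buffer and copying. A condensed sketch of that zero-copy path, using only calls that appear in the hunk (rx_page_to_skb() itself is an illustrative name):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_page_to_skb(struct page *page, u32 pkt_len,
				      u32 headroom, struct net_device *ndev)
{
	/* wrap the DMA'd page directly; no memcpy of the payload */
	struct sk_buff *skb = napi_build_skb(page_address(page), PAGE_SIZE);

	if (!skb)
		return NULL;
	skb_reserve(skb, headroom);	/* skip the headroom the HW left */
	skb_put(skb, pkt_len);		/* expose the received payload */
	skb->protocol = eth_type_trans(skb, ndev);
	skb_mark_for_recycle(skb);	/* stack returns the page to the pool */
	return skb;
}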
@@ -611,22 +626,16 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
 	struct prueth_rx_chn *rx_chn = data;
 	struct cppi5_host_desc_t *desc_rx;
-	struct sk_buff *skb;
-	dma_addr_t buf_dma;
-	u32 buf_dma_len;
+	struct page_pool *pool;
+	struct page *page;
 	void **swdata;
 
+	pool = rx_chn->pg_pool;
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	skb = *swdata;
-	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
-	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
-	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
-			 DMA_FROM_DEVICE);
+	page = *swdata;
+	page_pool_recycle_direct(pool, page);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
-	dev_kfree_skb_any(skb);
 }
 
 static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
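
In the prueth_rx_cleanup() hunk above, teardown of an in-flight descriptor shrinks to "recycle the page, free the descriptor": no dma_unmap_single() is needed because the pool keeps pages mapped until page_pool_destroy(). Restated as a sketch (names mirror the driver's; not a verbatim excerpt):

static void rx_desc_cleanup(struct prueth_rx_chn *rx_chn,
			    struct cppi5_host_desc_t *desc_rx)
{
	/* the page pointer was stashed in swdata by the push path */
	struct page *page = *(struct page **)cppi5_hdesc_get_swdata(desc_rx);

	page_pool_recycle_direct(rx_chn->pg_pool, page);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}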
@@ -907,29 +916,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
 }
 EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
 
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+						 struct device *dma_dev,
+						 int size)
+{
+	struct page_pool_params pp_params = { 0 };
+	struct page_pool *pool;
+
+	pp_params.order = 0;
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+	pp_params.pool_size = size;
+	pp_params.nid = dev_to_node(emac->prueth->dev);
+	pp_params.dma_dir = DMA_BIDIRECTIONAL;
+	pp_params.dev = dma_dev;
+	pp_params.napi = &emac->napi_rx;
+	pp_params.max_len = PAGE_SIZE;
+
+	pool = page_pool_create(&pp_params);
+	if (IS_ERR(pool))
+		netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+	return pool;
+}
+
 int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
 {
-	struct sk_buff *skb;
+	struct page_pool *pool;
+	struct page *page;
 	int i, ret;
 
+	pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+
+	chn->pg_pool = pool;
+
 	for (i = 0; i < chn->descs_num; i++) {
-		skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
-		if (!skb)
-			return -ENOMEM;
+		/* NOTE: we're not using memory efficiently here.
+		 * 1 full page (4KB?) used here instead of
+		 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+		 */
+		page = page_pool_dev_alloc_pages(pool);
+		if (!page) {
+			netdev_err(emac->ndev, "couldn't allocate rx page\n");
+			ret = -ENOMEM;
+			goto recycle_alloc_pg;
+		}
 
-		ret = prueth_dma_rx_push(emac, skb, chn);
+		ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
 		if (ret < 0) {
 			netdev_err(emac->ndev,
-				   "cannot submit skb for rx chan %s ret %d\n",
+				   "cannot submit page for rx chan %s ret %d\n",
				   chn->name, ret);
-			kfree_skb(skb);
-			return ret;
+			page_pool_recycle_direct(pool, page);
+			goto recycle_alloc_pg;
 		}
 	}
 
 	return 0;
+
+recycle_alloc_pg:
+	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
 
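
For reference, a standalone restatement of the page_pool_create() parameters that prueth_create_page_pool() sets above, with a comment on what each buys the RX path (rx_pool_create() is an illustrative wrapper; the driver additionally sets .nid from its device node):

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>

static struct page_pool *rx_pool_create(struct device *dma_dev,
					struct napi_struct *napi, int size)
{
	struct page_pool_params pp_params = {
		.order		= 0,			/* one page per RX buffer */
		.flags		= PP_FLAG_DMA_MAP |	/* pool maps pages once */
				  PP_FLAG_DMA_SYNC_DEV,	/* pool syncs on refill */
		.pool_size	= size,			/* one page per descriptor */
		.dma_dir	= DMA_BIDIRECTIONAL,
		.dev		= dma_dev,
		.napi		= napi,			/* enables direct recycling */
		.max_len	= PAGE_SIZE,		/* sync length for the device */
	};

	return page_pool_create(&pp_params);
}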
@@ -958,6 +1009,9 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
 				  prueth_rx_cleanup, !!i);
 	if (disable)
 		k3_udma_glue_disable_rx_chn(chn->rx_chn);
+
+	page_pool_destroy(chn->pg_pool);
+	chn->pg_pool = NULL;
 }
 EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
 