@@ -54,6 +54,8 @@
 #define TSNEP_TX_TYPE_SKB_FRAG	BIT(1)
 #define TSNEP_TX_TYPE_XDP_TX	BIT(2)
 #define TSNEP_TX_TYPE_XDP_NDO	BIT(3)
+#define TSNEP_TX_TYPE_XDP	(TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+#define TSNEP_TX_TYPE_XSK	BIT(4)
 
 #define TSNEP_XDP_TX		BIT(0)
 #define TSNEP_XDP_REDIRECT	BIT(1)
@@ -322,13 +324,47 @@ static void tsnep_tx_init(struct tsnep_tx *tx)
 	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
 }
 
+static void tsnep_tx_enable(struct tsnep_tx *tx)
+{
+	struct netdev_queue *nq;
+
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+	__netif_tx_lock_bh(nq);
+	netif_tx_wake_queue(nq);
+	__netif_tx_unlock_bh(nq);
+}
+
+static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
+{
+	struct netdev_queue *nq;
+	u32 val;
+
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+	__netif_tx_lock_bh(nq);
+	netif_tx_stop_queue(nq);
+	__netif_tx_unlock_bh(nq);
+
+	/* wait until TX is done in hardware */
+	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+			   1000000);
+
+	/* wait until TX is also done in software */
+	while (READ_ONCE(tx->read) != tx->write) {
+		napi_schedule(napi);
+		napi_synchronize(napi);
+	}
+}
+
 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
 			      bool last)
 {
 	struct tsnep_tx_entry *entry = &tx->entry[index];
 
 	entry->properties = 0;
-	/* xdpf is union with skb */
+	/* xdpf and zc are union with skb */
 	if (entry->skb) {
 		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
 		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
@@ -646,10 +682,69 @@ static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
 	return xmit;
 }
 
+static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
+{
+	struct tsnep_tx_entry *entry;
+	dma_addr_t dma;
+
+	entry = &tx->entry[tx->write];
+	entry->zc = true;
+
+	dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
+	xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
+
+	entry->type = TSNEP_TX_TYPE_XSK;
+	entry->len = xdpd->len;
+
+	entry->desc->tx = __cpu_to_le64(dma);
+
+	return xdpd->len;
+}
+
+static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
+					 struct tsnep_tx *tx)
+{
+	int length;
+
+	length = tsnep_xdp_tx_map_zc(xdpd, tx);
+
+	tsnep_tx_activate(tx, tx->write, length, true);
+	tx->write = (tx->write + 1) & TSNEP_RING_MASK;
+}
+
+static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
+{
+	int desc_available = tsnep_tx_desc_available(tx);
+	struct xdp_desc *descs = tx->xsk_pool->tx_descs;
+	int batch, i;
+
+	/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+	 * will be available for normal TX path and queue is stopped there if
+	 * necessary
+	 */
+	if (desc_available <= (MAX_SKB_FRAGS + 1))
+		return;
+	desc_available -= MAX_SKB_FRAGS + 1;
+
+	batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
+	for (i = 0; i < batch; i++)
+		tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
+
+	if (batch) {
+		/* descriptor properties shall be valid before hardware is
+		 * notified
+		 */
+		dma_wmb();
+
+		tsnep_xdp_xmit_flush(tx);
+	}
+}
+
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
 	struct tsnep_tx_entry *entry;
 	struct netdev_queue *nq;
+	int xsk_frames = 0;
 	int budget = 128;
 	int length;
 	int count;
@@ -676,7 +771,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
 		    skb_shinfo(entry->skb)->nr_frags > 0)
 			count += skb_shinfo(entry->skb)->nr_frags;
-		else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
+		else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
 			 xdp_frame_has_frags(entry->xdpf))
 			count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
 
@@ -705,9 +800,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 
 		if (entry->type & TSNEP_TX_TYPE_SKB)
 			napi_consume_skb(entry->skb, napi_budget);
-		else
+		else if (entry->type & TSNEP_TX_TYPE_XDP)
 			xdp_return_frame_rx_napi(entry->xdpf);
-		/* xdpf is union with skb */
+		else
+			xsk_frames++;
+		/* xdpf and zc are union with skb */
 		entry->skb = NULL;
 
 		tx->read = (tx->read + count) & TSNEP_RING_MASK;
@@ -718,6 +815,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 		budget--;
 	} while (likely(budget));
 
+	if (tx->xsk_pool) {
+		if (xsk_frames)
+			xsk_tx_completed(tx->xsk_pool, xsk_frames);
+		if (xsk_uses_need_wakeup(tx->xsk_pool))
+			xsk_set_tx_need_wakeup(tx->xsk_pool);
+		tsnep_xdp_xmit_zc(tx);
+	}
+
 	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
 	    netif_tx_queue_stopped(nq)) {
 		netif_tx_wake_queue(nq);
@@ -765,12 +870,6 @@ static int tsnep_tx_open(struct tsnep_tx *tx)
 
 static void tsnep_tx_close(struct tsnep_tx *tx)
 {
-	u32 val;
-
-	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
-			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
-			   1000000);
-
 	tsnep_tx_ring_cleanup(tx);
 }
 
@@ -1786,12 +1885,18 @@ static void tsnep_queue_enable(struct tsnep_queue *queue)
 	napi_enable(&queue->napi);
 	tsnep_enable_irq(queue->adapter, queue->irq_mask);
 
+	if (queue->tx)
+		tsnep_tx_enable(queue->tx);
+
 	if (queue->rx)
 		tsnep_rx_enable(queue->rx);
 }
 
 static void tsnep_queue_disable(struct tsnep_queue *queue)
 {
+	if (queue->tx)
+		tsnep_tx_disable(queue->tx, &queue->napi);
+
 	napi_disable(&queue->napi);
 	tsnep_disable_irq(queue->adapter, queue->irq_mask);
 
@@ -1908,6 +2013,7 @@ int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
 	if (running)
 		tsnep_queue_disable(queue);
 
+	queue->tx->xsk_pool = pool;
 	queue->rx->xsk_pool = pool;
 
 	if (running) {
@@ -1928,6 +2034,7 @@ void tsnep_disable_xsk(struct tsnep_queue *queue)
 	tsnep_rx_free_zc(queue->rx);
 
 	queue->rx->xsk_pool = NULL;
+	queue->tx->xsk_pool = NULL;
 
 	if (running) {
 		tsnep_rx_reopen(queue->rx);
@@ -2438,7 +2545,8 @@ static int tsnep_probe(struct platform_device *pdev)
 
 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 			       NETDEV_XDP_ACT_NDO_XMIT |
-			       NETDEV_XDP_ACT_NDO_XMIT_SG;
+			       NETDEV_XDP_ACT_NDO_XMIT_SG |
+			       NETDEV_XDP_ACT_XSK_ZEROCOPY;
 
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);