@@ -785,6 +785,17 @@ struct airoha_hw_stats {
 
 struct airoha_qdma {
 	void __iomem *regs;
+
+	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+	/* descriptor and packet buffers for qdma hw forward */
+	struct {
+		void *desc;
+		void *q;
+	} hfwd;
 };
 
 struct airoha_gdm_port {
@@ -809,20 +820,10 @@ struct airoha_eth {
 	struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
 	struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
 
-	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-
 	struct net_device *napi_dev;
-	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
-	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
 
-	/* descriptor and packet buffers for qdma hw forward */
-	struct {
-		void *desc;
-		void *q;
-	} hfwd;
+	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
 };
 
 static u32 airoha_rr(void __iomem *base, u32 offset)
@@ -1390,7 +1391,7 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
 	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
 	struct airoha_qdma *qdma = &q->eth->qdma[0];
 	struct airoha_eth *eth = q->eth;
-	int qid = q - &eth->q_rx[0];
+	int qid = q - &qdma->q_rx[0];
 	int nframes = 0;
 
 	while (q->queued < q->ndesc - 1) {
@@ -1457,8 +1458,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 {
 	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+	struct airoha_qdma *qdma = &q->eth->qdma[0];
 	struct airoha_eth *eth = q->eth;
-	int qid = q - &eth->q_rx[0];
+	int qid = q - &qdma->q_rx[0];
 	int done = 0;
 
 	while (done < budget) {
@@ -1549,7 +1551,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
 		.dev = eth->dev,
 		.napi = &q->napi,
 	};
-	int qid = q - &eth->q_rx[0], thr;
+	int qid = q - &qdma->q_rx[0], thr;
 	dma_addr_t dma_addr;
 
 	q->buf_size = PAGE_SIZE / 2;
@@ -1613,15 +1615,15 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth,
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
 		int err;
 
 		if (!(RX_DONE_INT_MASK & BIT(i))) {
 			/* rx-queue not binded to irq */
 			continue;
 		}
 
-		err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+		err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
 						qdma, RX_DSCP_NUM(i));
 		if (err)
 			return err;
@@ -1640,7 +1642,7 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
 	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
 	eth = irq_q->eth;
 	qdma = &eth->qdma[0];
-	id = irq_q - &eth->q_tx_irq[0];
+	id = irq_q - &qdma->q_tx_irq[0];
 
 	while (irq_q->queued > 0 && done < budget) {
 		u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1657,10 +1659,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
 		last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
 		qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
 
-		if (qid >= ARRAY_SIZE(eth->q_tx))
+		if (qid >= ARRAY_SIZE(qdma->q_tx))
 			continue;
 
-		q = &eth->q_tx[qid];
+		q = &qdma->q_tx[qid];
 		if (!q->ndesc)
 			continue;
 
@@ -1726,7 +1728,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
 				     struct airoha_queue *q,
 				     struct airoha_qdma *qdma, int size)
 {
-	int i, qid = q - &eth->q_tx[0];
+	int i, qid = q - &qdma->q_tx[0];
 	dma_addr_t dma_addr;
 
 	spin_lock_init(&q->lock);
@@ -1764,7 +1766,7 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
 				   struct airoha_tx_irq_queue *irq_q,
 				   struct airoha_qdma *qdma, int size)
 {
-	int id = irq_q - &eth->q_tx_irq[0];
+	int id = irq_q - &qdma->q_tx_irq[0];
 	dma_addr_t dma_addr;
 
 	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1792,15 +1794,15 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth,
 {
 	int i, err;
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-		err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+		err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
 					      qdma, IRQ_QUEUE_LEN(i));
 		if (err)
 			return err;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-		err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+		err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
 						qdma, TX_DSCP_NUM);
 		if (err)
 			return err;
@@ -1836,17 +1838,17 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
 	int size;
 
 	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-	eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-					     GFP_KERNEL);
-	if (!eth->hfwd.desc)
+	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+					      GFP_KERNEL);
+	if (!qdma->hfwd.desc)
 		return -ENOMEM;
 
 	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
 
 	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-	eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-					  GFP_KERNEL);
-	if (!eth->hfwd.q)
+	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+					   GFP_KERNEL);
+	if (!qdma->hfwd.q)
 		return -ENOMEM;
 
 	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
@@ -1934,8 +1936,8 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
 	airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
 
 	/* setup irq binding */
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-		if (!eth->q_tx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+		if (!qdma->q_tx[i].ndesc)
 			continue;
 
 		if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
@@ -1960,8 +1962,8 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
 	airoha_qdma_init_qos(eth, qdma);
 
 	/* disable qdma rx delay interrupt */
-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-		if (!eth->q_rx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		if (!qdma->q_rx[i].ndesc)
 			continue;
 
 		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
@@ -1995,18 +1997,18 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 		airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
 					RX_DONE_INT_MASK);
 
-		for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-			if (!eth->q_rx[i].ndesc)
+		for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+			if (!qdma->q_rx[i].ndesc)
 				continue;
 
 			if (intr[1] & BIT(i))
-				napi_schedule(&eth->q_rx[i].napi);
+				napi_schedule(&qdma->q_rx[i].napi);
 		}
 	}
 
 	if (intr[0] & INT_TX_MASK) {
-		for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-			struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+		for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+			struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
 			u32 status, head;
 
 			if (!(intr[0] & TX_DONE_INT_MASK(i)))
@@ -2020,7 +2022,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 			irq_q->head = head % irq_q->size;
 			irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
 
-			napi_schedule(&eth->q_tx_irq[i].napi);
+			napi_schedule(&qdma->q_tx_irq[i].napi);
 		}
 	}
 
@@ -2079,44 +2081,46 @@ static int airoha_hw_init(struct airoha_eth *eth)
 
 static void airoha_hw_cleanup(struct airoha_eth *eth)
 {
+	struct airoha_qdma *qdma = &eth->qdma[0];
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-		if (!eth->q_rx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		if (!qdma->q_rx[i].ndesc)
 			continue;
 
-		napi_disable(&eth->q_rx[i].napi);
-		netif_napi_del(&eth->q_rx[i].napi);
-		airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
-		if (eth->q_rx[i].page_pool)
-			page_pool_destroy(eth->q_rx[i].page_pool);
+		napi_disable(&qdma->q_rx[i].napi);
+		netif_napi_del(&qdma->q_rx[i].napi);
+		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+		if (qdma->q_rx[i].page_pool)
+			page_pool_destroy(qdma->q_rx[i].page_pool);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-		napi_disable(&eth->q_tx_irq[i].napi);
-		netif_napi_del(&eth->q_tx_irq[i].napi);
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+		napi_disable(&qdma->q_tx_irq[i].napi);
+		netif_napi_del(&qdma->q_tx_irq[i].napi);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-		if (!eth->q_tx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+		if (!qdma->q_tx[i].ndesc)
 			continue;
 
-		airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
 	}
 }
 
 static void airoha_qdma_start_napi(struct airoha_eth *eth)
 {
+	struct airoha_qdma *qdma = &eth->qdma[0];
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
-		napi_enable(&eth->q_tx_irq[i].napi);
+	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+		napi_enable(&qdma->q_tx_irq[i].napi);
 
-	for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-		if (!eth->q_rx[i].ndesc)
+	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+		if (!qdma->q_rx[i].ndesc)
 			continue;
 
-		napi_enable(&eth->q_rx[i].napi);
+		napi_enable(&qdma->q_rx[i].napi);
 	}
 }
 
@@ -2391,7 +2395,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
 	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
 
 	qdma = &eth->qdma[0];
-	q = &eth->q_tx[qid];
+	q = &qdma->q_tx[qid];
 	if (WARN_ON_ONCE(!q->ndesc))
 		goto error;
 