Skip to content

Commit 245c7bc

Browse files
Lorenzo Bianconi authored
and kuba-moo committed
net: airoha: Move airoha_queues in airoha_qdma
QDMA controllers available in EN7581 SoC have independent tx/rx hw queues so move them in airoha_queues structure. Signed-off-by: Lorenzo Bianconi <[email protected]> Link: https://patch.msgid.link/795fc4797bffbf7f0a1351308aa9bf0e65b5126e.1722522582.git.lorenzo@kernel.org Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 16874d1 commit 245c7bc

File tree

1 file changed

+65
-61
lines changed

1 file changed

+65
-61
lines changed

drivers/net/ethernet/mediatek/airoha_eth.c

Lines changed: 65 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -785,6 +785,17 @@ struct airoha_hw_stats {
785785

786786
struct airoha_qdma {
787787
void __iomem *regs;
788+
789+
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
790+
791+
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
792+
struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
793+
794+
/* descriptor and packet buffers for qdma hw forward */
795+
struct {
796+
void *desc;
797+
void *q;
798+
} hfwd;
788799
};
789800

790801
struct airoha_gdm_port {
@@ -809,20 +820,10 @@ struct airoha_eth {
809820
struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
810821
struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
811822

812-
struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
813-
struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
814-
815823
struct net_device *napi_dev;
816-
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
817-
struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
818-
819-
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
820824

821-
/* descriptor and packet buffers for qdma hw forward */
822-
struct {
823-
void *desc;
824-
void *q;
825-
} hfwd;
825+
struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
826+
struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
826827
};
827828

828829
static u32 airoha_rr(void __iomem *base, u32 offset)
@@ -1390,7 +1391,7 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
13901391
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
13911392
struct airoha_qdma *qdma = &q->eth->qdma[0];
13921393
struct airoha_eth *eth = q->eth;
1393-
int qid = q - &eth->q_rx[0];
1394+
int qid = q - &qdma->q_rx[0];
13941395
int nframes = 0;
13951396

13961397
while (q->queued < q->ndesc - 1) {
@@ -1457,8 +1458,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
14571458
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
14581459
{
14591460
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
1461+
struct airoha_qdma *qdma = &q->eth->qdma[0];
14601462
struct airoha_eth *eth = q->eth;
1461-
int qid = q - &eth->q_rx[0];
1463+
int qid = q - &qdma->q_rx[0];
14621464
int done = 0;
14631465

14641466
while (done < budget) {
@@ -1549,7 +1551,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
15491551
.dev = eth->dev,
15501552
.napi = &q->napi,
15511553
};
1552-
int qid = q - &eth->q_rx[0], thr;
1554+
int qid = q - &qdma->q_rx[0], thr;
15531555
dma_addr_t dma_addr;
15541556

15551557
q->buf_size = PAGE_SIZE / 2;
@@ -1613,15 +1615,15 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth,
16131615
{
16141616
int i;
16151617

1616-
for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
1618+
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
16171619
int err;
16181620

16191621
if (!(RX_DONE_INT_MASK & BIT(i))) {
16201622
/* rx-queue not binded to irq */
16211623
continue;
16221624
}
16231625

1624-
err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
1626+
err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
16251627
qdma, RX_DSCP_NUM(i));
16261628
if (err)
16271629
return err;
@@ -1640,7 +1642,7 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
16401642
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
16411643
eth = irq_q->eth;
16421644
qdma = &eth->qdma[0];
1643-
id = irq_q - &eth->q_tx_irq[0];
1645+
id = irq_q - &qdma->q_tx_irq[0];
16441646

16451647
while (irq_q->queued > 0 && done < budget) {
16461648
u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1657,10 +1659,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
16571659
last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
16581660
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
16591661

1660-
if (qid >= ARRAY_SIZE(eth->q_tx))
1662+
if (qid >= ARRAY_SIZE(qdma->q_tx))
16611663
continue;
16621664

1663-
q = &eth->q_tx[qid];
1665+
q = &qdma->q_tx[qid];
16641666
if (!q->ndesc)
16651667
continue;
16661668

@@ -1726,7 +1728,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
17261728
struct airoha_queue *q,
17271729
struct airoha_qdma *qdma, int size)
17281730
{
1729-
int i, qid = q - &eth->q_tx[0];
1731+
int i, qid = q - &qdma->q_tx[0];
17301732
dma_addr_t dma_addr;
17311733

17321734
spin_lock_init(&q->lock);
@@ -1764,7 +1766,7 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
17641766
struct airoha_tx_irq_queue *irq_q,
17651767
struct airoha_qdma *qdma, int size)
17661768
{
1767-
int id = irq_q - &eth->q_tx_irq[0];
1769+
int id = irq_q - &qdma->q_tx_irq[0];
17681770
dma_addr_t dma_addr;
17691771

17701772
netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1792,15 +1794,15 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth,
17921794
{
17931795
int i, err;
17941796

1795-
for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
1796-
err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
1797+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
1798+
err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
17971799
qdma, IRQ_QUEUE_LEN(i));
17981800
if (err)
17991801
return err;
18001802
}
18011803

1802-
for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
1803-
err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
1804+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1805+
err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
18041806
qdma, TX_DSCP_NUM);
18051807
if (err)
18061808
return err;
@@ -1836,17 +1838,17 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
18361838
int size;
18371839

18381840
size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
1839-
eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
1840-
GFP_KERNEL);
1841-
if (!eth->hfwd.desc)
1841+
qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
1842+
GFP_KERNEL);
1843+
if (!qdma->hfwd.desc)
18421844
return -ENOMEM;
18431845

18441846
airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
18451847

18461848
size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
1847-
eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
1848-
GFP_KERNEL);
1849-
if (!eth->hfwd.q)
1849+
qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
1850+
GFP_KERNEL);
1851+
if (!qdma->hfwd.q)
18501852
return -ENOMEM;
18511853

18521854
airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
@@ -1934,8 +1936,8 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
19341936
airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
19351937

19361938
/* setup irq binding */
1937-
for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
1938-
if (!eth->q_tx[i].ndesc)
1939+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
1940+
if (!qdma->q_tx[i].ndesc)
19391941
continue;
19401942

19411943
if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
@@ -1960,8 +1962,8 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
19601962
airoha_qdma_init_qos(eth, qdma);
19611963

19621964
/* disable qdma rx delay interrupt */
1963-
for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
1964-
if (!eth->q_rx[i].ndesc)
1965+
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
1966+
if (!qdma->q_rx[i].ndesc)
19651967
continue;
19661968

19671969
airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
@@ -1995,18 +1997,18 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
19951997
airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
19961998
RX_DONE_INT_MASK);
19971999

1998-
for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
1999-
if (!eth->q_rx[i].ndesc)
2000+
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2001+
if (!qdma->q_rx[i].ndesc)
20002002
continue;
20012003

20022004
if (intr[1] & BIT(i))
2003-
napi_schedule(&eth->q_rx[i].napi);
2005+
napi_schedule(&qdma->q_rx[i].napi);
20042006
}
20052007
}
20062008

20072009
if (intr[0] & INT_TX_MASK) {
2008-
for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
2009-
struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
2010+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2011+
struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
20102012
u32 status, head;
20112013

20122014
if (!(intr[0] & TX_DONE_INT_MASK(i)))
@@ -2020,7 +2022,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
20202022
irq_q->head = head % irq_q->size;
20212023
irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
20222024

2023-
napi_schedule(&eth->q_tx_irq[i].napi);
2025+
napi_schedule(&qdma->q_tx_irq[i].napi);
20242026
}
20252027
}
20262028

@@ -2079,44 +2081,46 @@ static int airoha_hw_init(struct airoha_eth *eth)
20792081

20802082
static void airoha_hw_cleanup(struct airoha_eth *eth)
20812083
{
2084+
struct airoha_qdma *qdma = &eth->qdma[0];
20822085
int i;
20832086

2084-
for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
2085-
if (!eth->q_rx[i].ndesc)
2087+
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2088+
if (!qdma->q_rx[i].ndesc)
20862089
continue;
20872090

2088-
napi_disable(&eth->q_rx[i].napi);
2089-
netif_napi_del(&eth->q_rx[i].napi);
2090-
airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
2091-
if (eth->q_rx[i].page_pool)
2092-
page_pool_destroy(eth->q_rx[i].page_pool);
2091+
napi_disable(&qdma->q_rx[i].napi);
2092+
netif_napi_del(&qdma->q_rx[i].napi);
2093+
airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
2094+
if (qdma->q_rx[i].page_pool)
2095+
page_pool_destroy(qdma->q_rx[i].page_pool);
20932096
}
20942097

2095-
for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
2096-
napi_disable(&eth->q_tx_irq[i].napi);
2097-
netif_napi_del(&eth->q_tx_irq[i].napi);
2098+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
2099+
napi_disable(&qdma->q_tx_irq[i].napi);
2100+
netif_napi_del(&qdma->q_tx_irq[i].napi);
20982101
}
20992102

2100-
for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
2101-
if (!eth->q_tx[i].ndesc)
2103+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
2104+
if (!qdma->q_tx[i].ndesc)
21022105
continue;
21032106

2104-
airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
2107+
airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
21052108
}
21062109
}
21072110

21082111
static void airoha_qdma_start_napi(struct airoha_eth *eth)
21092112
{
2113+
struct airoha_qdma *qdma = &eth->qdma[0];
21102114
int i;
21112115

2112-
for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
2113-
napi_enable(&eth->q_tx_irq[i].napi);
2116+
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
2117+
napi_enable(&qdma->q_tx_irq[i].napi);
21142118

2115-
for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
2116-
if (!eth->q_rx[i].ndesc)
2119+
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
2120+
if (!qdma->q_rx[i].ndesc)
21172121
continue;
21182122

2119-
napi_enable(&eth->q_rx[i].napi);
2123+
napi_enable(&qdma->q_rx[i].napi);
21202124
}
21212125
}
21222126

@@ -2391,7 +2395,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
23912395
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
23922396

23932397
qdma = &eth->qdma[0];
2394-
q = &eth->q_tx[qid];
2398+
q = &qdma->q_tx[qid];
23952399
if (WARN_ON_ONCE(!q->ndesc))
23962400
goto error;
23972401

0 commit comments

Comments
 (0)