
Commit 9eaff63

Author: Paolo Abeni (committed)
Merge branch 'net-ethernet-ti-am65-cpsw-fixes-to-multi-queue-rx-feature'
Roger Quadros says:

====================
net: ethernet: ti: am65-cpsw: Fixes to multi queue RX feature

On J7 platforms, setting up multiple RX flows was failing because the RX free
descriptor ring 0 is shared among all flows and we did not allocate enough
elements in it to accommodate all RX flows. Patch 1 fixes this.

The second patch fixes a warning that triggered if am65_cpsw_nuss_init_rx_chns()
failed and am65_cpsw_nuss_cleanup_rx_chns() was called after that.

Signed-off-by: Roger Quadros <[email protected]>
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
2 parents df3dff8 + ba3b7ac commit 9eaff63
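
Patch 1's core point, restated as a minimal hedged sketch: every RX flow returns its free buffer descriptors to the shared free-descriptor queue (FDQ) 0, so that ring must be sized for all flows together rather than for a single flow. The helper and parameter names below are illustrative only; the driver itself expresses this as rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num (see the diff below).

/* Hedged sketch, not driver code: size the shared FDQ 0 for every flow. */
static unsigned int shared_rxfdq0_size(unsigned int descs_per_flow,
				       unsigned int num_flows)
{
	/* all flows refill from FDQ 0, so it must hold everyone's descriptors */
	return descs_per_flow * num_flows;
}

The second patch initializes flow->irq to -EINVAL and resets it to -EINVAL when the IRQ request fails, presumably so a later cleanup pass can tell which flows actually hold a valid IRQ and so avoid the warning mentioned above.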


2 files changed: +37 additions, -44 deletions


drivers/net/ethernet/ti/am65-cpsw-nuss.c

Lines changed: 32 additions & 43 deletions
@@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	dma_addr_t desc_dma;
 	dma_addr_t buf_dma;
-	void *swdata;
 
 	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
 	if (!desc_rx) {
@@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
 			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	*((void **)swdata) = page_address(page);
+	swdata->page = page;
+	swdata->flow_id = flow_idx;
 
 	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
 					desc_rx, desc_dma);
@@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
 
 static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
 				      struct page *page,
-				      bool allow_direct,
-				      int desc_idx)
+				      bool allow_direct)
 {
 	page_pool_put_full_page(flow->page_pool, page, allow_direct);
-	flow->pages[desc_idx] = NULL;
 }
 
 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
-	struct am65_cpsw_rx_flow *flow = data;
+	struct am65_cpsw_rx_chn *rx_chn = data;
 	struct cppi5_host_desc_t *desc_rx;
-	struct am65_cpsw_rx_chn *rx_chn;
+	struct am65_cpsw_swdata *swdata;
 	dma_addr_t buf_dma;
+	struct page *page;
 	u32 buf_dma_len;
-	void *page_addr;
-	void **swdata;
-	int desc_idx;
+	u32 flow_id;
 
-	rx_chn = &flow->common->rx_chns;
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
+	page = swdata->page;
+	flow_id = swdata->flow_id;
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-	am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
+	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 }
 
 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 			ret = -ENOMEM;
 			goto fail_rx;
 		}
-		flow->pages[i] = page;
 
 		ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
 		if (ret < 0) {
 			dev_err(common->dev,
 				"cannot submit page to rx channel flow %d, error %d\n",
 				flow_idx, ret);
-			am65_cpsw_put_page(flow, page, false, i);
+			am65_cpsw_put_page(flow, page, false);
 			goto fail_rx;
 		}
 	}
@@ -764,8 +759,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 
 fail_rx:
 	for (i = 0; i < common->rx_ch_num_flows; i++)
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 
 	am65_cpsw_destroy_xdp_rxqs(common);
 
@@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 		dev_err(common->dev, "rx teardown timeout\n");
 	}
 
-	for (i = 0; i < common->rx_ch_num_flows; i++) {
+	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
 		napi_disable(&rx_chn->flows[i].napi_rx);
 		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 	}
 
 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -1028,7 +1023,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 			     struct am65_cpsw_port *port,
 			     struct xdp_buff *xdp,
-			     int desc_idx, int cpu, int *len)
+			     int cpu, int *len)
 {
 	struct am65_cpsw_common *common = flow->common;
 	struct am65_cpsw_ndev_priv *ndev_priv;
@@ -1101,7 +1096,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 	}
 
 	page = virt_to_head_page(xdp->data);
-	am65_cpsw_put_page(flow, page, true, desc_idx);
+	am65_cpsw_put_page(flow, page, true);
 
 out:
 	return ret;
@@ -1150,16 +1145,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 	struct am65_cpsw_ndev_stats *stats;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	struct page *page, *new_page;
 	dma_addr_t desc_dma, buf_dma;
 	struct am65_cpsw_port *port;
-	int headroom, desc_idx, ret;
 	struct net_device *ndev;
 	u32 flow_idx = flow->id;
 	struct sk_buff *skb;
 	struct xdp_buff xdp;
+	int headroom, ret;
 	void *page_addr;
-	void **swdata;
 	u32 *psdata;
 
 	*xdp_state = AM65_CPSW_XDP_PASS;
@@ -1182,8 +1177,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		 __func__, flow_idx, &desc_dma);
 
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
-	page = virt_to_page(page_addr);
+	page = swdata->page;
+	page_addr = page_address(page);
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -1199,9 +1194,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-
 	skb = am65_cpsw_build_skb(page_addr, ndev,
 				  AM65_CPSW_MAX_PACKET_SIZE);
 	if (unlikely(!skb)) {
@@ -1213,7 +1205,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
 				 pkt_len, false);
-		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
 					       cpu, &pkt_len);
 		if (*xdp_state != AM65_CPSW_XDP_PASS)
 			goto allocate;
@@ -1247,18 +1239,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		return -ENOMEM;
 	}
 
-	flow->pages[desc_idx] = new_page;
-
 	if (netif_dormant(ndev)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_dropped++;
 		return 0;
 	}
 
 requeue:
 	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
 	if (WARN_ON(ret < 0)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	}
@@ -2402,10 +2392,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 	for (i = 0; i < common->rx_ch_num_flows; i++) {
 		flow = &rx_chn->flows[i];
 		flow->page_pool = NULL;
-		flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
-					   sizeof(*flow->pages), GFP_KERNEL);
-		if (!flow->pages)
-			return -ENOMEM;
 	}
 
 	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
@@ -2455,10 +2441,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		flow = &rx_chn->flows[i];
 		flow->id = i;
 		flow->common = common;
+		flow->irq = -EINVAL;
 
 		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
 		rx_flow_cfg.rx_cfg.size = max_desc_num;
-		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+		/* share same FDQ for all flows */
+		rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
 		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
 
 		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
@@ -2496,6 +2484,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		if (ret) {
 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
 				i, flow->irq, ret);
+			flow->irq = -EINVAL;
 			goto err;
 		}
 	}
@@ -3349,8 +3338,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 
 	for (i = 0; i < common->rx_ch_num_flows; i++)
 		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
-					  &rx_chan->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+					  rx_chan,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 
 	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);

drivers/net/ethernet/ti/am65-cpsw-nuss.h

Lines changed: 5 additions & 1 deletion
@@ -101,10 +101,14 @@ struct am65_cpsw_rx_flow {
 	struct hrtimer rx_hrtimer;
 	unsigned long rx_pace_timeout;
 	struct page_pool *page_pool;
-	struct page **pages;
 	char name[32];
 };
 
+struct am65_cpsw_swdata {
+	u32 flow_id;
+	struct page *page;
+};
+
 struct am65_cpsw_rx_chn {
 	struct device *dev;
 	struct device *dma_dev;
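
Why struct am65_cpsw_swdata carries a flow_id, as a hedged sketch: once all flows share FDQ 0, the k3_udma_glue_reset_rx_chn() cleanup callback is passed the whole RX channel (rx_chn) rather than one flow, so the flow that owns a leftover descriptor's page can only be recovered from the descriptor's software data. The fragment below is a simplified stand-in for what am65_cpsw_nuss_rx_cleanup() does in the diff above, not the cppi5/k3-udma-glue API.

/* Hedged sketch: recover the owning flow from per-descriptor swdata. */
static struct am65_cpsw_rx_flow *
sketch_owning_flow(struct am65_cpsw_rx_chn *rx_chn,
		   struct am65_cpsw_swdata *swdata)
{
	/* swdata->flow_id was recorded by am65_cpsw_nuss_rx_push() */
	return &rx_chn->flows[swdata->flow_id];
}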
