@@ -337,9 +337,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	dma_addr_t desc_dma;
 	dma_addr_t buf_dma;
-	void *swdata;
 
 	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
 	if (!desc_rx) {
@@ -363,7 +363,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
 			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	*((void **)swdata) = page_address(page);
+	swdata->page = page;
+	swdata->flow_id = flow_idx;
 
 	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
 					desc_rx, desc_dma);
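The two hunks above stop treating the descriptor's software data as a bare `void *` and instead store both the backing page and the flow the buffer was posted to. The container they dereference is not part of this file; a minimal sketch of what `struct am65_cpsw_swdata` is assumed to look like (the companion change in am65-cpsw-nuss.h) is:

	/* Assumed layout: must fit in the CPPI5 host descriptor's software
	 * data area and carry both the RX page and its flow id.
	 */
	struct am65_cpsw_swdata {
		u32 flow_id;		/* RX flow the page was pushed to */
		struct page *page;	/* backing page of the RX buffer */
	};

Recording the flow id in swdata is what lets the cleanup path below recover the owning flow without the old descriptor-index bookkeeping.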
@@ -519,36 +520,31 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
 
 static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
 				      struct page *page,
-				      bool allow_direct,
-				      int desc_idx)
+				      bool allow_direct)
 {
 	page_pool_put_full_page(flow->page_pool, page, allow_direct);
-	flow->pages[desc_idx] = NULL;
 }
 
 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
-	struct am65_cpsw_rx_flow *flow = data;
+	struct am65_cpsw_rx_chn *rx_chn = data;
 	struct cppi5_host_desc_t *desc_rx;
-	struct am65_cpsw_rx_chn *rx_chn;
+	struct am65_cpsw_swdata *swdata;
 	dma_addr_t buf_dma;
+	struct page *page;
 	u32 buf_dma_len;
-	void *page_addr;
-	void **swdata;
-	int desc_idx;
+	u32 flow_id;
 
-	rx_chn = &flow->common->rx_chns;
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
+	page = swdata->page;
+	flow_id = swdata->flow_id;
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-	am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
+	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 }
 
 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -703,14 +699,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 				ret = -ENOMEM;
 				goto fail_rx;
 			}
-			flow->pages[i] = page;
 
 			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
 			if (ret < 0) {
 				dev_err(common->dev,
 					"cannot submit page to rx channel flow %d, error %d\n",
 					flow_idx, ret);
-				am65_cpsw_put_page(flow, page, false, i);
+				am65_cpsw_put_page(flow, page, false);
 				goto fail_rx;
 			}
 		}
@@ -764,8 +759,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
764
759
765
760
fail_rx :
766
761
for (i = 0 ; i < common -> rx_ch_num_flows ; i ++ )
767
- k3_udma_glue_reset_rx_chn (rx_chn -> rx_chn , i , & rx_chn -> flows [ i ] ,
768
- am65_cpsw_nuss_rx_cleanup , 0 );
762
+ k3_udma_glue_reset_rx_chn (rx_chn -> rx_chn , i , rx_chn ,
763
+ am65_cpsw_nuss_rx_cleanup , !! i );
769
764
770
765
am65_cpsw_destroy_xdp_rxqs (common );
771
766
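The `!!i` passed as the last argument reflects the move to one free-descriptor queue shared by all flows: only the reset of flow 0 drains the shared FDQ, and every later flow skips it. For reference, the glue-layer helper's prototype is assumed to be (sketch, taken to match include/linux/dma/k3-udma-glue.h):

	/* Assumed prototype: the trailing skip_fdq flag tells the glue layer
	 * whether to leave the (now shared) free-descriptor queue untouched.
	 */
	void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
				       u32 flow_num, void *data,
				       void (*cleanup)(void *data, dma_addr_t desc_dma),
				       bool skip_fdq);

The same data pointer (the whole rx_chn) is handed to every flow's reset, since the cleanup callback now recovers the flow from swdata->flow_id.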
@@ -817,11 +812,11 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 		dev_err(common->dev, "rx teardown timeout\n");
 	}
 
-	for (i = 0; i < common->rx_ch_num_flows; i++) {
+	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
 		napi_disable(&rx_chn->flows[i].napi_rx);
 		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 	}
 
 	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -1028,7 +1023,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 			     struct am65_cpsw_port *port,
 			     struct xdp_buff *xdp,
-			     int desc_idx, int cpu, int *len)
+			     int cpu, int *len)
 {
 	struct am65_cpsw_common *common = flow->common;
 	struct am65_cpsw_ndev_priv *ndev_priv;
@@ -1101,7 +1096,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
 	}
 
 	page = virt_to_head_page(xdp->data);
-	am65_cpsw_put_page(flow, page, true, desc_idx);
+	am65_cpsw_put_page(flow, page, true);
 
 out:
 	return ret;
@@ -1150,16 +1145,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 	struct am65_cpsw_ndev_stats *stats;
 	struct cppi5_host_desc_t *desc_rx;
 	struct device *dev = common->dev;
+	struct am65_cpsw_swdata *swdata;
 	struct page *page, *new_page;
 	dma_addr_t desc_dma, buf_dma;
 	struct am65_cpsw_port *port;
-	int headroom, desc_idx, ret;
 	struct net_device *ndev;
 	u32 flow_idx = flow->id;
 	struct sk_buff *skb;
 	struct xdp_buff xdp;
+	int headroom, ret;
 	void *page_addr;
-	void **swdata;
 	u32 *psdata;
 
 	*xdp_state = AM65_CPSW_XDP_PASS;
@@ -1182,8 +1177,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		 __func__, flow_idx, &desc_dma);
 
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	page_addr = *swdata;
-	page = virt_to_page(page_addr);
+	page = swdata->page;
+	page_addr = page_address(page);
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -1199,9 +1194,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
-					   rx_chn->dsize_log2);
-
 	skb = am65_cpsw_build_skb(page_addr, ndev,
 				  AM65_CPSW_MAX_PACKET_SIZE);
 	if (unlikely(!skb)) {
@@ -1213,7 +1205,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
 		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
 				 pkt_len, false);
-		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
+		*xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
 					       cpu, &pkt_len);
 		if (*xdp_state != AM65_CPSW_XDP_PASS)
 			goto allocate;
@@ -1247,18 +1239,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
 		return -ENOMEM;
 	}
 
-	flow->pages[desc_idx] = new_page;
-
 	if (netif_dormant(ndev)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_dropped++;
 		return 0;
 	}
 
 requeue:
 	ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
 	if (WARN_ON(ret < 0)) {
-		am65_cpsw_put_page(flow, new_page, true, desc_idx);
+		am65_cpsw_put_page(flow, new_page, true);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	}
@@ -2402,10 +2392,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 	for (i = 0; i < common->rx_ch_num_flows; i++) {
 		flow = &rx_chn->flows[i];
 		flow->page_pool = NULL;
-		flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
-					   sizeof(*flow->pages), GFP_KERNEL);
-		if (!flow->pages)
-			return -ENOMEM;
 	}
 
 	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
@@ -2455,10 +2441,12 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		flow = &rx_chn->flows[i];
 		flow->id = i;
 		flow->common = common;
+		flow->irq = -EINVAL;
 
 		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
 		rx_flow_cfg.rx_cfg.size = max_desc_num;
-		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+		/* share same FDQ for all flows */
+		rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;
 		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
 
 		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
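Because every flow now points ring_rxfdq0_id at the same free-descriptor queue, that queue has to be sized for all flows together rather than for one. A sketch of the sizing with hypothetical numbers:

	/* Illustrative only: with a hypothetical 512 descriptors per flow and
	 * 8 RX flows, the shared FDQ must hold 512 * 8 = 4096 free descriptors.
	 */
	rx_flow_cfg.rxfdq_cfg.size = max_desc_num * rx_cfg.flow_id_num;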
@@ -2496,6 +2484,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		if (ret) {
 			dev_err(dev, "failure requesting rx %d irq %u, %d\n",
 				i, flow->irq, ret);
+			flow->irq = -EINVAL;
 			goto err;
 		}
 	}
@@ -3349,8 +3338,8 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 
 	for (i = 0; i < common->rx_ch_num_flows; i++)
 		k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
-					  &rx_chan->flows[i],
-					  am65_cpsw_nuss_rx_cleanup, 0);
+					  rx_chan,
+					  am65_cpsw_nuss_rx_cleanup, !!i);
 
 	k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
 