@@ -126,14 +126,6 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 	return 0;
 }
 
-static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
-{
-	struct fdma *fdma = &rx->fdma;
-
-	fdma->dcb_index++;
-	fdma->dcb_index &= fdma->n_dcbs - 1;
-}
-
 static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
 {
 	struct lan966x *lan966x = rx->lan966x;
@@ -355,8 +347,8 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 		if (!dcb_buf->used)
 			continue;
 
-		db = &fdma->dcbs[i].db[0];
-		if (!(db->status & FDMA_DCB_STATUS_DONE))
+		db = fdma_db_get(fdma, i, 0);
+		if (!fdma_db_is_done(db))
 			continue;
 
 		dcb_buf->dev->stats.tx_packets++;
@@ -396,19 +388,6 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
 }
 
-static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
-{
-	struct fdma *fdma = &rx->fdma;
-	struct fdma_db *db;
-
-	/* Check if there is any data */
-	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
-	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
-		return false;
-
-	return true;
-}
-
 static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
 {
 	struct lan966x *lan966x = rx->lan966x;
@@ -417,7 +396,7 @@ static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
 	struct fdma_db *db;
 	struct page *page;
 
-	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+	db = fdma_db_next_get(fdma);
 	page = rx->page[fdma->dcb_index][fdma->db_index];
 	if (unlikely(!page))
 		return FDMA_ERROR;
@@ -450,7 +429,7 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
 	u64 timestamp;
 
 	/* Get the received frame and unmap it */
-	db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+	db = fdma_db_next_get(fdma);
 	page = rx->page[fdma->dcb_index][fdma->db_index];
 
 	skb = build_skb(page_address(page), fdma->db_size);
@@ -508,7 +487,7 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 
 	/* Get all received skb */
 	while (counter < weight) {
-		if (!lan966x_fdma_rx_more_frames(rx))
+		if (!fdma_has_frames(fdma))
 			break;
 
 		counter++;
@@ -518,22 +497,22 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 			break;
 		case FDMA_ERROR:
 			lan966x_fdma_rx_free_page(rx);
-			lan966x_fdma_rx_advance_dcb(rx);
+			fdma_dcb_advance(fdma);
 			goto allocate_new;
 		case FDMA_REDIRECT:
 			redirect = true;
 			fallthrough;
 		case FDMA_TX:
-			lan966x_fdma_rx_advance_dcb(rx);
+			fdma_dcb_advance(fdma);
 			continue;
 		case FDMA_DROP:
 			lan966x_fdma_rx_free_page(rx);
-			lan966x_fdma_rx_advance_dcb(rx);
+			fdma_dcb_advance(fdma);
 			continue;
 		}
 
 		skb = lan966x_fdma_rx_get_frame(rx, src_port);
-		lan966x_fdma_rx_advance_dcb(rx);
+		fdma_dcb_advance(fdma);
 		if (!skb)
 			goto allocate_new;
 
@@ -597,7 +576,8 @@ static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
 
 	for (i = 0; i < fdma->n_dcbs; ++i) {
 		dcb_buf = &tx->dcbs_buf[i];
-		if (!dcb_buf->used && &fdma->dcbs[i] != fdma->last_dcb)
+		if (!dcb_buf->used &&
+		    !fdma_is_last(&tx->fdma, &tx->fdma.dcbs[i]))
 			return i;
 	}
 
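
Note on the helpers used above: the diff replaces open-coded DCB/DB accesses and driver-local helpers with calls into the shared fdma library (fdma_db_get(), fdma_db_next_get(), fdma_db_is_done(), fdma_has_frames(), fdma_dcb_advance(), fdma_is_last()). Judging purely from the code removed in this patch, their behaviour is presumably equivalent to the sketch below; this is an inference for readability, not a copy of the library's actual definitions, which may differ in detail.

/* Sketch only -- inferred from the driver-local code removed in this diff. */
static inline struct fdma_db *fdma_db_get(struct fdma *fdma, int dcb, int db)
{
	return &fdma->dcbs[dcb].db[db];
}

static inline struct fdma_db *fdma_db_next_get(struct fdma *fdma)
{
	/* DB currently pointed to by the rx indices */
	return fdma_db_get(fdma, fdma->dcb_index, fdma->db_index);
}

static inline bool fdma_db_is_done(struct fdma_db *db)
{
	/* Hardware sets FDMA_DCB_STATUS_DONE once the DB holds data */
	return db->status & FDMA_DCB_STATUS_DONE;
}

static inline bool fdma_has_frames(struct fdma *fdma)
{
	return fdma_db_is_done(fdma_db_next_get(fdma));
}

static inline void fdma_dcb_advance(struct fdma *fdma)
{
	/* Advance to the next DCB, wrapping around (n_dcbs is a power of two) */
	fdma->dcb_index++;
	fdma->dcb_index &= fdma->n_dcbs - 1;
}

static inline bool fdma_is_last(struct fdma *fdma, struct fdma_dcb *dcb)
{
	/* The DCB last handed to hardware must not be reused yet */
	return dcb == fdma->last_dcb;
}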