@@ -2,6 +2,7 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
 
 #include "idpf.h"
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
                 /* record length, and DMA address */
                 dma_unmap_len_set(tx_buf, len, size);
                 dma_unmap_addr_set(tx_buf, dma, dma);
+                tx_buf->type = LIBETH_SQE_FRAG;
 
                 /* align size to end of page */
                 max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -245,6 +247,8 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
                                 i = 0;
                         }
 
+                        tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
+
                         dma += max_data;
                         size -= max_data;
 
@@ -282,13 +286,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
         tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
                                                   size, td_tag);
 
-        IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+        first->type = LIBETH_SQE_SKB;
+        first->rs_idx = i;
 
-        /* set next_to_watch value indicating a packet is present */
-        first->next_to_watch = tx_desc;
+        IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
 
         nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-        netdev_tx_sent_queue(nq, first->bytecount);
+        netdev_tx_sent_queue(nq, first->bytes);
 
         idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
 }
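Note on the three hunks above: the driver-private next_to_watch pointer goes away; the head buffer is instead tagged LIBETH_SQE_SKB and records the ring index of its RS/EOP descriptor in rs_idx, each mapped fragment is tagged LIBETH_SQE_FRAG, and pass-over slots become LIBETH_SQE_EMPTY. A minimal model of that per-buffer state, built only from names visible in this diff; the model_* definitions are illustrative stand-ins, not the real declarations from <net/libeth/tx.h>, and the enum ordering is constrained only by the "type <= LIBETH_SQE_CTX" check in the cleanup hunk:

/*
 * Simplified model of the libeth buffer tagging used by this patch.
 * Names mirror the LIBETH_SQE_* values in the diff; everything else
 * is an assumption for illustration.
 */
enum model_sqe_type {
        MODEL_SQE_EMPTY,        /* nothing to unmap or free */
        MODEL_SQE_CTX,          /* slot backing a context descriptor */
        MODEL_SQE_FRAG,         /* DMA-mapped fragment */
        MODEL_SQE_SKB,          /* head buffer: owns the skb and the stats */
};

struct model_tx_buf {
        enum model_sqe_type type;
        unsigned int rs_idx;    /* head only: descriptor to poll for DD */
        unsigned int packets;   /* head only: 1 or the TSO segment count */
        unsigned int bytes;     /* head only: on-wire bytes for BQL/stats */
};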
@@ -306,8 +310,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
         struct idpf_base_tx_ctx_desc *ctx_desc;
         int ntu = txq->next_to_use;
 
-        memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
-        txq->tx_buf[ntu].ctx_entry = true;
+        txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
 
         ctx_desc = &txq->base_ctx[ntu];
 
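With typed buffers, the context-descriptor slot no longer needs the memset() plus ctx_entry flag; setting the type is enough, because the cleanup loop resets any slot it skips back to empty. A tiny sketch reusing the model types from the previous note (hypothetical helper, not driver code):

/* Mark a ring slot as backing a context descriptor. The cleaner later
 * sees type <= MODEL_SQE_CTX, flips it to MODEL_SQE_EMPTY and skips it,
 * so no other field has to be zeroed here.
 */
static void model_mark_ctx_slot(struct model_tx_buf *buf)
{
        buf->type = MODEL_SQE_CTX;
}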
@@ -396,11 +399,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
         first->skb = skb;
 
         if (tso) {
-                first->gso_segs = offload.tso_segs;
-                first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+                first->packets = offload.tso_segs;
+                first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
         } else {
-                first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
-                first->gso_segs = 1;
+                first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+                first->packets = 1;
         }
 
         idpf_tx_singleq_map(tx_q, first, &offload);
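The renamed fields keep the old accounting: packets is the segment count and bytes is the on-wire byte count, i.e. the skb length plus one repeated header per additional TSO segment, or at least ETH_ZLEN for short non-TSO frames. A standalone sketch of that arithmetic (function name and prototype are invented for illustration):

#include <stdbool.h>

#define MODEL_ETH_ZLEN  60      /* minimum Ethernet frame length, as ETH_ZLEN */

/* Illustration of the byte accounting in the hunk above. */
static unsigned int model_tx_bytes(unsigned int skb_len, bool tso,
                                   unsigned int tso_segs,
                                   unsigned int tso_hdr_len)
{
        if (tso)
                /* every segment after the first repeats the headers */
                return skb_len + (tso_segs - 1) * tso_hdr_len;

        /* non-TSO: short frames are counted as at least ETH_ZLEN */
        return skb_len < MODEL_ETH_ZLEN ? MODEL_ETH_ZLEN : skb_len;
}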
@@ -420,10 +423,15 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
                                   int *cleaned)
 {
-        unsigned int total_bytes = 0, total_pkts = 0;
+        struct libeth_sq_napi_stats ss = { };
         struct idpf_base_tx_desc *tx_desc;
         u32 budget = tx_q->clean_budget;
         s16 ntc = tx_q->next_to_clean;
+        struct libeth_cq_pp cp = {
+                .dev = tx_q->dev,
+                .ss = &ss,
+                .napi = napi_budget,
+        };
         struct idpf_netdev_priv *np;
         struct idpf_tx_buf *tx_buf;
         struct netdev_queue *nq;
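The on-stack cp bundles what the shared completion helper needs for the whole poll: the DMA device, a libeth_sq_napi_stats accumulator, and the NAPI budget used when freeing skbs. The field meanings below are inferred from how this patch uses them and are assumptions about the libeth structures, whose real definitions live in <net/libeth/tx.h>:

/* Illustrative stand-ins for the per-poll completion parameters. */
struct model_sq_napi_stats {
        unsigned int packets;   /* packets completed in this poll */
        unsigned int bytes;     /* bytes completed in this poll */
};

struct model_cq_pp {
        void *dev;                              /* device handle for DMA unmapping */
        struct model_sq_napi_stats *ss;         /* where completions are counted */
        int napi;                               /* NAPI budget, as passed above */
};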
@@ -441,47 +449,23 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
                  * such. We can skip this descriptor since there is no buffer
                  * to clean.
                  */
-                if (tx_buf->ctx_entry) {
-                        /* Clear this flag here to avoid stale flag values when
-                         * this buffer is used for actual data in the future.
-                         * There are cases where the tx_buf struct / the flags
-                         * field will not be cleared before being reused.
-                         */
-                        tx_buf->ctx_entry = false;
+                if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+                        tx_buf->type = LIBETH_SQE_EMPTY;
                         goto fetch_next_txq_desc;
                 }
 
-                /* if next_to_watch is not set then no work pending */
-                eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
-                if (!eop_desc)
-                        break;
-
-                /* prevent any other reads prior to eop_desc */
+                /* prevent any other reads prior to type */
                 smp_rmb();
 
+                eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
                 /* if the descriptor isn't done, no work yet to do */
                 if (!(eop_desc->qw1 &
                       cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
                         break;
 
-                /* clear next_to_watch to prevent false hangs */
-                tx_buf->next_to_watch = NULL;
-
                 /* update the statistics for this packet */
-                total_bytes += tx_buf->bytecount;
-                total_pkts += tx_buf->gso_segs;
-
-                napi_consume_skb(tx_buf->skb, napi_budget);
-
-                /* unmap skb header data */
-                dma_unmap_single(tx_q->dev,
-                                 dma_unmap_addr(tx_buf, dma),
-                                 dma_unmap_len(tx_buf, len),
-                                 DMA_TO_DEVICE);
-
-                /* clear tx_buf data */
-                tx_buf->skb = NULL;
-                dma_unmap_len_set(tx_buf, len, 0);
+                libeth_tx_complete(tx_buf, &cp);
 
                 /* unmap remaining buffers */
                 while (tx_desc != eop_desc) {
@@ -495,13 +479,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
                         }
 
                         /* unmap any remaining paged data */
-                        if (dma_unmap_len(tx_buf, len)) {
-                                dma_unmap_page(tx_q->dev,
-                                               dma_unmap_addr(tx_buf, dma),
-                                               dma_unmap_len(tx_buf, len),
-                                               DMA_TO_DEVICE);
-                                dma_unmap_len_set(tx_buf, len, 0);
-                        }
+                        libeth_tx_complete(tx_buf, &cp);
                 }
 
                 /* update budget only if we did something */
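Taken together, the cleanup hunks reduce per-packet teardown to: check the buffer type, poll the descriptor at rs_idx for DD, then hand the head buffer and each following fragment to libeth_tx_complete(), which is expected to unmap, free and count them according to their type. A compressed model of that flow, built only from what the diff shows; every model_* name is hypothetical and the struct/enum sketches from the earlier notes are reused:

#include <stdbool.h>

/* Stand-ins for the DD-bit check on the descriptor at rs_idx and for
 * libeth_tx_complete(); both are hypothetical.
 */
static bool model_dd_set(unsigned int rs_idx);
static void model_complete_one(struct model_tx_buf *buf, struct model_cq_pp *cp);

/* One iteration of the cleanup loop, returning the next index to clean. */
static unsigned int model_clean_one(struct model_tx_buf *ring, unsigned int count,
                                    unsigned int ntc, struct model_cq_pp *cp)
{
        struct model_tx_buf *buf = &ring[ntc];

        /* empty or context slot: nothing mapped, reset and move on */
        if (buf->type <= MODEL_SQE_CTX) {
                buf->type = MODEL_SQE_EMPTY;
                return (ntc + 1) % count;
        }

        /* the RS/EOP descriptor for this packet sits at buf->rs_idx */
        if (!model_dd_set(buf->rs_idx))
                return ntc;                     /* hardware not done yet */

        /* head buffer: frees the skb and adds packets/bytes into cp->ss */
        model_complete_one(buf, cp);

        /* remaining fragments up to and including the RS index: unmap only */
        while (ntc != buf->rs_idx) {
                ntc = (ntc + 1) % count;
                model_complete_one(&ring[ntc], cp);
        }

        return (ntc + 1) % count;
}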
@@ -521,19 +499,19 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
         ntc += tx_q->desc_count;
         tx_q->next_to_clean = ntc;
 
-        *cleaned += total_pkts;
+        *cleaned += ss.packets;
 
         u64_stats_update_begin(&tx_q->stats_sync);
-        u64_stats_add(&tx_q->q_stats.packets, total_pkts);
-        u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+        u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+        u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
         u64_stats_update_end(&tx_q->stats_sync);
 
         np = netdev_priv(tx_q->netdev);
         nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 
         dont_wake = np->state != __IDPF_VPORT_UP ||
                     !netif_carrier_ok(tx_q->netdev);
-        __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+        __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
                                    IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
                                    dont_wake);
 