@@ -2,6 +2,7 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
 
 #include "idpf.h"
 
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 		/* record length, and DMA address */
 		dma_unmap_len_set(tx_buf, len, size);
 		dma_unmap_addr_set(tx_buf, dma, dma);
+		tx_buf->type = LIBETH_SQE_FRAG;
 
 		/* align size to end of page */
 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -237,14 +239,17 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 								  offsets,
 								  max_data,
 								  td_tag);
-			tx_desc++;
-			i++;
-
-			if (i == tx_q->desc_count) {
+			if (unlikely(++i == tx_q->desc_count)) {
+				tx_buf = &tx_q->tx_buf[0];
 				tx_desc = &tx_q->base_tx[0];
 				i = 0;
+			} else {
+				tx_buf++;
+				tx_desc++;
 			}
 
+			tx_buf->type = LIBETH_SQE_EMPTY;
+
 			dma += max_data;
 			size -= max_data;
 
@@ -257,21 +262,21 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 
 		tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
 							  size, td_tag);
-		tx_desc++;
-		i++;
 
-		if (i == tx_q->desc_count) {
+		if (unlikely(++i == tx_q->desc_count)) {
+			tx_buf = &tx_q->tx_buf[0];
 			tx_desc = &tx_q->base_tx[0];
 			i = 0;
+		} else {
+			tx_buf++;
+			tx_desc++;
 		}
 
 		size = skb_frag_size(frag);
 		data_len -= size;
 
 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
 				       DMA_TO_DEVICE);
-
-		tx_buf = &tx_q->tx_buf[i];
 	}
 
 	skb_tx_timestamp(first->skb);
@@ -282,13 +287,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 	tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
 						  size, td_tag);
 
-	IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+	first->type = LIBETH_SQE_SKB;
+	first->rs_idx = i;
 
-	/* set next_to_watch value indicating a packet is present */
-	first->next_to_watch = tx_desc;
+	IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
 
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-	netdev_tx_sent_queue(nq, first->bytecount);
+	netdev_tx_sent_queue(nq, first->bytes);
 
 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
 }
@@ -306,8 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
 	struct idpf_base_tx_ctx_desc *ctx_desc;
 	int ntu = txq->next_to_use;
 
-	memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
-	txq->tx_buf[ntu].ctx_entry = true;
+	txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
 
 	ctx_desc = &txq->base_ctx[ntu];
 
@@ -371,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 				      IDPF_TX_DESCS_FOR_CTX)) {
 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 
+		u64_stats_update_begin(&tx_q->stats_sync);
+		u64_stats_inc(&tx_q->q_stats.q_busy);
+		u64_stats_update_end(&tx_q->stats_sync);
+
 		return NETDEV_TX_BUSY;
 	}
 
@@ -396,11 +404,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 	first->skb = skb;
 
 	if (tso) {
-		first->gso_segs = offload.tso_segs;
-		first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+		first->packets = offload.tso_segs;
+		first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
 	} else {
-		first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
-		first->gso_segs = 1;
+		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+		first->packets = 1;
 	}
 
 	idpf_tx_singleq_map(tx_q, first, &offload);
 
@@ -420,10 +428,15 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
 				  int *cleaned)
 {
-	unsigned int total_bytes = 0, total_pkts = 0;
+	struct libeth_sq_napi_stats ss = { };
 	struct idpf_base_tx_desc *tx_desc;
 	u32 budget = tx_q->clean_budget;
 	s16 ntc = tx_q->next_to_clean;
+	struct libeth_cq_pp cp = {
+		.dev = tx_q->dev,
+		.ss = &ss,
+		.napi = napi_budget,
+	};
 	struct idpf_netdev_priv *np;
 	struct idpf_tx_buf *tx_buf;
 	struct netdev_queue *nq;
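The clean routine now hands everything to libeth through a completion-parameters block: cp.dev is used for DMA unmapping, cp.ss points at the on-stack stats accumulator, and cp.napi carries the NAPI budget used when freeing skbs. A rough sketch of the two structures as this diff uses them, assumed from <net/libeth/tx.h>; only the members referenced here are shown.

/* sketch: members as referenced by this patch, not the full definitions */
struct libeth_sq_napi_stats {
	u32	packets;	/* completed packets (TSO segments counted) */
	u32	bytes;		/* completed bytes */
};

struct libeth_cq_pp {
	struct device			*dev;	/* device for dma_unmap_*() */
	struct libeth_sq_napi_stats	*ss;	/* per-poll stats accumulator */
	u32				napi;	/* NAPI budget, forwarded to napi_consume_skb() */
};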
@@ -441,47 +454,26 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
 		 * such. We can skip this descriptor since there is no buffer
 		 * to clean.
 		 */
-		if (tx_buf->ctx_entry) {
-			/* Clear this flag here to avoid stale flag values when
-			 * this buffer is used for actual data in the future.
-			 * There are cases where the tx_buf struct / the flags
-			 * field will not be cleared before being reused.
-			 */
-			tx_buf->ctx_entry = false;
+		if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+			tx_buf->type = LIBETH_SQE_EMPTY;
 			goto fetch_next_txq_desc;
 		}
 
-		/* if next_to_watch is not set then no work pending */
-		eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
-		if (!eop_desc)
+		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
 			break;
 
-		/* prevent any other reads prior to eop_desc */
+		/* prevent any other reads prior to type */
 		smp_rmb();
 
+		eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
 		/* if the descriptor isn't done, no work yet to do */
 		if (!(eop_desc->qw1 &
 		      cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
 			break;
 
-		/* clear next_to_watch to prevent false hangs */
-		tx_buf->next_to_watch = NULL;
-
 		/* update the statistics for this packet */
-		total_bytes += tx_buf->bytecount;
-		total_pkts += tx_buf->gso_segs;
-
-		napi_consume_skb(tx_buf->skb, napi_budget);
-
-		/* unmap skb header data */
-		dma_unmap_single(tx_q->dev,
-				 dma_unmap_addr(tx_buf, dma),
-				 dma_unmap_len(tx_buf, len),
-				 DMA_TO_DEVICE);
-
-		/* clear tx_buf data */
-		tx_buf->skb = NULL;
-		dma_unmap_len_set(tx_buf, len, 0);
+		libeth_tx_complete(tx_buf, &cp);
 
 		/* unmap remaining buffers */
 		while (tx_desc != eop_desc) {
495
487
}
496
488
497
489
/* unmap any remaining paged data */
498
- if (dma_unmap_len (tx_buf , len )) {
499
- dma_unmap_page (tx_q -> dev ,
500
- dma_unmap_addr (tx_buf , dma ),
501
- dma_unmap_len (tx_buf , len ),
502
- DMA_TO_DEVICE );
503
- dma_unmap_len_set (tx_buf , len , 0 );
504
- }
490
+ libeth_tx_complete (tx_buf , & cp );
505
491
}
506
492
507
493
/* update budget only if we did something */
@@ -521,19 +507,19 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
521
507
ntc += tx_q -> desc_count ;
522
508
tx_q -> next_to_clean = ntc ;
523
509
524
- * cleaned += total_pkts ;
510
+ * cleaned += ss . packets ;
525
511
526
512
u64_stats_update_begin (& tx_q -> stats_sync );
527
- u64_stats_add (& tx_q -> q_stats .packets , total_pkts );
528
- u64_stats_add (& tx_q -> q_stats .bytes , total_bytes );
513
+ u64_stats_add (& tx_q -> q_stats .packets , ss . packets );
514
+ u64_stats_add (& tx_q -> q_stats .bytes , ss . bytes );
529
515
u64_stats_update_end (& tx_q -> stats_sync );
530
516
531
517
np = netdev_priv (tx_q -> netdev );
532
518
nq = netdev_get_tx_queue (tx_q -> netdev , tx_q -> idx );
533
519
534
520
dont_wake = np -> state != __IDPF_VPORT_UP ||
535
521
!netif_carrier_ok (tx_q -> netdev );
536
- __netif_txq_completed_wake (nq , total_pkts , total_bytes ,
522
+ __netif_txq_completed_wake (nq , ss . packets , ss . bytes ,
537
523
IDPF_DESC_UNUSED (tx_q ), IDPF_TX_WAKE_THRESH ,
538
524
dont_wake );
539
525
@@ -1134,8 +1120,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
 						     &work_done);
 
 	/* If work not completed, return budget and polling will return */
-	if (!clean_complete)
+	if (!clean_complete) {
+		idpf_vport_intr_set_wb_on_itr(q_vector);
 		return budget;
+	}
 
 	work_done = min_t(int, work_done, budget - 1);
 
@@ -1144,6 +1132,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
 	 */
 	if (likely(napi_complete_done(napi, work_done)))
 		idpf_vport_intr_update_itr_ena_irq(q_vector);
+	else
+		idpf_vport_intr_set_wb_on_itr(q_vector);
 
 	return work_done;
 }