
Commit d9028db

alobakin authored and anguy11 committed
idpf: convert to libeth Tx buffer completion
&idpf_tx_buf is almost identical to the previous generations, as is the way it's handled. Moreover, relying on dma_unmap_addr() and !!buf->skb instead of explicitly defining the buffer's type was never good. Use the newly added libeth helpers to do it properly and reduce the copy-paste around the Tx code.

Reviewed-by: Przemek Kitszel <[email protected]>
Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
1 parent 080d72f commit d9028db

File tree

3 files changed: +105, -232 lines changed

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 30 additions & 52 deletions
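The heart of the conversion is that every Tx buffer slot now carries an explicit type instead of being classified through dma_unmap_addr() and !!buf->skb. A rough sketch of that model, reconstructed only from the enum values and fields the hunks below use; the authoritative definitions live in include/net/libeth/tx.h and may differ in order and content:

/* Sketch of the libeth Tx buffer typing used throughout this diff.
 * Only the relative order of EMPTY and CTX is load-bearing here:
 * the cleaning loop skips everything with type <= LIBETH_SQE_CTX.
 */
enum libeth_sqe_type {
	LIBETH_SQE_EMPTY,	/* nothing attached, nothing to clean */
	LIBETH_SQE_CTX,		/* context descriptor, no data buffer */
	LIBETH_SQE_FRAG,	/* mapped fragment: DMA unmap only */
	LIBETH_SQE_SKB,		/* EOP buffer: owns the skb, stats, rs_idx */
};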
@@ -2,6 +2,7 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
 
 #include "idpf.h"
 
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 		/* record length, and DMA address */
 		dma_unmap_len_set(tx_buf, len, size);
 		dma_unmap_addr_set(tx_buf, dma, dma);
+		tx_buf->type = LIBETH_SQE_FRAG;
 
 		/* align size to end of page */
 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -245,6 +247,8 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 				i = 0;
 			}
 
+			tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
+
 			dma += max_data;
 			size -= max_data;
 
@@ -282,13 +286,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
 	tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
 						  size, td_tag);
 
-	IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+	first->type = LIBETH_SQE_SKB;
+	first->rs_idx = i;
 
-	/* set next_to_watch value indicating a packet is present */
-	first->next_to_watch = tx_desc;
+	IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
 
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-	netdev_tx_sent_queue(nq, first->bytecount);
+	netdev_tx_sent_queue(nq, first->bytes);
 
 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
 }
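Note how this hunk swaps the stored next_to_watch descriptor pointer for rs_idx, the ring index of the packet's last descriptor. Condensed from the send and clean hunks of this diff (not a verbatim excerpt), the handshake becomes:

	/* send side: stamp the EOP position into the first buffer */
	first->type = LIBETH_SQE_SKB;	/* completion work lives here */
	first->rs_idx = i;		/* ring index of the last descriptor */

	/* clean side: recover the EOP descriptor from that index */
	eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
	if (!(eop_desc->qw1 & cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
		break;			/* hardware hasn't written it back yet */

Because the type field itself now says whether completion work is pending, the cleaner no longer has to NULL a pointer to "prevent false hangs", as the deleted comment further down puts it.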
@@ -306,8 +310,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
 	struct idpf_base_tx_ctx_desc *ctx_desc;
 	int ntu = txq->next_to_use;
 
-	memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
-	txq->tx_buf[ntu].ctx_entry = true;
+	txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
 
 	ctx_desc = &txq->base_ctx[ntu];
 
@@ -396,11 +399,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 	first->skb = skb;
 
 	if (tso) {
-		first->gso_segs = offload.tso_segs;
-		first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+		first->packets = offload.tso_segs;
+		first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
 	} else {
-		first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
-		first->gso_segs = 1;
+		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+		first->packets = 1;
 	}
 	idpf_tx_singleq_map(tx_q, first, &offload);
 
@@ -420,10 +423,15 @@
 static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
 				  int *cleaned)
 {
-	unsigned int total_bytes = 0, total_pkts = 0;
+	struct libeth_sq_napi_stats ss = { };
 	struct idpf_base_tx_desc *tx_desc;
 	u32 budget = tx_q->clean_budget;
 	s16 ntc = tx_q->next_to_clean;
+	struct libeth_cq_pp cp = {
+		.dev	= tx_q->dev,
+		.ss	= &ss,
+		.napi	= napi_budget,
+	};
 	struct idpf_netdev_priv *np;
 	struct idpf_tx_buf *tx_buf;
 	struct netdev_queue *nq;
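struct libeth_cq_pp bundles everything the generic completion helper needs per call: the device to unmap against, the stats accumulator, and whether the caller runs in NAPI context (which libeth can pass through to napi_consume_skb()). Judging by the open-coded cleanup the next two hunks delete, libeth_tx_complete() appears to boil down to one type dispatch per buffer; the following is a simplified illustration of that shape, not the verbatim helper from include/net/libeth/tx.h:

/* sketch_tx_complete() is a hypothetical, condensed stand-in for
 * libeth_tx_complete(); field and type names are taken from this diff.
 */
static void sketch_tx_complete(struct libeth_sqe *sqe,
			       const struct libeth_cq_pp *cp)
{
	if (sqe->type <= LIBETH_SQE_CTX)	/* EMPTY or CTX: no buffer */
		goto done;

	/* every mapped buffer gets the same unmap treatment */
	dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
		       dma_unmap_len(sqe, len), DMA_TO_DEVICE);

	/* only the EOP buffer owns the skb and the packet stats */
	if (sqe->type == LIBETH_SQE_SKB) {
		cp->ss->packets += sqe->packets;
		cp->ss->bytes += sqe->bytes;
		napi_consume_skb(sqe->skb, cp->napi);
	}

done:
	sqe->type = LIBETH_SQE_EMPTY;	/* mark the slot clean */
}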
@@ -441,47 +449,23 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
 		 * such. We can skip this descriptor since there is no buffer
 		 * to clean.
 		 */
-		if (tx_buf->ctx_entry) {
-			/* Clear this flag here to avoid stale flag values when
-			 * this buffer is used for actual data in the future.
-			 * There are cases where the tx_buf struct / the flags
-			 * field will not be cleared before being reused.
-			 */
-			tx_buf->ctx_entry = false;
+		if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+			tx_buf->type = LIBETH_SQE_EMPTY;
 			goto fetch_next_txq_desc;
 		}
 
-		/* if next_to_watch is not set then no work pending */
-		eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
-		if (!eop_desc)
-			break;
-
-		/* prevent any other reads prior to eop_desc */
+		/* prevent any other reads prior to type */
 		smp_rmb();
 
+		eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
 		/* if the descriptor isn't done, no work yet to do */
 		if (!(eop_desc->qw1 &
 		      cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
 			break;
 
-		/* clear next_to_watch to prevent false hangs */
-		tx_buf->next_to_watch = NULL;
-
 		/* update the statistics for this packet */
-		total_bytes += tx_buf->bytecount;
-		total_pkts += tx_buf->gso_segs;
-
-		napi_consume_skb(tx_buf->skb, napi_budget);
-
-		/* unmap skb header data */
-		dma_unmap_single(tx_q->dev,
-				 dma_unmap_addr(tx_buf, dma),
-				 dma_unmap_len(tx_buf, len),
-				 DMA_TO_DEVICE);
-
-		/* clear tx_buf data */
-		tx_buf->skb = NULL;
-		dma_unmap_len_set(tx_buf, len, 0);
+		libeth_tx_complete(tx_buf, &cp);
 
 		/* unmap remaining buffers */
 		while (tx_desc != eop_desc) {
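One subtlety in the hunk above: smp_rmb() stays, but what it orders changes. The barrier used to sit after the next_to_watch load; it now sits after the type check, so the rs_idx and descriptor reads cannot be satisfied before a valid type is observed. A condensed view of the reader-side sequence from this hunk, with numbered annotations:

	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {	/* 1: read the type */
		tx_buf->type = LIBETH_SQE_EMPTY;
		goto fetch_next_txq_desc;
	}

	smp_rmb();	/* 2: no later read may be hoisted above the type check */

	/* 3: only now read rs_idx and the descriptor it points at */
	eop_desc = &tx_q->base_tx[tx_buf->rs_idx];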
@@ -495,13 +479,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
 			}
 
 			/* unmap any remaining paged data */
-			if (dma_unmap_len(tx_buf, len)) {
-				dma_unmap_page(tx_q->dev,
-					       dma_unmap_addr(tx_buf, dma),
-					       dma_unmap_len(tx_buf, len),
-					       DMA_TO_DEVICE);
-				dma_unmap_len_set(tx_buf, len, 0);
-			}
+			libeth_tx_complete(tx_buf, &cp);
 		}
 
 	/* update budget only if we did something */
@@ -521,19 +499,19 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
 	ntc += tx_q->desc_count;
 	tx_q->next_to_clean = ntc;
 
-	*cleaned += total_pkts;
+	*cleaned += ss.packets;
 
 	u64_stats_update_begin(&tx_q->stats_sync);
-	u64_stats_add(&tx_q->q_stats.packets, total_pkts);
-	u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+	u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+	u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
 	u64_stats_update_end(&tx_q->stats_sync);
 
 	np = netdev_priv(tx_q->netdev);
 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
 
 	dont_wake = np->state != __IDPF_VPORT_UP ||
 		    !netif_carrier_ok(tx_q->netdev);
-	__netif_txq_completed_wake(nq, total_pkts, total_bytes,
+	__netif_txq_completed_wake(nq, ss.packets, ss.bytes,
 				   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
 				   dont_wake);
 