
Commit 74d1412

alobakin authored and anguy11 committed
idpf: use libeth Rx buffer management for payload buffer
idpf uses Page Pool for data buffers with hardcoded buffer lengths of 4k for "classic" buffers and 2k for "short" ones. This is not flexible and does not ensure optimal memory usage. Why would you need 4k buffers when the MTU is 1500?

Use libeth for the data buffers and don't hardcode any buffer sizes. Let them be calculated from the MTU for "classic" buffers, then divide the truesize by 2 for "short" ones. The memory usage is now greatly reduced, and 2 buffer queues start to make sense: for frames <= 1024, you'll recycle (and resync) a page only after 4 HW writes rather than two.

Signed-off-by: Alexander Lobakin <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
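To make the sizing arithmetic concrete, here is a small standalone C sketch of the idea described above; the constants and the rounding helper are illustrative assumptions, not libeth's actual implementation:

/* Illustration only: rough MTU-based Rx buffer sizing, not libeth code. */
#include <stdio.h>

#define PAGE_SZ		4096	/* assumed 4k pages, as in the message */
#define ETH_HLEN	14
#define VLAN_HLEN	4

/* Round up to a power of two so buffers split a page evenly. */
static unsigned int pow2_roundup(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int mtu = 1500;
	/* "classic" buffers: sized from the max frame for this MTU */
	unsigned int classic = pow2_roundup(mtu + ETH_HLEN + VLAN_HLEN);
	/* "short" buffers: half of the "classic" truesize */
	unsigned int short_sz = classic / 2;

	printf("classic: %u bytes, short: %u bytes\n", classic, short_sz);
	printf("frames <= %u: page recycled after %u HW writes\n",
	       short_sz, PAGE_SZ / short_sz);
	return 0;
}

For a 1500-byte MTU this yields 2k "classic" and 1k "short" buffers, so a 4k page carries four short buffers and is recycled after four HW writes instead of two, matching the claim in the message.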
1 parent 90912f9 commit 74d1412

File tree

6 files changed: +120, -246 lines


drivers/net/ethernet/intel/idpf/Kconfig

Lines changed: 0 additions & 1 deletion
@@ -6,7 +6,6 @@ config IDPF
 	depends on PCI_MSI
 	select DIMLIB
 	select LIBETH
-	select PAGE_POOL
 	help
 	  This driver supports Intel(R) Infrastructure Data Path Function
 	  devices.
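The explicit PAGE_POOL select becomes redundant rather than lost: the LIBETH Kconfig entry itself selects PAGE_POOL, so the dependency is still satisfied transitively now that all Page Pool handling moves behind libeth.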

drivers/net/ethernet/intel/idpf/idpf.h

Lines changed: 0 additions & 2 deletions
@@ -264,7 +264,6 @@ struct idpf_port_stats {
  *		    the worst case.
  * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
  * @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
  * @num_rxq_grp: Number of RX queues in a group
  * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
  *	      group will yield total number of RX queues.
@@ -308,7 +307,6 @@ struct idpf_vport {
 	u32 rxq_desc_count;
 	u8 num_bufqs_per_qgrp;
 	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
-	u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
 	u16 num_rxq_grp;
 	struct idpf_rxq_group *rxq_grps;
 	u32 rxq_model;
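The removed @bufq_size array is not replaced one-for-one; as the idpf_singleq_txrx.c hunks below read rx_q->pp and rx_q->truesize, the buffer geometry now lives with each queue. A hypothetical condensation for illustration, with field names taken from this diff (the real queue structs carry many more members):

/* Illustration only: where the sizing knobs end up after the conversion.
 * Each Rx queue carries its own Page Pool and an MTU-derived truesize,
 * so struct idpf_vport no longer needs a per-vport bufq_size[] array.
 */
struct idpf_rx_queue_sketch {
	struct page_pool *pp;		/* queue's Page Pool */
	struct libeth_fqe *rx_buf;	/* libeth Rx buffer ring */
	u32 truesize;			/* calculated, not hardcoded */
	u32 desc_count;			/* ring length */
};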

drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c

Lines changed: 14 additions & 13 deletions
@@ -857,20 +857,24 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
 				      u16 cleaned_count)
 {
 	struct virtchnl2_singleq_rx_buf_desc *desc;
+	const struct libeth_fq_fp fq = {
+		.pp		= rx_q->pp,
+		.fqes		= rx_q->rx_buf,
+		.truesize	= rx_q->truesize,
+		.count		= rx_q->desc_count,
+	};
 	u16 nta = rx_q->next_to_alloc;
-	struct idpf_rx_buf *buf;
 
 	if (!cleaned_count)
 		return false;
 
 	desc = &rx_q->single_buf[nta];
-	buf = &rx_q->rx_buf[nta];
 
 	do {
 		dma_addr_t addr;
 
-		addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
-		if (unlikely(addr == DMA_MAPPING_ERROR))
+		addr = libeth_rx_alloc(&fq, nta);
+		if (addr == DMA_MAPPING_ERROR)
 			break;
 
 		/* Refresh the desc even if buffer_addrs didn't change
@@ -880,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
 		desc->hdr_addr = 0;
 		desc++;
 
-		buf++;
 		nta++;
 		if (unlikely(nta == rx_q->desc_count)) {
 			desc = &rx_q->single_buf[0];
-			buf = rx_q->rx_buf;
 			nta = 0;
 		}
 
@@ -1004,28 +1006,26 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
 		idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
 
 		rx_buf = &rx_q->rx_buf[ntc];
-		if (!fields.size) {
-			idpf_rx_put_page(rx_buf);
+		if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
 			goto skip_data;
-		}
 
-		idpf_rx_sync_for_cpu(rx_buf, fields.size);
 		if (skb)
 			idpf_rx_add_frag(rx_buf, skb, fields.size);
 		else
-			skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+			skb = idpf_rx_build_skb(rx_buf, fields.size);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb)
 			break;
 
 skip_data:
-		IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
+		rx_buf->page = NULL;
 
+		IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
 		cleaned_count++;
 
 		/* skip if it is non EOP desc */
-		if (idpf_rx_singleq_is_non_eop(rx_desc))
+		if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
 			continue;
 
 #define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
@@ -1062,6 +1062,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
 
 	rx_q->next_to_clean = ntc;
 
+	page_pool_nid_changed(rx_q->pp, numa_mem_id());
 	if (cleaned_count)
 		failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
