Commit 57a070c

josh8551021 authored and kuba-moo committed

gve: update GQ RX to use buf_size
Commit ebdfae0 ("gve: adopt page pool for DQ RDA mode") introduced a
buf_size field to the gve_rx_slot_page_info struct, which can be used in
the datapath to take the place of the packet_buffer_size field, as it
will already be hot in the cache due to its extensive use. Using the
buf_size field in the datapath frees up the packet_buffer_size field in
the GQ-specific RX cacheline to be generalized for GQ and DQ (in the
next patch), as there is currently no common packet buffer size field
between the two queue formats.

Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: Joshua Washington <[email protected]>
Signed-off-by: Harshitha Ramamurthy <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
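
For readers skimming the diff below, here is a minimal userspace sketch of
the locality pattern the patch applies: copy a ring-wide value into the
per-buffer struct at setup time, then read it from there in the datapath,
where that struct is already hot. The struct layouts, field types, and
helper names are illustrative assumptions based only on the fields visible
in this diff, not the driver's actual definitions.

	/*
	 * Simplified sketch, not the real gve driver code. Only the
	 * fields this commit touches are modeled.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct gve_rx_slot_page_info {	/* per-buffer state, hot per packet */
		void *page;
		void *page_address;
		uint32_t page_offset;
		uint16_t buf_size;	/* filled once at buffer setup */
	};

	struct gve_rx_ring {		/* ring state, a separate cacheline */
		uint16_t packet_buffer_size;
		/* ... many other fields elided ... */
	};

	/* Setup path: stash the ring-wide size in the per-buffer struct. */
	static void setup_rx_buffer(struct gve_rx_ring *rx,
				    struct gve_rx_slot_page_info *page_info)
	{
		page_info->buf_size = rx->packet_buffer_size;
	}

	/*
	 * Datapath: read buf_size from page_info, which is already in
	 * cache, instead of touching the ring struct just for the size.
	 */
	static uint16_t rx_frag_size(const struct gve_rx_slot_page_info *pi)
	{
		return pi->buf_size;
	}

	int main(void)
	{
		struct gve_rx_ring rx = { .packet_buffer_size = 2048 };
		struct gve_rx_slot_page_info info = { 0 };

		setup_rx_buffer(&rx, &info);
		printf("frag size from hot struct: %u\n",
		       (unsigned int)rx_frag_size(&info));
		return 0;
	}
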
1 parent 542a58f commit 57a070c

1 file changed: +15 −9 lines changed

drivers/net/ethernet/google/gve/gve_rx.c

@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
-				dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+				struct gve_rx_slot_page_info *page_info,
+				dma_addr_t addr, struct page *page,
+				__be64 *slot_addr)
 {
 	page_info->page = page;
 	page_info->page_offset = 0;
 	page_info->page_address = page_address(page);
+	page_info->buf_size = rx->packet_buffer_size;
 	*slot_addr = cpu_to_be64(addr);
 	/* The page already has 1 ref */
 	page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
 		return err;
 	}
 
-	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+	gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
 	return 0;
 }
 
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 			struct page *page = rx->data.qpl->pages[i];
 			dma_addr_t addr = i * PAGE_SIZE;
 
-			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+			gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+					    page,
 					    &rx->data.data_ring[i].qpl_offset);
 			continue;
 		}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
 		rx->qpl_copy_pool[j].page = page;
 		rx->qpl_copy_pool[j].page_offset = 0;
 		rx->qpl_copy_pool[j].page_address = page_address(page);
+		rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
 
 		/* The page already has 1 ref. */
 		page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 
 	rx->gve = priv;
 	rx->q_num = idx;
+	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 
 	rx->mask = slots - 1;
 	rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 	rx->db_threshold = slots / 2;
 	gve_rx_init_ring_state_gqi(rx);
 
-	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_rx_ctx_clear(&rx->ctx);
 
 	return 0;
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 		copy_page_info->pad = page_info->pad;
 
 		skb = gve_rx_add_frags(napi, copy_page_info,
-				       rx->packet_buffer_size, len, ctx);
+				       copy_page_info->buf_size, len, ctx);
 		if (unlikely(!skb))
 			return NULL;
 
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
 	 * device.
 	 */
 	if (page_info->can_flip) {
-		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+		skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+				       len, ctx);
 		/* No point in recycling if we didn't get the skb */
 		if (skb) {
 			/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
 		skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
 					    page_info, len, napi,
 					    data_slot,
-					    rx->packet_buffer_size, ctx);
+					    page_info->buf_size, ctx);
 	} else {
 		skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
 				 page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
 	void *old_data;
 	int xdp_act;
 
-	xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+	xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
 	xdp_prepare_buff(&xdp, page_info->page_address +
 			 page_info->page_offset, GVE_RX_PAD,
 			 len, false);
