Skip to content

Commit 346fb86

Browse files
josh8551021 authored and kuba-moo committed
gve: update XDP allocation path support RX buffer posting
In order to support installing an XDP program on DQ, RX buffers need to be reposted using 4K buffers, which is larger than the default packet buffer size of 2K. This is needed to accommodate the extra head and tail that accompanies the data portion of an XDP buffer. Continuing to use 2K buffers would mean that the packet buffer size for the NIC would have to be restricted to 2048 - 320 - 256 = 1472B. However, this is problematic for two reasons: first, 1472 is not a packet buffer size accepted by GVE; second, at least 1474B of buffer space is needed to accommodate an MTU of 1460, which is the default on GCP.

As such, we allocate 4K buffers, and post a 2K section of those 4K buffers (offset relative to the XDP headroom) to the NIC for DMA to avoid a potential extra copy. Because the GQ-QPL datapath requires copies regardless, this change was not needed to support XDP in that case.

To capture this subtlety, a new field, packet_buffer_truesize, has been added to the rx ring struct to represent the size of the allocated buffer, while packet_buffer_size has been left to represent the portion of the buffer posted to the NIC.

Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: Praveen Kaligineedi <[email protected]>
Signed-off-by: Joshua Washington <[email protected]>
Signed-off-by: Harshitha Ramamurthy <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 904effd commit 346fb86

File tree

4 files changed

+53
-17
lines changed

4 files changed

+53
-17
lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,8 @@
5959

6060
#define GVE_MAX_RX_BUFFER_SIZE 4096
6161

62+
#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
63+
6264
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
6365

6466
#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -227,7 +229,11 @@ struct gve_rx_cnts {
227229
/* Contains datapath state used to represent an RX queue. */
228230
struct gve_rx_ring {
229231
struct gve_priv *gve;
230-
u16 packet_buffer_size;
232+
233+
u16 packet_buffer_size; /* Size of buffer posted to NIC */
234+
u16 packet_buffer_truesize; /* Total size of RX buffer */
235+
u16 rx_headroom;
236+
231237
union {
232238
/* GQI fields */
233239
struct {
@@ -688,6 +694,7 @@ struct gve_rx_alloc_rings_cfg {
688694
bool raw_addressing;
689695
bool enable_header_split;
690696
bool reset_rss;
697+
bool xdp;
691698

692699
/* Allocated resources are returned here */
693700
struct gve_rx_ring *rx;
@@ -1218,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
12181225
struct gve_rx_buf_state_dqo *buf_state);
12191226
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
12201227
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
1221-
struct gve_rx_ring *rx);
1228+
struct gve_rx_ring *rx,
1229+
bool xdp);
12221230

12231231
/* Reset */
12241232
void gve_schedule_reset(struct gve_priv *priv);

drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
139139
buf_state->page_info.page_offset = 0;
140140
buf_state->page_info.page_address =
141141
page_address(buf_state->page_info.page);
142-
buf_state->page_info.buf_size = rx->packet_buffer_size;
142+
buf_state->page_info.buf_size = rx->packet_buffer_truesize;
143+
buf_state->page_info.pad = rx->rx_headroom;
143144
buf_state->last_single_ref_offset = 0;
144145

145146
/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
162163
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
163164
struct gve_rx_buf_state_dqo *buf_state)
164165
{
165-
const u16 data_buffer_size = rx->packet_buffer_size;
166+
const u16 data_buffer_size = rx->packet_buffer_truesize;
166167
int pagecount;
167168

168169
/* Can't reuse if we only fit one buffer per page */
@@ -219,7 +220,7 @@ static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
219220
{
220221
netmem_ref netmem;
221222

222-
buf_state->page_info.buf_size = rx->packet_buffer_size;
223+
buf_state->page_info.buf_size = rx->packet_buffer_truesize;
223224
netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
224225
&buf_state->page_info.page_offset,
225226
&buf_state->page_info.buf_size,
@@ -231,12 +232,14 @@ static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
231232
buf_state->page_info.netmem = netmem;
232233
buf_state->page_info.page_address = netmem_address(netmem);
233234
buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
235+
buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
234236

235237
return 0;
236238
}
237239

238240
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
239-
struct gve_rx_ring *rx)
241+
struct gve_rx_ring *rx,
242+
bool xdp)
240243
{
241244
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
242245
struct page_pool_params pp = {
@@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
247250
.netdev = priv->dev,
248251
.napi = &priv->ntfy_blocks[ntfy_id].napi,
249252
.max_len = PAGE_SIZE,
250-
.dma_dir = DMA_FROM_DEVICE,
253+
.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
254+
.offset = xdp ? XDP_PACKET_HEADROOM : 0,
251255
};
252256

253257
return page_pool_create(&pp);
@@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
301305
}
302306
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
303307
desc->buf_addr = cpu_to_le64(buf_state->addr +
304-
buf_state->page_info.page_offset);
308+
buf_state->page_info.page_offset +
309+
buf_state->page_info.pad);
305310

306311
return 0;
307312

drivers/net/ethernet/google/gve/gve_main.c

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1149,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
11491149
napi->napi_id);
11501150
if (err)
11511151
goto err;
1152-
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1153-
MEM_TYPE_PAGE_SHARED, NULL);
1152+
if (gve_is_qpl(priv))
1153+
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1154+
MEM_TYPE_PAGE_SHARED,
1155+
NULL);
1156+
else
1157+
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1158+
MEM_TYPE_PAGE_POOL,
1159+
rx->dqo.page_pool);
11541160
if (err)
11551161
goto err;
11561162
rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1226,6 +1232,7 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
12261232
cfg->ring_size = priv->rx_desc_cnt;
12271233
cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
12281234
cfg->rx = priv->rx;
1235+
cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
12291236
}
12301237

12311238
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1461,6 +1468,7 @@ static int gve_configure_rings_xdp(struct gve_priv *priv,
14611468
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
14621469
tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
14631470

1471+
rx_alloc_cfg.xdp = !!num_xdp_rings;
14641472
return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
14651473
}
14661474

@@ -1629,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
16291637
static int verify_xdp_configuration(struct net_device *dev)
16301638
{
16311639
struct gve_priv *priv = netdev_priv(dev);
1640+
u16 max_xdp_mtu;
16321641

16331642
if (dev->features & NETIF_F_LRO) {
16341643
netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1641,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev)
16411650
return -EOPNOTSUPP;
16421651
}
16431652

1644-
if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
1653+
max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
1654+
if (priv->queue_format == GVE_GQI_QPL_FORMAT)
1655+
max_xdp_mtu -= GVE_RX_PAD;
1656+
1657+
if (dev->mtu > max_xdp_mtu) {
16451658
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
16461659
dev->mtu);
16471660
return -EOPNOTSUPP;

drivers/net/ethernet/google/gve/gve_rx_dqo.c

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -225,6 +225,14 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
225225
rx->q_num = idx;
226226
rx->packet_buffer_size = cfg->packet_buffer_size;
227227

228+
if (cfg->xdp) {
229+
rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
230+
rx->rx_headroom = XDP_PACKET_HEADROOM;
231+
} else {
232+
rx->packet_buffer_truesize = rx->packet_buffer_size;
233+
rx->rx_headroom = 0;
234+
}
235+
228236
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
229237
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
230238
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -254,7 +262,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
254262
goto err;
255263

256264
if (cfg->raw_addressing) {
257-
pool = gve_rx_create_page_pool(priv, rx);
265+
pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
258266
if (IS_ERR(pool))
259267
goto err;
260268

@@ -484,14 +492,15 @@ static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
484492
if (rx->dqo.page_pool) {
485493
skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
486494
buf_state->page_info.netmem,
487-
buf_state->page_info.page_offset,
488-
buf_len,
495+
buf_state->page_info.page_offset +
496+
buf_state->page_info.pad, buf_len,
489497
buf_state->page_info.buf_size);
490498
} else {
491499
skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
492500
buf_state->page_info.page,
493-
buf_state->page_info.page_offset,
494-
buf_len, buf_state->page_info.buf_size);
501+
buf_state->page_info.page_offset +
502+
buf_state->page_info.pad, buf_len,
503+
buf_state->page_info.buf_size);
495504
}
496505
}
497506

@@ -611,7 +620,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
611620

612621
/* Sync the portion of dma buffer for CPU to read. */
613622
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
614-
buf_state->page_info.page_offset,
623+
buf_state->page_info.page_offset +
624+
buf_state->page_info.pad,
615625
buf_len, DMA_FROM_DEVICE);
616626

617627
/* Append to current skb if one exists. */

0 commit comments

Comments (0)