
Commit 62d7f40

mina authored and kuba-moo committed
gve: support unreadable netmem
Declare PP_FLAG_ALLOW_UNREADABLE_NETMEM to turn on unreadable netmem support in GVE. We also drop any net_iov packets when header split is not enabled; we're unable to process packets where the header landed in unreadable netmem.

Use page_pool_dma_sync_netmem_for_cpu in lieu of dma_sync_single_range_for_cpu to correctly handle unreadable netmem, which must not be dma-sync'd.

Disable the rx_copybreak optimization when the payload is unreadable netmem, as that optimization needs access to the payload.

Signed-off-by: Mina Almasry <[email protected]>
Signed-off-by: Ziwei Xiao <[email protected]>
Signed-off-by: Harshitha Ramamurthy <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent c343966 commit 62d7f40
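
For context, here is a minimal sketch (not part of the patch) of the pattern the change follows when a driver opts an RX queue into unreadable netmem: the page pool advertises PP_FLAG_ALLOW_UNREADABLE_NETMEM plus the owning queue index only when header split guarantees headers land in host-readable memory, and any CPU access to payload data is first guarded with netmem_is_net_iov(). The example_* structure and names are hypothetical stand-ins for the driver's own; PP_FLAG_ALLOW_UNREADABLE_NETMEM, page_pool_params.queue_idx, page_pool_create() and netmem_is_net_iov() are the page pool APIs the patch relies on.

#include <linux/dma-direction.h>
#include <net/netmem.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Hypothetical per-queue context, standing in for the driver's RX ring. */
struct example_rx_queue {
	struct net_device *netdev;
	struct device *dev;
	struct napi_struct *napi;
	int q_num;
	bool header_split_enabled;
};

static struct page_pool *example_create_pool(struct example_rx_queue *rxq)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= 1024,		/* arbitrary for the sketch */
		.dev		= rxq->dev,
		.netdev		= rxq->netdev,
		.napi		= rxq->napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
	};

	/* Unreadable (e.g. device-memory) payload buffers are only usable
	 * when header split puts headers in host-readable memory, so the
	 * flag and the owning queue are advertised conditionally.
	 */
	if (rxq->header_split_enabled) {
		pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
		pp.queue_idx = rxq->q_num;
	}

	return page_pool_create(&pp);
}

/* CPU-side shortcuts (copybreak, header parsing) must be skipped when the
 * buffer backing the netmem reference cannot be mapped into kernel space.
 */
static bool example_payload_readable(netmem_ref netmem)
{
	return !netmem_is_net_iov(netmem);
}

This mirrors the design choice in the patch: readability is decided per buffer at runtime via netmem_is_net_iov(), while the opt-in itself is decided once per queue at page pool creation.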

File tree

2 files changed: +35 −5 lines changed


drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c

Lines changed: 5 additions & 0 deletions
@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
 		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
+	if (priv->header_split_enabled) {
+		pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+		pp.queue_idx = rx->q_num;
+	}
+
 	return page_pool_create(&pp);
 }
 

drivers/net/ethernet/google/gve/gve_rx_dqo.c

Lines changed: 30 additions & 5 deletions
@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	return 0;
 }
 
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+			 struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+	struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+	if (rx->dqo.page_pool) {
+		page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+						  page_info->netmem,
+						  page_info->page_offset,
+						  buf_len);
+	} else {
+		dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+					      page_info->page_offset +
+					      page_info->pad,
+					      buf_len, DMA_FROM_DEVICE);
+	}
+}
+
 /* Returns 0 if descriptor is completed successfully.
  * Returns -EINVAL if descriptor is invalid.
  * Returns -ENOMEM if data cannot be copied to skb.

@@ -793,13 +811,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		rx->rx_hsplit_unsplit_pkt += unsplit;
 		rx->rx_hsplit_bytes += hdr_len;
 		u64_stats_update_end(&rx->statss);
+	} else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+		   netmem_is_net_iov(buf_state->page_info.netmem)) {
+		/* when header split is disabled, the header went to the packet
+		 * buffer. If the packet buffer is a net_iov, those can't be
+		 * easily mapped into the kernel space to access the header
+		 * required to process the packet.
+		 */
+		goto error;
 	}
 
 	/* Sync the portion of dma buffer for CPU to read. */
-	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
-				      buf_state->page_info.page_offset +
-				      buf_state->page_info.pad,
-				      buf_len, DMA_FROM_DEVICE);
+	gve_dma_sync(priv, rx, buf_state, buf_len);
 
 	/* Append to current skb if one exists. */
 	if (rx->ctx.skb_head) {

@@ -837,7 +860,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		u64_stats_update_end(&rx->statss);
 	}
 
-	if (eop && buf_len <= priv->rx_copybreak) {
+	if (eop && buf_len <= priv->rx_copybreak &&
+	    !(rx->dqo.page_pool &&
+	      netmem_is_net_iov(buf_state->page_info.netmem))) {
 		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
 					       &buf_state->page_info, buf_len);
 		if (unlikely(!rx->ctx.skb_head))
