Skip to content

Commit d5a9ae2

Browse files
committed
Merge branch 'gve-improve-rx-buffer-length-management'
Ankit Garg says: ==================== gve: Improve RX buffer length management This patch series improves the management of the RX buffer length for the DQO queue format in the gve driver. The goal is to make RX buffer length config more explicit, easy to change, and performant by default. We accomplish that in four patches: 1. Currently, the buffer length is implicitly coupled with the header split setting, which is an unintuitive and restrictive design. The first patch decouples the RX buffer length from the header split configuration. 2. The second patch is a preparatory step for the third. It converts the XDP config verification method to use extack for better error reporting. 3. The third patch exposes the `rx_buf_len` parameter to userspace via ethtool, allowing the user to directly view or modify the RX buffer length if supported by the device. 4. The final patch improves the out-of-the-box RX single stream throughput by >10% by changing the driver's default behavior to select the maximum supported RX buffer length advertised by the device during initialization. ==================== Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 71bc986 + 09a81a0 commit d5a9ae2

File tree

4 files changed

+78
-24
lines changed

4 files changed

+78
-24
lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,6 @@
5959

6060
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
6161

62-
#define GVE_MAX_RX_BUFFER_SIZE 4096
63-
6462
#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
6563

6664
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
@@ -1169,6 +1167,12 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
11691167
priv->queue_format == GVE_GQI_QPL_FORMAT;
11701168
}
11711169

1170+
static inline bool gve_is_dqo(struct gve_priv *priv)
1171+
{
1172+
return priv->queue_format == GVE_DQO_RDA_FORMAT ||
1173+
priv->queue_format == GVE_DQO_QPL_FORMAT;
1174+
}
1175+
11721176
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
11731177
{
11741178
return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
@@ -1249,8 +1253,10 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
12491253
struct gve_rx_alloc_rings_cfg *cfg);
12501254
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
12511255
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1252-
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
12531256
bool gve_header_split_supported(const struct gve_priv *priv);
1257+
int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
1258+
struct netlink_ext_ack *extack,
1259+
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
12541260
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
12551261
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
12561262
/* rx buffer handling */

drivers/net/ethernet/google/gve/gve_adminq.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -987,6 +987,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
987987
dev_info(&priv->pdev->dev,
988988
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
989989
priv->max_rx_buffer_size, priv->header_buf_size);
990+
if (gve_is_dqo(priv) &&
991+
priv->max_rx_buffer_size > GVE_DEFAULT_RX_BUFFER_SIZE)
992+
priv->rx_cfg.packet_buffer_size =
993+
priv->max_rx_buffer_size;
990994
}
991995

992996
/* Read and store ring size ranges given by device */

drivers/net/ethernet/google/gve/gve_ethtool.c

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -529,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev,
529529
cmd->rx_pending = priv->rx_desc_cnt;
530530
cmd->tx_pending = priv->tx_desc_cnt;
531531

532+
kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;
533+
532534
if (!gve_header_split_supported(priv))
533535
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
534536
else if (priv->header_split_enabled)
@@ -589,6 +591,12 @@ static int gve_set_ringparam(struct net_device *netdev,
589591
int err;
590592

591593
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
594+
595+
err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
596+
&rx_alloc_cfg);
597+
if (err)
598+
return err;
599+
592600
err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
593601
&rx_alloc_cfg);
594602
if (err)
@@ -605,9 +613,9 @@ static int gve_set_ringparam(struct net_device *netdev,
605613
return err;
606614
} else {
607615
/* Set ring params for the next up */
608-
priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
609616
priv->rx_cfg.packet_buffer_size =
610617
rx_alloc_cfg.packet_buffer_size;
618+
priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
611619
priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
612620
priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
613621
}
@@ -946,7 +954,8 @@ static int gve_get_ts_info(struct net_device *netdev,
946954

947955
const struct ethtool_ops gve_ethtool_ops = {
948956
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
949-
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
957+
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
958+
ETHTOOL_RING_USE_RX_BUF_LEN,
950959
.get_drvinfo = gve_get_drvinfo,
951960
.get_strings = gve_get_strings,
952961
.get_sset_count = gve_get_sset_count,

drivers/net/ethernet/google/gve/gve_main.c

Lines changed: 54 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1707,18 +1707,28 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
17071707
return 0;
17081708
}
17091709

1710-
static int verify_xdp_configuration(struct net_device *dev)
1710+
static int gve_verify_xdp_configuration(struct net_device *dev,
1711+
struct netlink_ext_ack *extack)
17111712
{
17121713
struct gve_priv *priv = netdev_priv(dev);
17131714
u16 max_xdp_mtu;
17141715

17151716
if (dev->features & NETIF_F_LRO) {
1716-
netdev_warn(dev, "XDP is not supported when LRO is on.\n");
1717+
NL_SET_ERR_MSG_MOD(extack,
1718+
"XDP is not supported when LRO is on.");
17171719
return -EOPNOTSUPP;
17181720
}
17191721

17201722
if (priv->header_split_enabled) {
1721-
netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n");
1723+
NL_SET_ERR_MSG_MOD(extack,
1724+
"XDP is not supported when header-data split is enabled.");
1725+
return -EOPNOTSUPP;
1726+
}
1727+
1728+
if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
1729+
NL_SET_ERR_MSG_FMT_MOD(extack,
1730+
"XDP is not supported for Rx buf len %d, only %d supported.",
1731+
priv->rx_cfg.packet_buffer_size, SZ_2K);
17221732
return -EOPNOTSUPP;
17231733
}
17241734

@@ -1727,17 +1737,20 @@ static int verify_xdp_configuration(struct net_device *dev)
17271737
max_xdp_mtu -= GVE_RX_PAD;
17281738

17291739
if (dev->mtu > max_xdp_mtu) {
1730-
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
1731-
dev->mtu);
1740+
NL_SET_ERR_MSG_FMT_MOD(extack,
1741+
"XDP is not supported for mtu %d.",
1742+
dev->mtu);
17321743
return -EOPNOTSUPP;
17331744
}
17341745

17351746
if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
17361747
(2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
1737-
netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
1738-
priv->rx_cfg.num_queues,
1739-
priv->tx_cfg.num_queues,
1748+
netdev_warn(dev,
1749+
"XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.",
1750+
priv->rx_cfg.num_queues, priv->tx_cfg.num_queues,
17401751
priv->tx_cfg.max_queues);
1752+
NL_SET_ERR_MSG_MOD(extack,
1753+
"XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
17411754
return -EINVAL;
17421755
}
17431756
return 0;
@@ -1748,7 +1761,7 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
17481761
struct gve_priv *priv = netdev_priv(dev);
17491762
int err;
17501763

1751-
err = verify_xdp_configuration(dev);
1764+
err = gve_verify_xdp_configuration(dev, xdp->extack);
17521765
if (err)
17531766
return err;
17541767
switch (xdp->command) {
@@ -2041,14 +2054,6 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
20412054
priv->tx_timeo_cnt++;
20422055
}
20432056

2044-
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
2045-
{
2046-
if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
2047-
return GVE_MAX_RX_BUFFER_SIZE;
2048-
else
2049-
return GVE_DEFAULT_RX_BUFFER_SIZE;
2050-
}
2051-
20522057
/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
20532058
* header split is not allowed.
20542059
*/
@@ -2058,6 +2063,38 @@ bool gve_header_split_supported(const struct gve_priv *priv)
20582063
priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
20592064
}
20602065

2066+
int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
2067+
struct netlink_ext_ack *extack,
2068+
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
2069+
{
2070+
u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
2071+
2072+
if (rx_buf_len == old_rx_buf_len)
2073+
return 0;
2074+
2075+
/* device options may not always contain support for 4K buffers */
2076+
if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
2077+
NL_SET_ERR_MSG_MOD(extack,
2078+
"Modifying Rx buf len is not supported");
2079+
return -EOPNOTSUPP;
2080+
}
2081+
2082+
if (priv->xdp_prog && rx_buf_len != SZ_2K) {
2083+
NL_SET_ERR_MSG_MOD(extack,
2084+
"Rx buf len can only be 2048 when XDP is on");
2085+
return -EINVAL;
2086+
}
2087+
2088+
if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
2089+
NL_SET_ERR_MSG_MOD(extack,
2090+
"Rx buf len can only be 2048 or 4096");
2091+
return -EINVAL;
2092+
}
2093+
rx_alloc_cfg->packet_buffer_size = rx_buf_len;
2094+
2095+
return 0;
2096+
}
2097+
20612098
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
20622099
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
20632100
{
@@ -2080,8 +2117,6 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
20802117
return 0;
20812118

20822119
rx_alloc_cfg->enable_header_split = enable_hdr_split;
2083-
rx_alloc_cfg->packet_buffer_size =
2084-
gve_get_pkt_buf_size(priv, enable_hdr_split);
20852120

20862121
return 0;
20872122
}

0 commit comments

Comments
 (0)