
Commit 542a58f

josh8551021 authored and kuba-moo committed
gve: introduce config-based allocation for XDP
An earlier patch series [1] introduced RX/TX ring allocation configuration structs which contained metadata used to allocate and configure new RX and TX rings. This led to a much cleaner and safer allocation pattern wherein queue resources were not deallocated until new queue resources were successfully allocated.

Migrate the XDP allocation path to use the same pattern to allow for the existence of a single allocation path instead of relying on XDP-specific allocation methods. These extra allocation methods result in the duplication of many existing behaviors while being prone to error when configuration changes unrelated to XDP occur.

Link: https://lore.kernel.org/netdev/[email protected]/ [1]

Reviewed-by: Praveen Kaligineedi <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: Joshua Washington <[email protected]>
Signed-off-by: Harshitha Ramamurthy <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
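The pattern the message describes can be sketched in isolation: build the new ring resources from a config struct first, and tear down the old ones only after allocation has succeeded. Below is a minimal sketch in plain C; every name in it (queue_cfg, rings, alloc_rings, free_rings, adjust_queues) is a hypothetical stand-in for the driver's actual config structs and its gve_adjust_queues()/gve_adjust_config() flow, not the gve code itself.

#include <stdlib.h>

/* Hypothetical stand-ins for the driver's allocation config structs. */
struct queue_cfg {
	unsigned int num_queues;
	size_t ring_bytes;
};

struct rings {
	struct queue_cfg cfg;
	void *mem;
};

/* Allocate a complete new set of rings described by @cfg into @out. */
static int alloc_rings(const struct queue_cfg *cfg, struct rings *out)
{
	out->mem = calloc(cfg->num_queues, cfg->ring_bytes);
	if (!out->mem)
		return -1;
	out->cfg = *cfg;
	return 0;
}

static void free_rings(struct rings *r)
{
	free(r->mem);
	r->mem = NULL;
}

/* The old rings are torn down only after the new allocation succeeds,
 * so a failed reconfiguration leaves the previous queues intact.
 */
static int adjust_queues(struct rings *live, const struct queue_cfg *new_cfg)
{
	struct rings fresh;

	if (alloc_rings(new_cfg, &fresh))
		return -1;	/* old rings untouched on failure */
	free_rings(live);
	*live = fresh;
	return 0;
}

With XDP folded into the same config structs, an XDP attach or detach becomes just another new config (a different num_xdp_queues) flowing through this single path, rather than a parallel XDP-only allocator.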
1 parent c2b9009 commit 542a58f

File tree

7 files changed: +118 −294 lines changed


drivers/net/ethernet/google/gve/gve.h

Lines changed: 26 additions & 30 deletions
@@ -631,10 +631,17 @@ struct gve_notify_block {
 	u32 irq;
 };
 
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
 	u16 max_queues;
-	u16 num_queues; /* current */
+	u16 num_queues;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
+	u16 max_queues;
+	u16 num_queues; /* number of TX queues, excluding XDP queues */
+	u16 num_xdp_queues;
 };
 
 /* Tracks the available and used qpl IDs */
@@ -658,11 +665,11 @@ struct gve_ptype_lut {
 
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
-	struct gve_queue_config *qcfg;
+	struct gve_tx_queue_config *qcfg;
+
+	u16 num_xdp_rings;
 
 	u16 ring_size;
-	u16 start_idx;
-	u16 num_rings;
 	bool raw_addressing;
 
 	/* Allocated resources are returned here */
@@ -672,8 +679,8 @@ struct gve_tx_alloc_rings_cfg {
 /* Parameters for allocating resources for rx queues */
 struct gve_rx_alloc_rings_cfg {
 	/* tx config is also needed to determine QPL ids */
-	struct gve_queue_config *qcfg;
-	struct gve_queue_config *qcfg_tx;
+	struct gve_rx_queue_config *qcfg_rx;
+	struct gve_tx_queue_config *qcfg_tx;
 
 	u16 ring_size;
 	u16 packet_buffer_size;
@@ -764,9 +771,8 @@ struct gve_priv {
 	u32 rx_copybreak; /* copy packets smaller than this */
 	u16 default_num_queues; /* default num queues to set up */
 
-	u16 num_xdp_queues;
-	struct gve_queue_config tx_cfg;
-	struct gve_queue_config rx_cfg;
+	struct gve_tx_queue_config tx_cfg;
+	struct gve_rx_queue_config rx_cfg;
 	u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
 
 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -1039,27 +1045,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
 }
 
 /* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
-				  int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
 		return 0;
-	return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return 0;
-
-	return priv->num_xdp_queues;
+	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
 }
 
 /* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
@@ -1077,7 +1072,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 	return priv->tx_cfg.max_queues + rx_qid;
 }
 
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+				    int rx_qid)
 {
 	return tx_cfg->max_queues + rx_qid;
 }
@@ -1087,7 +1083,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 	return gve_tx_qpl_id(priv, 0);
 }
 
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
 {
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
@@ -1118,7 +1114,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 
 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
 {
-	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
 }
 
 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1234,8 +1230,8 @@ int gve_adjust_config(struct gve_priv *priv,
 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_queues(struct gve_priv *priv,
-		      struct gve_queue_config new_rx_config,
-		      struct gve_queue_config new_tx_config,
+		      struct gve_rx_queue_config new_rx_config,
+		      struct gve_tx_queue_config new_tx_config,
 		      bool reset_rss);
 /* flow steering rule */
 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
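The helper changes above keep the QPL id layout (TX ids first, RX ids offset by the TX maximum) but now derive every count from the split config structs instead of a separate priv->num_xdp_queues field. A standalone sketch of that arithmetic, with stdint types standing in for the kernel's u16/u32 and hypothetical test values:

#include <assert.h>
#include <stdint.h>

/* Mirrors the split TX config introduced in gve.h above. */
struct gve_tx_queue_config {
	uint16_t max_queues;
	uint16_t num_queues;	/* excluding XDP queues */
	uint16_t num_xdp_queues;
};

/* Total TX rings: regular queues plus the per-RX-queue XDP TX queues. */
static uint32_t total_tx_queues(const struct gve_tx_queue_config *cfg)
{
	return (uint32_t)cfg->num_queues + cfg->num_xdp_queues;
}

/* RX QPL ids start after the TX QPL id space, sized by max_queues. */
static uint32_t rx_qpl_id(const struct gve_tx_queue_config *cfg, int rx_qid)
{
	return (uint32_t)cfg->max_queues + rx_qid;
}

int main(void)
{
	struct gve_tx_queue_config tx = {
		.max_queues = 16, .num_queues = 4, .num_xdp_queues = 4,
	};

	assert(total_tx_queues(&tx) == 8);	/* 4 regular + 4 XDP */
	assert(rx_qpl_id(&tx, 0) == 16);	/* first RX QPL id */
	return 0;
}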

drivers/net/ethernet/google/gve/gve_ethtool.c

Lines changed: 13 additions & 6 deletions
@@ -475,8 +475,8 @@ static int gve_set_channels(struct net_device *netdev,
 			    struct ethtool_channels *cmd)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
-	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
-	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+	struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+	struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
 	struct ethtool_channels old_settings;
 	int new_tx = cmd->tx_count;
 	int new_rx = cmd->rx_count;
@@ -491,10 +491,17 @@ static int gve_set_channels(struct net_device *netdev,
 	if (!new_rx || !new_tx)
 		return -EINVAL;
 
-	if (priv->num_xdp_queues &&
-	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
-		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
-		return -EINVAL;
+	if (priv->xdp_prog) {
+		if (new_tx != new_rx ||
+		    (2 * new_tx > priv->tx_cfg.max_queues)) {
+			dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+			return -EINVAL;
+		}
+
+		/* One XDP TX queue per RX queue. */
+		new_tx_cfg.num_xdp_queues = new_rx;
+	} else {
+		new_tx_cfg.num_xdp_queues = 0;
 	}
 
 	if (new_rx != priv->rx_cfg.num_queues &&