
Commit e2ac75a

Merge branch 'basic-xdp-support-for-dqo-rda-queue-format'

Joshua Washington says:

====================
Basic XDP Support for DQO RDA Queue Format

This patch series updates the GVE XDP infrastructure and introduces XDP_PASS and XDP_DROP support for the DQO RDA queue format. The infrastructure changes of note include an allocation path refactor for XDP queues, and a unification of RX buffer sizes across queue formats.

This patch series will be followed by more patch series to introduce XDP_TX and XDP_REDIRECT support, as well as zero-copy and multi-buffer support.
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>

2 parents f1ae32a + 293b493 commit e2ac75a
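For background, XDP_PASS hands a frame on to the regular networking stack, while XDP_DROP frees it immediately in the driver. As an illustration only (not part of this commit), a minimal XDP program that exercises just these two verdicts, and so could run on a DQO RDA queue once this series is applied, might look like:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example program: drop UDP, pass everything else. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int drop_udp(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	struct iphdr *iph;

	/* Bounds checks keep the BPF verifier happy. */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	iph = (void *)(eth + 1);
	if ((void *)(iph + 1) > data_end)
		return XDP_PASS;

	return iph->protocol == IPPROTO_UDP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";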

File tree

9 files changed: +250 -345 lines

drivers/net/ethernet/google/gve/gve.h

Lines changed: 37 additions & 35 deletions
@@ -59,6 +59,8 @@
 
 #define GVE_MAX_RX_BUFFER_SIZE 4096
 
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 
 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -227,6 +229,11 @@ struct gve_rx_cnts {
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
 	struct gve_priv *gve;
+
+	u16 packet_buffer_size; /* Size of buffer posted to NIC */
+	u16 packet_buffer_truesize; /* Total size of RX buffer */
+	u16 rx_headroom;
+
 	union {
 		/* GQI fields */
 		struct {
@@ -235,7 +242,6 @@ struct gve_rx_ring {
 
 			/* threshold for posting new buffs and descs */
 			u32 db_threshold;
-			u16 packet_buffer_size;
 
 			u32 qpl_copy_pool_mask;
 			u32 qpl_copy_pool_head;
@@ -613,8 +619,6 @@ struct gve_tx_ring {
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 	struct xsk_buff_pool *xsk_pool;
-	u32 xdp_xsk_wakeup;
-	u32 xdp_xsk_done;
 	u64 xdp_xsk_sent;
 	u64 xdp_xmit;
 	u64 xdp_xmit_errors;
@@ -633,10 +637,18 @@ struct gve_notify_block {
 	u32 irq;
 };
 
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
+	u16 max_queues;
+	u16 num_queues;
+	u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
 	u16 max_queues;
-	u16 num_queues; /* current */
+	u16 num_queues; /* number of TX queues, excluding XDP queues */
+	u16 num_xdp_queues;
 };
 
 /* Tracks the available and used qpl IDs */
@@ -660,11 +672,11 @@ struct gve_ptype_lut {
 
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
-	struct gve_queue_config *qcfg;
+	struct gve_tx_queue_config *qcfg;
+
+	u16 num_xdp_rings;
 
 	u16 ring_size;
-	u16 start_idx;
-	u16 num_rings;
 	bool raw_addressing;
 
 	/* Allocated resources are returned here */
@@ -674,14 +686,15 @@ struct gve_tx_alloc_rings_cfg {
 /* Parameters for allocating resources for rx queues */
 struct gve_rx_alloc_rings_cfg {
 	/* tx config is also needed to determine QPL ids */
-	struct gve_queue_config *qcfg;
-	struct gve_queue_config *qcfg_tx;
+	struct gve_rx_queue_config *qcfg_rx;
+	struct gve_tx_queue_config *qcfg_tx;
 
 	u16 ring_size;
 	u16 packet_buffer_size;
 	bool raw_addressing;
 	bool enable_header_split;
 	bool reset_rss;
+	bool xdp;
 
 	/* Allocated resources are returned here */
 	struct gve_rx_ring *rx;
@@ -766,9 +779,8 @@ struct gve_priv {
 	u32 rx_copybreak; /* copy packets smaller than this */
 	u16 default_num_queues; /* default num queues to set up */
 
-	u16 num_xdp_queues;
-	struct gve_queue_config tx_cfg;
-	struct gve_queue_config rx_cfg;
+	struct gve_tx_queue_config tx_cfg;
+	struct gve_rx_queue_config rx_cfg;
 	u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
 
 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -838,7 +850,6 @@ struct gve_priv {
 	struct gve_ptype_lut *ptype_lut_dqo;
 
 	/* Must be a power of two. */
-	u16 data_buffer_size_dqo;
 	u16 max_rx_buffer_size; /* device limit */
 
 	enum gve_queue_format queue_format;
@@ -1041,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
 }
 
 /* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
-				  int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
 		return 0;
-	return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return 0;
-
-	return priv->num_xdp_queues;
+	return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
 }
 
 /* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
 				  bool is_qpl)
 {
 	if (!is_qpl)
@@ -1079,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 	return priv->tx_cfg.max_queues + rx_qid;
 }
 
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+				    int rx_qid)
 {
 	return tx_cfg->max_queues + rx_qid;
 }
@@ -1089,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 	return gve_tx_qpl_id(priv, 0);
 }
 
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
 {
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
@@ -1120,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
 
 static inline u32 gve_num_tx_queues(struct gve_priv *priv)
 {
-	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+	return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
 
 static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1224,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
 		     struct gve_rx_buf_state_dqo *buf_state);
 int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx);
+					  struct gve_rx_ring *rx,
+					  bool xdp);
 
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
@@ -1236,8 +1238,8 @@ int gve_adjust_config(struct gve_priv *priv,
 		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
 		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
 int gve_adjust_queues(struct gve_priv *priv,
-		      struct gve_queue_config new_rx_config,
-		      struct gve_queue_config new_tx_config,
+		      struct gve_rx_queue_config new_rx_config,
+		      struct gve_tx_queue_config new_tx_config,
 		      bool reset_rss);
 /* flow steering rule */
 int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
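Taken together, the gve.h changes move XDP TX queue accounting out of gve_priv and into struct gve_tx_queue_config. A standalone sketch of the resulting arithmetic, with illustrative numbers rather than real driver state:

/* Standalone sketch (not driver code): XDP TX queues now live in the TX
 * queue config, so the total TX queue count is a pure function of it.
 */
#include <stdio.h>
#include <stdint.h>

struct gve_tx_queue_config {
	uint16_t max_queues;
	uint16_t num_queues;	/* TX queues, excluding XDP queues */
	uint16_t num_xdp_queues;
};

static uint32_t num_tx_queues(const struct gve_tx_queue_config *cfg)
{
	/* Mirrors gve_num_tx_queues(): normal TX queues plus XDP TX queues. */
	return cfg->num_queues + cfg->num_xdp_queues;
}

int main(void)
{
	/* One XDP TX queue per RX queue, as gve_set_channels() arranges. */
	struct gve_tx_queue_config tx = {
		.max_queues = 16,
		.num_queues = 8,
		.num_xdp_queues = 8,
	};

	/* Prints 16: 8 + 8 fits exactly within max_queues. */
	printf("total tx queues: %u\n", num_tx_queues(&tx));
	return 0;
}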

drivers/net/ethernet/google/gve/gve_adminq.c

Lines changed: 1 addition & 3 deletions
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
 		.ntfy_id = cpu_to_be32(rx->ntfy_id),
 		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
 		.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+		.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
 	};
 
 	if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
 			cpu_to_be64(rx->data.data_bus);
 		cmd->create_rx_queue.index = cpu_to_be32(queue_index);
 		cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
-		cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
 	} else {
 		u32 qpl_id = 0;
 
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
 			cpu_to_be64(rx->dqo.complq.bus);
 		cmd->create_rx_queue.rx_data_ring_addr =
 			cpu_to_be64(rx->dqo.bufq.bus);
-		cmd->create_rx_queue.packet_buffer_size =
-			cpu_to_be16(priv->data_buffer_size_dqo);
 		cmd->create_rx_queue.rx_buff_ring_size =
 			cpu_to_be16(priv->rx_desc_cnt);
 		cmd->create_rx_queue.enable_rsc =
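With this change, both queue formats take the buffer size from the per-ring rx->packet_buffer_size in the common command setup; the cpu_to_be16() wrapper is needed because admin queue commands are a big-endian device ABI. A userspace stand-in for that conversion, with htons() playing the role of cpu_to_be16() (illustrative only):

/* Illustrative stand-in for the kernel's cpu_to_be16(): htons() performs
 * the same byte swap on a little-endian host.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t packet_buffer_size = 2048;	/* 0x0800, a typical GQI size */
	uint16_t wire = htons(packet_buffer_size);

	/* On a little-endian host the bytes land in memory as 08 00, the
	 * big-endian order the device expects.
	 */
	printf("host 0x%04x -> wire 0x%04x\n", packet_buffer_size, wire);
	return 0;
}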

drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c

Lines changed: 11 additions & 7 deletions
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
-	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+	buf_state->page_info.pad = rx->rx_headroom;
 	buf_state->last_single_ref_offset = 0;
 
 	/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state)
 {
-	const u16 data_buffer_size = priv->data_buffer_size_dqo;
+	const u16 data_buffer_size = rx->packet_buffer_truesize;
 	int pagecount;
 
 	/* Can't reuse if we only fit one buffer per page */
@@ -217,10 +218,9 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
 static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 				    struct gve_rx_buf_state_dqo *buf_state)
 {
-	struct gve_priv *priv = rx->gve;
 	netmem_ref netmem;
 
-	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
 	netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
 					&buf_state->page_info.page_offset,
 					&buf_state->page_info.buf_size,
@@ -232,12 +232,14 @@ static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
 	buf_state->page_info.netmem = netmem;
 	buf_state->page_info.page_address = netmem_address(netmem);
 	buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+	buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
 
 	return 0;
 }
 
 struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
-					  struct gve_rx_ring *rx)
+					  struct gve_rx_ring *rx,
+					  bool xdp)
 {
 	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
 	struct page_pool_params pp = {
@@ -248,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
 		.netdev = priv->dev,
 		.napi = &priv->ntfy_blocks[ntfy_id].napi,
 		.max_len = PAGE_SIZE,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
 	};
 
 	return page_pool_create(&pp);
@@ -302,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
 	}
 	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
 	desc->buf_addr = cpu_to_le64(buf_state->addr +
-				     buf_state->page_info.page_offset);
+				     buf_state->page_info.page_offset +
+				     buf_state->page_info.pad);
 
 	return 0;
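The net effect of these buffer-management changes: with XDP enabled, the page pool reserves XDP_PACKET_HEADROOM bytes of pad in front of each buffer, and gve_alloc_buffer() folds that pad into the address posted to the NIC, so an XDP program can later prepend headers in place. A standalone sketch of the address arithmetic, with hypothetical values (not driver code):

/* Standalone sketch (not driver code) of the DQO RX buffer layout with
 * XDP headroom. In the driver the pad comes from the page pool's
 * p.offset, which this series sets to XDP_PACKET_HEADROOM (256 bytes)
 * when an XDP program is attached.
 */
#include <stdio.h>
#include <stdint.h>

#define XDP_PACKET_HEADROOM	256

int main(void)
{
	uint64_t dma_addr = 0x100000;	/* buf_state->addr (hypothetical) */
	uint16_t page_offset = 0;	/* buf_state->page_info.page_offset */
	uint16_t pad = XDP_PACKET_HEADROOM;	/* buf_state->page_info.pad */

	/* Mirrors gve_alloc_buffer(): the NIC writes packet data after
	 * the pad, leaving headroom an XDP program may consume.
	 */
	uint64_t buf_addr = dma_addr + page_offset + pad;

	printf("packet data at 0x%llx, %u bytes of headroom before it\n",
	       (unsigned long long)buf_addr, pad);
	return 0;
}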
308312

drivers/net/ethernet/google/gve/gve_ethtool.c

Lines changed: 17 additions & 13 deletions
@@ -63,8 +63,8 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
 static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
 	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
 	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
-	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
-	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+	"tx_dma_mapping_error[%u]",
+	"tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
 };
 
 static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -417,9 +417,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 				data[i++] = value;
 			}
 		}
-		/* XDP xsk counters */
-		data[i++] = tx->xdp_xsk_wakeup;
-		data[i++] = tx->xdp_xsk_done;
+		/* XDP counters */
 		do {
 			start = u64_stats_fetch_begin(&priv->tx[ring].statss);
 			data[i] = tx->xdp_xsk_sent;
@@ -477,8 +475,8 @@ static int gve_set_channels(struct net_device *netdev,
 			    struct ethtool_channels *cmd)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
-	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
-	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+	struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+	struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
 	struct ethtool_channels old_settings;
 	int new_tx = cmd->tx_count;
 	int new_rx = cmd->rx_count;
@@ -493,10 +491,17 @@ static int gve_set_channels(struct net_device *netdev,
 	if (!new_rx || !new_tx)
 		return -EINVAL;
 
-	if (priv->num_xdp_queues &&
-	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
-		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
-		return -EINVAL;
+	if (priv->xdp_prog) {
+		if (new_tx != new_rx ||
+		    (2 * new_tx > priv->tx_cfg.max_queues)) {
+			dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+			return -EINVAL;
+		}
+
+		/* One XDP TX queue per RX queue. */
+		new_tx_cfg.num_xdp_queues = new_rx;
+	} else {
+		new_tx_cfg.num_xdp_queues = 0;
 	}
 
 	if (new_rx != priv->rx_cfg.num_queues &&
@@ -642,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev,
 	switch (etuna->id) {
 	case ETHTOOL_RX_COPYBREAK:
 	{
-		u32 max_copybreak = gve_is_gqi(priv) ?
-			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+		u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
 
 		len = *(u32 *)value;
 		if (len > max_copybreak)
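The gve_set_channels() change above pairs one XDP TX queue with every RX queue, which is where the 2 * new_tx <= max_queues bound comes from; for example, on a NIC exposing 16 TX queue slots, ethtool -L eth0 rx 8 tx 8 would be the largest symmetric setting accepted while an XDP program is installed (eth0 and the slot count are hypothetical). A standalone sketch of the check with worked numbers (illustrative, not driver code):

/* Standalone sketch (not driver code) of the channel constraint
 * gve_set_channels() enforces while an XDP program is installed:
 * symmetric RX/TX, with room for one XDP TX queue per RX queue
 * inside the TX queue budget.
 */
#include <stdbool.h>
#include <stdio.h>

static bool xdp_channels_ok(int new_tx, int new_rx, int tx_max_queues)
{
	return new_tx == new_rx && 2 * new_tx <= tx_max_queues;
}

int main(void)
{
	printf("%d\n", xdp_channels_ok(8, 8, 16));	/* 1: accepted */
	printf("%d\n", xdp_channels_ok(12, 12, 16));	/* 0: 12 + 12 > 16 */
	printf("%d\n", xdp_channels_ok(4, 8, 16));	/* 0: asymmetric */
	return 0;
}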
