Skip to content

Commit cdc74c9

Browse files
committed
Merge branch 'gve-queue-api'
Shailend Chand says:
====================
gve: Implement queue api

Following the discussion on https://patchwork.kernel.org/project/linux-media/patch/[email protected]/, the queue api defined by Mina is implemented for gve.

The first patch is just Mina's introduction of the api. The rest of the patches make surgical changes in gve to enable it to work correctly with only a subset of queues present (thus far it had assumed that either all queues are up or all are down). The final patch has the api implementation.

Changes since v1: clang warning fixes, kdoc warning fix, and addressed review comments.
====================

Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
2 parents 173e762 + ee24284 commit cdc74c9

File tree

11 files changed

+489
-406
lines changed

11 files changed

+489
-406
lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 7 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -610,6 +610,7 @@ struct gve_notify_block {
610610
struct gve_priv *priv;
611611
struct gve_tx_ring *tx; /* tx rings on this block */
612612
struct gve_rx_ring *rx; /* rx rings on this block */
613+
u32 irq;
613614
};
614615

615616
/* Tracks allowed and current queue settings */
@@ -637,26 +638,10 @@ struct gve_ptype_lut {
637638
struct gve_ptype ptypes[GVE_NUM_PTYPES];
638639
};
639640

640-
/* Parameters for allocating queue page lists */
641-
struct gve_qpls_alloc_cfg {
642-
struct gve_queue_config *tx_cfg;
643-
struct gve_queue_config *rx_cfg;
644-
645-
u16 num_xdp_queues;
646-
bool raw_addressing;
647-
bool is_gqi;
648-
649-
/* Allocated resources are returned here */
650-
struct gve_queue_page_list *qpls;
651-
};
652-
653641
/* Parameters for allocating resources for tx queues */
654642
struct gve_tx_alloc_rings_cfg {
655643
struct gve_queue_config *qcfg;
656644

657-
/* qpls must already be allocated */
658-
struct gve_queue_page_list *qpls;
659-
660645
u16 ring_size;
661646
u16 start_idx;
662647
u16 num_rings;
@@ -672,9 +657,6 @@ struct gve_rx_alloc_rings_cfg {
672657
struct gve_queue_config *qcfg;
673658
struct gve_queue_config *qcfg_tx;
674659

675-
/* qpls must already be allocated */
676-
struct gve_queue_page_list *qpls;
677-
678660
u16 ring_size;
679661
u16 packet_buffer_size;
680662
bool raw_addressing;
@@ -700,7 +682,6 @@ struct gve_priv {
700682
struct net_device *dev;
701683
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
702684
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
703-
struct gve_queue_page_list *qpls; /* array of num qpls */
704685
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
705686
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
706687
dma_addr_t irq_db_indices_bus;
@@ -1024,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
10241005
return priv->tx_cfg.max_queues + rx_qid;
10251006
}
10261007

1027-
/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
10281008
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
10291009
{
10301010
return tx_cfg->max_queues + rx_qid;
@@ -1035,7 +1015,6 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
10351015
return gve_tx_qpl_id(priv, 0);
10361016
}
10371017

1038-
/* Returns the index into priv->qpls where the first rx queue's QPL resides */
10391018
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
10401019
{
10411020
return gve_get_rx_qpl_id(tx_cfg, 0);
@@ -1089,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
10891068
enum dma_data_direction, gfp_t gfp_flags);
10901069
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
10911070
enum dma_data_direction);
1071+
/* qpls */
1072+
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1073+
u32 id, int pages);
1074+
void gve_free_queue_page_list(struct gve_priv *priv,
1075+
struct gve_queue_page_list *qpl,
1076+
u32 id);
10921077
/* tx handling */
10931078
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
10941079
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1125,11 +1110,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
11251110
void gve_schedule_reset(struct gve_priv *priv);
11261111
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
11271112
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1128-
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
11291113
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
11301114
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
11311115
int gve_adjust_config(struct gve_priv *priv,
1132-
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
11331116
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
11341117
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
11351118
int gve_adjust_queues(struct gve_priv *priv,

drivers/net/ethernet/google/gve/gve_adminq.c

Lines changed: 52 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -630,14 +630,15 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
630630
return gve_adminq_kick_and_wait(priv);
631631
}
632632

633-
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
633+
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
634+
union gve_adminq_command *cmd,
635+
u32 queue_index)
634636
{
635637
struct gve_rx_ring *rx = &priv->rx[queue_index];
636-
union gve_adminq_command cmd;
637638

638-
memset(&cmd, 0, sizeof(cmd));
639-
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
640-
cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
639+
memset(cmd, 0, sizeof(*cmd));
640+
cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
641+
cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
641642
.queue_id = cpu_to_be32(queue_index),
642643
.ntfy_id = cpu_to_be32(rx->ntfy_id),
643644
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
@@ -648,39 +649,54 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
648649
u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
649650
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
650651

651-
cmd.create_rx_queue.rx_desc_ring_addr =
652+
cmd->create_rx_queue.rx_desc_ring_addr =
652653
cpu_to_be64(rx->desc.bus),
653-
cmd.create_rx_queue.rx_data_ring_addr =
654+
cmd->create_rx_queue.rx_data_ring_addr =
654655
cpu_to_be64(rx->data.data_bus),
655-
cmd.create_rx_queue.index = cpu_to_be32(queue_index);
656-
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
657-
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
656+
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
657+
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
658+
cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
658659
} else {
659660
u32 qpl_id = 0;
660661

661662
if (priv->queue_format == GVE_DQO_RDA_FORMAT)
662663
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
663664
else
664665
qpl_id = rx->dqo.qpl->id;
665-
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
666-
cmd.create_rx_queue.rx_desc_ring_addr =
666+
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
667+
cmd->create_rx_queue.rx_desc_ring_addr =
667668
cpu_to_be64(rx->dqo.complq.bus);
668-
cmd.create_rx_queue.rx_data_ring_addr =
669+
cmd->create_rx_queue.rx_data_ring_addr =
669670
cpu_to_be64(rx->dqo.bufq.bus);
670-
cmd.create_rx_queue.packet_buffer_size =
671+
cmd->create_rx_queue.packet_buffer_size =
671672
cpu_to_be16(priv->data_buffer_size_dqo);
672-
cmd.create_rx_queue.rx_buff_ring_size =
673+
cmd->create_rx_queue.rx_buff_ring_size =
673674
cpu_to_be16(priv->rx_desc_cnt);
674-
cmd.create_rx_queue.enable_rsc =
675+
cmd->create_rx_queue.enable_rsc =
675676
!!(priv->dev->features & NETIF_F_LRO);
676677
if (priv->header_split_enabled)
677-
cmd.create_rx_queue.header_buffer_size =
678+
cmd->create_rx_queue.header_buffer_size =
678679
cpu_to_be16(priv->header_buf_size);
679680
}
681+
}
682+
683+
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
684+
{
685+
union gve_adminq_command cmd;
680686

687+
gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
681688
return gve_adminq_issue_cmd(priv, &cmd);
682689
}
683690

691+
/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
692+
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
693+
{
694+
union gve_adminq_command cmd;
695+
696+
gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
697+
return gve_adminq_execute_cmd(priv, &cmd);
698+
}
699+
684700
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
685701
{
686702
int err;
@@ -727,22 +743,31 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
727743
return gve_adminq_kick_and_wait(priv);
728744
}
729745

746+
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
747+
u32 queue_index)
748+
{
749+
memset(cmd, 0, sizeof(*cmd));
750+
cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
751+
cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
752+
.queue_id = cpu_to_be32(queue_index),
753+
};
754+
}
755+
730756
static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
731757
{
732758
union gve_adminq_command cmd;
733-
int err;
734759

735-
memset(&cmd, 0, sizeof(cmd));
736-
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
737-
cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
738-
.queue_id = cpu_to_be32(queue_index),
739-
};
760+
gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
761+
return gve_adminq_issue_cmd(priv, &cmd);
762+
}
740763

741-
err = gve_adminq_issue_cmd(priv, &cmd);
742-
if (err)
743-
return err;
764+
/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
765+
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
766+
{
767+
union gve_adminq_command cmd;
744768

745-
return 0;
769+
gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
770+
return gve_adminq_execute_cmd(priv, &cmd);
746771
}
747772

748773
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)

drivers/net/ethernet/google/gve/gve_adminq.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -451,7 +451,9 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
451451
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
452452
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
453453
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
454+
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
454455
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
456+
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
455457
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
456458
int gve_adminq_register_page_list(struct gve_priv *priv,
457459
struct gve_queue_page_list *qpl);

drivers/net/ethernet/google/gve/gve_ethtool.c

Lines changed: 37 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#include "gve.h"
99
#include "gve_adminq.h"
1010
#include "gve_dqo.h"
11+
#include "gve_utils.h"
1112

1213
static void gve_get_drvinfo(struct net_device *netdev,
1314
struct ethtool_drvinfo *info)
@@ -165,6 +166,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
165166
struct stats *report_stats;
166167
int *rx_qid_to_stats_idx;
167168
int *tx_qid_to_stats_idx;
169+
int num_stopped_rxqs = 0;
170+
int num_stopped_txqs = 0;
168171
struct gve_priv *priv;
169172
bool skip_nic_stats;
170173
unsigned int start;
@@ -181,12 +184,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
181184
sizeof(int), GFP_KERNEL);
182185
if (!rx_qid_to_stats_idx)
183186
return;
187+
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
188+
rx_qid_to_stats_idx[ring] = -1;
189+
if (!gve_rx_was_added_to_block(priv, ring))
190+
num_stopped_rxqs++;
191+
}
184192
tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
185193
sizeof(int), GFP_KERNEL);
186194
if (!tx_qid_to_stats_idx) {
187195
kfree(rx_qid_to_stats_idx);
188196
return;
189197
}
198+
for (ring = 0; ring < num_tx_queues; ring++) {
199+
tx_qid_to_stats_idx[ring] = -1;
200+
if (!gve_tx_was_added_to_block(priv, ring))
201+
num_stopped_txqs++;
202+
}
203+
190204
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
191205
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
192206
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
@@ -260,7 +274,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
260274
/* For rx cross-reporting stats, start from nic rx stats in report */
261275
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
262276
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
263-
max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
277+
/* The boundary between driver stats and NIC stats shifts if there are
278+
* stopped queues.
279+
*/
280+
base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
281+
NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
282+
max_stats_idx = NIC_RX_STATS_REPORT_NUM *
283+
(priv->rx_cfg.num_queues - num_stopped_rxqs) +
264284
base_stats_idx;
265285
/* Preprocess the stats report for rx, map queue id to start index */
266286
skip_nic_stats = false;
@@ -274,6 +294,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
274294
skip_nic_stats = true;
275295
break;
276296
}
297+
if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
298+
net_err_ratelimited("Invalid rxq id in NIC stats\n");
299+
continue;
300+
}
277301
rx_qid_to_stats_idx[queue_id] = stats_idx;
278302
}
279303
/* walk RX rings */
@@ -308,11 +332,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
308332
data[i++] = rx->rx_copybreak_pkt;
309333
data[i++] = rx->rx_copied_pkt;
310334
/* stats from NIC */
311-
if (skip_nic_stats) {
335+
stats_idx = rx_qid_to_stats_idx[ring];
336+
if (skip_nic_stats || stats_idx < 0) {
312337
/* skip NIC rx stats */
313338
i += NIC_RX_STATS_REPORT_NUM;
314339
} else {
315-
stats_idx = rx_qid_to_stats_idx[ring];
316340
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
317341
u64 value =
318342
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -338,7 +362,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
338362

339363
/* For tx cross-reporting stats, start from nic tx stats in report */
340364
base_stats_idx = max_stats_idx;
341-
max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
365+
max_stats_idx = NIC_TX_STATS_REPORT_NUM *
366+
(num_tx_queues - num_stopped_txqs) +
342367
max_stats_idx;
343368
/* Preprocess the stats report for tx, map queue id to start index */
344369
skip_nic_stats = false;
@@ -352,6 +377,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
352377
skip_nic_stats = true;
353378
break;
354379
}
380+
if (queue_id < 0 || queue_id >= num_tx_queues) {
381+
net_err_ratelimited("Invalid txq id in NIC stats\n");
382+
continue;
383+
}
355384
tx_qid_to_stats_idx[queue_id] = stats_idx;
356385
}
357386
/* walk TX rings */
@@ -383,11 +412,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
383412
data[i++] = gve_tx_load_event_counter(priv, tx);
384413
data[i++] = tx->dma_mapping_error;
385414
/* stats from NIC */
386-
if (skip_nic_stats) {
415+
stats_idx = tx_qid_to_stats_idx[ring];
416+
if (skip_nic_stats || stats_idx < 0) {
387417
/* skip NIC tx stats */
388418
i += NIC_TX_STATS_REPORT_NUM;
389419
} else {
390-
stats_idx = tx_qid_to_stats_idx[ring];
391420
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
392421
u64 value =
393422
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -509,20 +538,17 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
509538
{
510539
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
511540
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
512-
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
513541
int err;
514542

515543
/* get current queue configuration */
516-
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
517-
&tx_alloc_cfg, &rx_alloc_cfg);
544+
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
518545

519546
/* copy over the new ring_size from ethtool */
520547
tx_alloc_cfg.ring_size = new_tx_desc_cnt;
521548
rx_alloc_cfg.ring_size = new_rx_desc_cnt;
522549

523550
if (netif_running(priv->dev)) {
524-
err = gve_adjust_config(priv, &qpls_alloc_cfg,
525-
&tx_alloc_cfg, &rx_alloc_cfg);
551+
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
526552
if (err)
527553
return err;
528554
}

0 commit comments

Comments (0)