
Commit f3f9150

nrescobar authored and kuba-moo committed

enic: Collect per queue statistics

Collect per rq/wq statistics.

Signed-off-by: Nelson Escobar <[email protected]>
Signed-off-by: John Daley <[email protected]>
Signed-off-by: Satish Kharat <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>

1 parent a59571a commit f3f9150

File tree: 2 files changed, 121 additions and 18 deletions

drivers/net/ethernet/cisco/enic/enic.h

Lines changed: 36 additions & 2 deletions
@@ -128,6 +128,40 @@ struct vxlan_offload {
 	u8 flags;
 };
 
+struct enic_wq_stats {
+	u64 packets;		/* pkts queued for Tx */
+	u64 stopped;		/* Tx ring almost full, queue stopped */
+	u64 wake;		/* Tx ring no longer full, queue woken up*/
+	u64 tso;		/* non-encap tso pkt */
+	u64 encap_tso;		/* encap tso pkt */
+	u64 encap_csum;		/* encap HW csum */
+	u64 csum_partial;	/* skb->ip_summed = CHECKSUM_PARTIAL */
+	u64 csum_none;		/* HW csum not required */
+	u64 bytes;		/* bytes queued for Tx */
+	u64 add_vlan;		/* HW adds vlan tag */
+	u64 cq_work;		/* Tx completions processed */
+	u64 cq_bytes;		/* Tx bytes processed */
+	u64 null_pkt;		/* skb length <= 0 */
+	u64 skb_linear_fail;	/* linearize failures */
+	u64 desc_full_awake;	/* TX ring full while queue awake */
+};
+
+struct enic_rq_stats {
+	u64 packets;			/* pkts received */
+	u64 bytes;			/* bytes received */
+	u64 l4_rss_hash;		/* hashed on l4 */
+	u64 l3_rss_hash;		/* hashed on l3 */
+	u64 csum_unnecessary;		/* HW verified csum */
+	u64 csum_unnecessary_encap;	/* HW verified csum on encap packet */
+	u64 vlan_stripped;		/* HW stripped vlan */
+	u64 napi_complete;		/* napi complete intr reenabled */
+	u64 napi_repoll;		/* napi poll again */
+	u64 bad_fcs;			/* bad pkts */
+	u64 pkt_truncated;		/* truncated pkts */
+	u64 no_skb;			/* out of skbs */
+	u64 desc_skip;			/* Rx pkt went into later buffer */
+};
+
 /* Per-instance private data structure */
 struct enic {
 	struct net_device *netdev;
@@ -162,16 +196,16 @@ struct enic {
 	/* work queue cache line section */
 	____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
 	spinlock_t wq_lock[ENIC_WQ_MAX];
+	struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
 	unsigned int wq_count;
 	u16 loop_enable;
 	u16 loop_tag;
 
 	/* receive queue cache line section */
 	____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+	struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
 	unsigned int rq_count;
 	struct vxlan_offload vxlan;
-	u64 rq_truncated_pkts;
-	u64 rq_bad_fcs;
 	struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
 
 	/* interrupt resource cache line section */
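
The new counters are deliberately plain u64 fields rather than atomics: each one has a single writer context (the Tx path under wq_lock for the queueing counters, the completion path for cq_work/cq_bytes/wake, and the queue's NAPI poll for the rq counters), and stats readers tolerate slightly stale values. As a minimal sketch of the aggregation this layout allows (a hypothetical helper, not part of this patch):

	/* Hypothetical helper, not in this patch: total Rx bytes across
	 * the queues the driver brought up.  Plain u64 reads suffice
	 * because each counter is written only from its own queue's
	 * context and statistics readers tolerate staleness.
	 */
	static u64 enic_total_rx_bytes(struct enic *enic)
	{
		u64 bytes = 0;
		unsigned int i;

		for (i = 0; i < enic->rq_count; i++)
			bytes += enic->rq_stats[i].bytes;
		return bytes;
	}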

drivers/net/ethernet/cisco/enic/enic_main.c

Lines changed: 85 additions & 16 deletions
@@ -339,6 +339,10 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 static void enic_wq_free_buf(struct vnic_wq *wq,
 			     struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
 {
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+
+	enic->wq_stats[wq->index].cq_work++;
+	enic->wq_stats[wq->index].cq_bytes += buf->len;
 	enic_free_wq_buf(wq, buf);
 }
 
@@ -355,8 +359,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 
 	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
-	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
+	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
 		netif_wake_subqueue(enic->netdev, q_number);
+		enic->wq_stats[q_number].wake++;
+	}
 
 	spin_unlock(&enic->wq_lock[q_number]);
 
@@ -590,6 +596,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
 	if (!eop)
 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
+	/* The enic_queue_wq_desc() above does not do HW checksum */
+	enic->wq_stats[wq->index].csum_none++;
+	enic->wq_stats[wq->index].packets++;
+	enic->wq_stats[wq->index].bytes += skb->len;
+
 	return err;
 }
 
@@ -622,6 +633,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
 	if (!eop)
 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
+	enic->wq_stats[wq->index].csum_partial++;
+	enic->wq_stats[wq->index].packets++;
+	enic->wq_stats[wq->index].bytes += skb->len;
+
 	return err;
 }
 
@@ -676,15 +691,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 	unsigned int offset = 0;
 	unsigned int hdr_len;
 	dma_addr_t dma_addr;
+	unsigned int pkts;
 	unsigned int len;
 	skb_frag_t *frag;
 
 	if (skb->encapsulation) {
 		hdr_len = skb_inner_tcp_all_headers(skb);
 		enic_preload_tcp_csum_encap(skb);
+		enic->wq_stats[wq->index].encap_tso++;
 	} else {
 		hdr_len = skb_tcp_all_headers(skb);
 		enic_preload_tcp_csum(skb);
+		enic->wq_stats[wq->index].tso++;
 	}
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -705,7 +723,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 	}
 
 	if (eop)
-		return 0;
+		goto tso_out_stats;
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 	 * for additional data fragments
@@ -732,6 +750,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
 		}
 	}
 
+tso_out_stats:
+	/* calculate how many packets tso sent */
+	len = skb->len - hdr_len;
+	pkts = len / mss;
+	if ((len % mss) > 0)
+		pkts++;
+	enic->wq_stats[wq->index].packets += pkts;
+	enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+
 	return 0;
 }
 
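
The tso_out_stats label added above recovers the on-wire packet count that TSO hides from the driver: the payload (skb->len minus one copy of the headers) is divided by the MSS and rounded up, and the byte count re-adds one header per segment because the NIC replays the headers on every segment it emits. With illustrative numbers (not from the patch), a 9014-byte skb with hdr_len = 54 and mss = 1448 gives:

	len   = 9014 - 54       = 8960
	pkts  = 8960 / 1448     = 6 remainder 272  ->  pkts = 7
	bytes += 8960 + 7 * 54  = 9338

so the counters reflect the seven segments that actually reach the wire rather than one oversized skb.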

@@ -764,6 +791,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
 	if (!eop)
 		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
 
+	enic->wq_stats[wq->index].encap_csum++;
+	enic->wq_stats[wq->index].packets++;
+	enic->wq_stats[wq->index].bytes += skb->len;
+
 	return err;
 }
 
@@ -780,6 +811,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
 		/* VLAN tag from trunking driver */
 		vlan_tag_insert = 1;
 		vlan_tag = skb_vlan_tag_get(skb);
+		enic->wq_stats[wq->index].add_vlan++;
 	} else if (enic->loop_enable) {
 		vlan_tag = enic->loop_tag;
 		loopback = 1;
@@ -792,7 +824,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
 	else if (skb->encapsulation)
 		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
 					      vlan_tag, loopback);
-	else if (skb->ip_summed == CHECKSUM_PARTIAL)
+	else if (skb->ip_summed == CHECKSUM_PARTIAL)
 		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
 						vlan_tag, loopback);
 	else
@@ -825,13 +857,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	unsigned int txq_map;
 	struct netdev_queue *txq;
 
+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
 	if (skb->len <= 0) {
 		dev_kfree_skb_any(skb);
+		enic->wq_stats[wq->index].null_pkt++;
 		return NETDEV_TX_OK;
 	}
 
-	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
-	wq = &enic->wq[txq_map];
 	txq = netdev_get_tx_queue(netdev, txq_map);
 
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
@@ -843,6 +877,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
 	    skb_linearize(skb)) {
 		dev_kfree_skb_any(skb);
+		enic->wq_stats[wq->index].skb_linear_fail++;
 		return NETDEV_TX_OK;
 	}
 
@@ -854,14 +889,17 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
 		spin_unlock(&enic->wq_lock[txq_map]);
+		enic->wq_stats[wq->index].desc_full_awake++;
 		return NETDEV_TX_BUSY;
 	}
 
 	if (enic_queue_wq_skb(enic, wq, skb))
 		goto error;
 
-	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
 		netif_tx_stop_queue(txq);
+		enic->wq_stats[wq->index].stopped++;
+	}
 	skb_tx_timestamp(skb);
 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 		vnic_wq_doorbell(wq);
@@ -878,7 +916,10 @@ static void enic_get_stats(struct net_device *netdev,
 {
 	struct enic *enic = netdev_priv(netdev);
 	struct vnic_stats *stats;
+	u64 pkt_truncated = 0;
+	u64 bad_fcs = 0;
 	int err;
+	int i;
 
 	err = enic_dev_stats_dump(enic, &stats);
 	/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
@@ -897,8 +938,17 @@ static void enic_get_stats(struct net_device *netdev,
 	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
 	net_stats->rx_errors = stats->rx.rx_errors;
 	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
-	net_stats->rx_over_errors = enic->rq_truncated_pkts;
-	net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
+	for (i = 0; i < ENIC_RQ_MAX; i++) {
+		struct enic_rq_stats *rqs = &enic->rq_stats[i];
+
+		if (!enic->rq->ctrl)
+			break;
+		pkt_truncated += rqs->pkt_truncated;
+		bad_fcs += rqs->bad_fcs;
+	}
+	net_stats->rx_over_errors = pkt_truncated;
+	net_stats->rx_crc_errors = bad_fcs;
 	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
 }
 
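
enic_get_stats() now derives rx_over_errors and rx_crc_errors by summing the per-queue counters in place of the two driver-global fields this patch removes; the ctrl-pointer check bails out early if the queues have not been allocated. The same pattern would extend to other fields. A hypothetical sketch, not part of this patch (tx_packets/tx_bytes appear to be filled from the adapter's stats dump earlier in this function, so a real change would have to pick one source of truth):

	u64 tx_pkts = 0, tx_bytes = 0;

	/* Hypothetical: aggregate per-queue Tx counters the same way */
	for (i = 0; i < ENIC_WQ_MAX; i++) {
		tx_pkts += enic->wq_stats[i].packets;
		tx_bytes += enic->wq_stats[i].bytes;
	}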

@@ -1261,8 +1311,10 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 		return 0;
 	}
 	skb = netdev_alloc_skb_ip_align(netdev, len);
-	if (!skb)
+	if (!skb) {
+		enic->rq_stats[rq->index].no_skb++;
 		return -ENOMEM;
+	}
 
 	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
 				  DMA_FROM_DEVICE);
@@ -1313,6 +1365,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 	struct net_device *netdev = enic->netdev;
 	struct sk_buff *skb;
 	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+	struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
 
 	u8 type, color, eop, sop, ingress_port, vlan_stripped;
 	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1323,8 +1376,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 	u32 rss_hash;
 	bool outer_csum_ok = true, encap = false;
 
-	if (skipped)
+	rqstats->packets++;
+	if (skipped) {
+		rqstats->desc_skip++;
 		return;
+	}
 
 	skb = buf->os_buf;
 
@@ -1342,9 +1398,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 
 		if (!fcs_ok) {
 			if (bytes_written > 0)
-				enic->rq_bad_fcs++;
+				rqstats->bad_fcs++;
 			else if (bytes_written == 0)
-				enic->rq_truncated_pkts++;
+				rqstats->pkt_truncated++;
 		}
 
 		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
@@ -1359,7 +1415,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 
 		/* Good receive
 		 */
-
+		rqstats->bytes += bytes_written;
 		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
 			buf->os_buf = NULL;
 			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
@@ -1377,11 +1433,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
 			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
 				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+				rqstats->l4_rss_hash++;
 				break;
 			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
 			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
 			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
 				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+				rqstats->l3_rss_hash++;
 				break;
 			}
 		}
@@ -1418,11 +1476,16 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		    (ipv4_csum_ok || ipv6)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			skb->csum_level = encap;
+			if (encap)
+				rqstats->csum_unnecessary_encap++;
+			else
+				rqstats->csum_unnecessary++;
 		}
 
-		if (vlan_stripped)
+		if (vlan_stripped) {
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
+			rqstats->vlan_stripped++;
+		}
 		skb_mark_napi_id(skb, &enic->napi[rq->index]);
 		if (!(netdev->features & NETIF_F_GRO))
 			netif_receive_skb(skb);
@@ -1435,7 +1498,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 
 		/* Buffer overflow
 		 */
-
+		rqstats->pkt_truncated++;
 		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -1568,6 +1631,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
 			enic_set_int_moderation(enic, &enic->rq[0]);
 		vnic_intr_unmask(&enic->intr[intr]);
+		enic->rq_stats[0].napi_complete++;
+	} else {
+		enic->rq_stats[0].napi_repoll++;
 	}
 
 	return rq_work_done;
@@ -1693,6 +1759,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
 			enic_set_int_moderation(enic, &enic->rq[rq]);
 		vnic_intr_unmask(&enic->intr[intr]);
+		enic->rq_stats[rq].napi_complete++;
+	} else {
+		enic->rq_stats[rq].napi_repoll++;
 	}
 
 	return work_done;
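
Both poll routines now record how each NAPI cycle ends: when the budget is not exhausted and napi_complete_done() succeeds, the interrupt is unmasked and napi_complete is bumped; otherwise the core will call the poll routine again, counted as napi_repoll. A minimal sketch of that generic shape (process_rx() and rearm_irq() are hypothetical placeholders, not enic functions):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = process_rx(napi, budget);	/* hypothetical */

		if (work_done < budget && napi_complete_done(napi, work_done)) {
			rearm_irq(napi);	/* vnic_intr_unmask() in enic */
			/* -> rq_stats[..].napi_complete++ */
		} else {
			/* -> rq_stats[..].napi_repoll++ */
		}

		return work_done;
	}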
