@@ -339,6 +339,10 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
static void enic_wq_free_buf(struct vnic_wq *wq,
			     struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+
+	enic->wq_stats[wq->index].cq_work++;
+	enic->wq_stats[wq->index].cq_bytes += buf->len;
	enic_free_wq_buf(wq, buf);
}

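The wq_stats[] array incremented throughout this patch is new; its definition is not part of the hunks shown here and presumably sits in enic.h next to struct enic. A minimal sketch of the per-TX-queue counter block, inferred only from the fields these hunks touch (the field names come from the diff; the struct name and the u64 types are assumptions):

struct enic_wq_stats {			/* hypothetical sketch of the per-WQ counters */
	u64 packets;			/* packets queued for transmit */
	u64 bytes;			/* bytes queued, incl. replicated TSO headers */
	u64 tso;			/* TSO sends */
	u64 encap_tso;			/* encapsulated TSO sends */
	u64 csum_none;			/* sends without HW checksum offload */
	u64 csum_partial;		/* CHECKSUM_PARTIAL offloaded sends */
	u64 encap_csum;			/* checksum offload on encapsulated sends */
	u64 add_vlan;			/* VLAN tag insertions requested */
	u64 cq_work;			/* TX completions processed */
	u64 cq_bytes;			/* bytes completed */
	u64 null_pkt;			/* zero-length skbs dropped */
	u64 skb_linear_fail;		/* skb_linearize() failures */
	u64 desc_full_awake;		/* ring full while the queue was awake */
	u64 stopped;			/* times the TX queue was stopped */
	u64 wake;			/* times the TX queue was re-woken */
};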
@@ -355,8 +359,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
-	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
+	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
		netif_wake_subqueue(enic->netdev, q_number);
+		enic->wq_stats[q_number].wake++;
+	}

	spin_unlock(&enic->wq_lock[q_number]);

@@ -590,6 +596,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

+	/* The enic_queue_wq_desc() above does not do HW checksum */
+	enic->wq_stats[wq->index].csum_none++;
+	enic->wq_stats[wq->index].packets++;
+	enic->wq_stats[wq->index].bytes += skb->len;
+
	return err;
}

@@ -622,6 +633,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

+	enic->wq_stats[wq->index].csum_partial++;
+	enic->wq_stats[wq->index].packets++;
+	enic->wq_stats[wq->index].bytes += skb->len;
+
	return err;
}

@@ -676,15 +691,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
	unsigned int offset = 0;
	unsigned int hdr_len;
	dma_addr_t dma_addr;
+	unsigned int pkts;
	unsigned int len;
	skb_frag_t *frag;

	if (skb->encapsulation) {
		hdr_len = skb_inner_tcp_all_headers(skb);
		enic_preload_tcp_csum_encap(skb);
+		enic->wq_stats[wq->index].encap_tso++;
	} else {
		hdr_len = skb_tcp_all_headers(skb);
		enic_preload_tcp_csum(skb);
+		enic->wq_stats[wq->index].tso++;
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -705,7 +723,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
	}

	if (eop)
-		return 0;
+		goto tso_out_stats;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
@@ -732,6 +750,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
		}
	}

+tso_out_stats:
+	/* calculate how many packets tso sent */
+	len = skb->len - hdr_len;
+	pkts = len / mss;
+	if ((len % mss) > 0)
+		pkts++;
+	enic->wq_stats[wq->index].packets += pkts;
+	enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+
	return 0;
}

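The tso_out_stats block just added counts one packet per MSS worth of payload, rounding up for the final partial segment, and charges each segment's replicated headers to the byte counter. Purely as an illustration of the arithmetic (not a change to the patch), the same computation could be written with the kernel's DIV_ROUND_UP() helper:

	/* payload bytes after the L2-L4 headers */
	len = skb->len - hdr_len;
	/* one wire packet per mss, rounded up for the last partial segment */
	pkts = DIV_ROUND_UP(len, mss);
	/* every segment carries its own copy of the headers, e.g.
	 * len = 9000, mss = 1448, hdr_len = 66 gives pkts = 7 and
	 * 9000 + 7 * 66 = 9462 bytes counted on the wire
	 */
	enic->wq_stats[wq->index].packets += pkts;
	enic->wq_stats[wq->index].bytes += len + pkts * hdr_len;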
@@ -764,6 +791,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

+	enic->wq_stats[wq->index].encap_csum++;
+	enic->wq_stats[wq->index].packets++;
+	enic->wq_stats[wq->index].bytes += skb->len;
+
	return err;
}

@@ -780,6 +811,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
+		enic->wq_stats[wq->index].add_vlan++;
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
@@ -792,7 +824,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
	else if (skb->encapsulation)
		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
					      vlan_tag, loopback);
-	else if (skb->ip_summed == CHECKSUM_PARTIAL)
+	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
@@ -825,13 +857,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	unsigned int txq_map;
	struct netdev_queue *txq;

+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
+		enic->wq_stats[wq->index].null_pkt++;
		return NETDEV_TX_OK;
	}

-	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
-	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
@@ -843,6 +877,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
+		enic->wq_stats[wq->index].skb_linear_fail++;
		return NETDEV_TX_OK;
	}

@@ -854,14 +889,17 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
+		enic->wq_stats[wq->index].desc_full_awake++;
		return NETDEV_TX_BUSY;
	}

	if (enic_queue_wq_skb(enic, wq, skb))
		goto error;

-	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
+		enic->wq_stats[wq->index].stopped++;
+	}
	skb_tx_timestamp(skb);
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);
@@ -878,7 +916,10 @@ static void enic_get_stats(struct net_device *netdev,
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
+	u64 pkt_truncated = 0;
+	u64 bad_fcs = 0;
	int err;
+	int i;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
@@ -897,8 +938,17 @@ static void enic_get_stats(struct net_device *netdev,
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
-	net_stats->rx_over_errors = enic->rq_truncated_pkts;
-	net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
+	for (i = 0; i < ENIC_RQ_MAX; i++) {
+		struct enic_rq_stats *rqs = &enic->rq_stats[i];
+
+		if (!enic->rq->ctrl)
+			break;
+		pkt_truncated += rqs->pkt_truncated;
+		bad_fcs += rqs->bad_fcs;
+	}
+	net_stats->rx_over_errors = pkt_truncated;
+	net_stats->rx_crc_errors = bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}

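As on the TX side, the rq_stats[] array replaces the old driver-global enic->rq_bad_fcs and enic->rq_truncated_pkts counters with per-RX-queue counters. Its definition is likewise not part of these hunks; a sketch inferred from the fields used below (field names from the diff, struct name and u64 types assumed):

struct enic_rq_stats {			/* hypothetical sketch of the per-RQ counters */
	u64 packets;			/* completion descriptors processed */
	u64 bytes;			/* bytes received */
	u64 l4_rss_hash;		/* packets hashed on L4 by RSS */
	u64 l3_rss_hash;		/* packets hashed on L3 by RSS */
	u64 csum_unnecessary;		/* checksums validated by HW */
	u64 csum_unnecessary_encap;	/* HW-validated checksums, encapsulated */
	u64 vlan_stripped;		/* VLAN tags stripped by HW */
	u64 bad_fcs;			/* frames with bad FCS */
	u64 pkt_truncated;		/* truncated / overflowed frames */
	u64 desc_skip;			/* skipped completion descriptors */
	u64 no_skb;			/* skb allocation failures on refill */
	u64 napi_complete;		/* NAPI polls that finished under budget */
	u64 napi_repoll;		/* NAPI polls that exhausted the budget */
};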
@@ -1261,8 +1311,10 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
-	if (!skb)
+	if (!skb) {
+		enic->rq_stats[rq->index].no_skb++;
		return -ENOMEM;
+	}

	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
				  DMA_FROM_DEVICE);
@@ -1313,6 +1365,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+	struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1323,8 +1376,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
	u32 rss_hash;
	bool outer_csum_ok = true, encap = false;

-	if (skipped)
+	rqstats->packets++;
+	if (skipped) {
+		rqstats->desc_skip++;
		return;
+	}

	skb = buf->os_buf;

@@ -1342,9 +1398,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,

	if (!fcs_ok) {
		if (bytes_written > 0)
-			enic->rq_bad_fcs++;
+			rqstats->bad_fcs++;
		else if (bytes_written == 0)
-			enic->rq_truncated_pkts++;
+			rqstats->pkt_truncated++;
	}

	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
@@ -1359,7 +1415,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,

		/* Good receive
		 */
-
+		rqstats->bytes += bytes_written;
		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
@@ -1377,11 +1433,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+				rqstats->l4_rss_hash++;
				break;
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+				rqstats->l3_rss_hash++;
				break;
			}
		}
@@ -1418,11 +1476,16 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
		    (ipv4_csum_ok || ipv6)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = encap;
+			if (encap)
+				rqstats->csum_unnecessary_encap++;
+			else
+				rqstats->csum_unnecessary++;
		}

-		if (vlan_stripped)
+		if (vlan_stripped) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
+			rqstats->vlan_stripped++;
+		}
		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (!(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
@@ -1435,7 +1498,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,

		/* Buffer overflow
		 */
-
+		rqstats->pkt_truncated++;
		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
@@ -1568,6 +1631,9 @@ static int enic_poll(struct napi_struct *napi, int budget)
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
+		enic->rq_stats[0].napi_complete++;
+	} else {
+		enic->rq_stats[0].napi_repoll++;
	}

	return rq_work_done;
@@ -1693,6 +1759,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
+		enic->rq_stats[rq].napi_complete++;
+	} else {
+		enic->rq_stats[rq].napi_repoll++;
	}

	return work_done;