@@ -81,24 +81,24 @@ struct virtnet_stat_desc {
 
 struct virtnet_sq_stats {
 	struct u64_stats_sync syncp;
-	u64 packets;
-	u64 bytes;
-	u64 xdp_tx;
-	u64 xdp_tx_drops;
-	u64 kicks;
-	u64 tx_timeouts;
+	u64_stats_t packets;
+	u64_stats_t bytes;
+	u64_stats_t xdp_tx;
+	u64_stats_t xdp_tx_drops;
+	u64_stats_t kicks;
+	u64_stats_t tx_timeouts;
 };
 
 struct virtnet_rq_stats {
 	struct u64_stats_sync syncp;
-	u64 packets;
-	u64 bytes;
-	u64 drops;
-	u64 xdp_packets;
-	u64 xdp_tx;
-	u64 xdp_redirects;
-	u64 xdp_drops;
-	u64 kicks;
+	u64_stats_t packets;
+	u64_stats_t bytes;
+	u64_stats_t drops;
+	u64_stats_t xdp_packets;
+	u64_stats_t xdp_tx;
+	u64_stats_t xdp_redirects;
+	u64_stats_t xdp_drops;
+	u64_stats_t kicks;
 };
 
 #define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
@@ -775,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 		return;
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
+	u64_stats_add(&sq->stats.bytes, bytes);
+	u64_stats_add(&sq->stats.packets, packets);
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
@@ -975,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 out:
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
-	sq->stats.xdp_tx += n;
-	sq->stats.xdp_tx_drops += n - nxmit;
-	sq->stats.kicks += kicks;
+	u64_stats_add(&sq->stats.bytes, bytes);
+	u64_stats_add(&sq->stats.packets, packets);
+	u64_stats_add(&sq->stats.xdp_tx, n);
+	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
+	u64_stats_add(&sq->stats.kicks, kicks);
 	u64_stats_update_end(&sq->stats.syncp);
 
 	virtnet_xdp_put_sq(vi, sq);
@@ -1011,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 	u32 act;
 
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
-	stats->xdp_packets++;
+	u64_stats_inc(&stats->xdp_packets);
 
 	switch (act) {
 	case XDP_PASS:
 		return act;
 
 	case XDP_TX:
-		stats->xdp_tx++;
+		u64_stats_inc(&stats->xdp_tx);
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf)) {
 			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
@@ -1036,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 		return act;
 
 	case XDP_REDIRECT:
-		stats->xdp_redirects++;
+		u64_stats_inc(&stats->xdp_redirects);
 		err = xdp_do_redirect(dev, xdp, xdp_prog);
 		if (err)
 			return XDP_DROP;
@@ -1232,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 	return skb;
 
 err_xdp:
-	stats->xdp_drops++;
+	u64_stats_inc(&stats->xdp_drops);
 err:
-	stats->drops++;
+	u64_stats_inc(&stats->drops);
 	put_page(page);
 xdp_xmit:
 	return NULL;
@@ -1253,7 +1253,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	struct sk_buff *skb;
 
 	len -= vi->hdr_len;
-	stats->bytes += len;
+	u64_stats_add(&stats->bytes, len);
 
 	if (unlikely(len > GOOD_PACKET_LEN)) {
 		pr_debug("%s: rx error: len %u exceeds max size %d\n",
@@ -1282,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	return skb;
 
 err:
-	stats->drops++;
+	u64_stats_inc(&stats->drops);
 	put_page(page);
 	return NULL;
 }
@@ -1298,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
 	struct sk_buff *skb =
 		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
 
-	stats->bytes += len - vi->hdr_len;
+	u64_stats_add(&stats->bytes, len - vi->hdr_len);
 	if (unlikely(!skb))
 		goto err;
 
 	return skb;
 
 err:
-	stats->drops++;
+	u64_stats_inc(&stats->drops);
 	give_pages(rq, page);
 	return NULL;
 }
@@ -1326,7 +1326,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
 			DEV_STATS_INC(dev, rx_length_errors);
 			break;
 		}
-		stats->bytes += len;
+		u64_stats_add(&stats->bytes, len);
 		page = virt_to_head_page(buf);
 		put_page(page);
 	}
@@ -1436,7 +1436,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 			goto err;
 		}
 
-		stats->bytes += len;
+		u64_stats_add(&stats->bytes, len);
 		page = virt_to_head_page(buf);
 		offset = buf - page_address(page);
 
@@ -1600,8 +1600,8 @@ static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
 	put_page(page);
 	mergeable_buf_free(rq, num_buf, dev, stats);
 
-	stats->xdp_drops++;
-	stats->drops++;
+	u64_stats_inc(&stats->xdp_drops);
+	u64_stats_inc(&stats->drops);
 	return NULL;
 }
 
@@ -1625,7 +1625,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 
 	head_skb = NULL;
-	stats->bytes += len - vi->hdr_len;
+	u64_stats_add(&stats->bytes, len - vi->hdr_len);
 
 	if (unlikely(len > truesize - room)) {
 		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
@@ -1666,7 +1666,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			goto err_buf;
 		}
 
-		stats->bytes += len;
+		u64_stats_add(&stats->bytes, len);
 		page = virt_to_head_page(buf);
 
 		truesize = mergeable_ctx_to_truesize(ctx);
@@ -1718,7 +1718,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	mergeable_buf_free(rq, num_buf, dev, stats);
 
 err_buf:
-	stats->drops++;
+	u64_stats_inc(&stats->drops);
 	dev_kfree_skb(head_skb);
 	return NULL;
 }
@@ -1985,7 +1985,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		unsigned long flags;
 
 		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
-		rq->stats.kicks++;
+		u64_stats_inc(&rq->stats.kicks);
 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
 	}
 
@@ -2065,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct virtnet_rq_stats stats = {};
 	unsigned int len;
+	int packets = 0;
 	void *buf;
 	int i;
 
 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
 		void *ctx;
 
-		while (stats.packets < budget &&
+		while (packets < budget &&
 		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
-			stats.packets++;
+			packets++;
 		}
 	} else {
-		while (stats.packets < budget &&
+		while (packets < budget &&
 		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
-			stats.packets++;
+			packets++;
 		}
 	}
 
@@ -2093,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		}
 	}
 
+	u64_stats_set(&stats.packets, packets);
 	u64_stats_update_begin(&rq->stats.syncp);
 	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
 		size_t offset = virtnet_rq_stats_desc[i].offset;
-		u64 *item;
+		u64_stats_t *item, *src;
 
-		item = (u64 *)((u8 *)&rq->stats + offset);
-		*item += *(u64 *)((u8 *)&stats + offset);
+		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
+		src = (u64_stats_t *)((u8 *)&stats + offset);
+		u64_stats_add(item, u64_stats_read(src));
 	}
 	u64_stats_update_end(&rq->stats.syncp);
 
-	return stats.packets;
+	return packets;
 }
 
 static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -2158,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		sq = virtnet_xdp_get_sq(vi);
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
-			sq->stats.kicks++;
+			u64_stats_inc(&sq->stats.kicks);
 			u64_stats_update_end(&sq->stats.syncp);
 		}
 		virtnet_xdp_put_sq(vi, sq);
@@ -2370,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (kick || netif_xmit_stopped(txq)) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
-			sq->stats.kicks++;
+			u64_stats_inc(&sq->stats.kicks);
 			u64_stats_update_end(&sq->stats.syncp);
 		}
 	}
@@ -2553,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev,
 
 		do {
 			start = u64_stats_fetch_begin(&sq->stats.syncp);
-			tpackets = sq->stats.packets;
-			tbytes = sq->stats.bytes;
-			terrors = sq->stats.tx_timeouts;
+			tpackets = u64_stats_read(&sq->stats.packets);
+			tbytes = u64_stats_read(&sq->stats.bytes);
+			terrors = u64_stats_read(&sq->stats.tx_timeouts);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 
 		do {
 			start = u64_stats_fetch_begin(&rq->stats.syncp);
-			rpackets = rq->stats.packets;
-			rbytes = rq->stats.bytes;
-			rdrops = rq->stats.drops;
+			rpackets = u64_stats_read(&rq->stats.packets);
+			rbytes = u64_stats_read(&rq->stats.bytes);
+			rdrops = u64_stats_read(&rq->stats.drops);
 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
 
 		tot->rx_packets += rpackets;
@@ -3191,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 	struct virtnet_info *vi = netdev_priv(dev);
 	unsigned int idx = 0, start, i, j;
 	const u8 *stats_base;
+	const u64_stats_t *p;
 	size_t offset;
 
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
-		stats_base = (u8 *)&rq->stats;
+		stats_base = (const u8 *)&rq->stats;
 		do {
 			start = u64_stats_fetch_begin(&rq->stats.syncp);
 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
 				offset = virtnet_rq_stats_desc[j].offset;
-				data[idx + j] = *(u64 *)(stats_base + offset);
+				p = (const u64_stats_t *)(stats_base + offset);
+				data[idx + j] = u64_stats_read(p);
 			}
 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
 		idx += VIRTNET_RQ_STATS_LEN;
@@ -3210,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct send_queue *sq = &vi->sq[i];
 
-		stats_base = (u8 *)&sq->stats;
+		stats_base = (const u8 *)&sq->stats;
 		do {
 			start = u64_stats_fetch_begin(&sq->stats.syncp);
 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
 				offset = virtnet_sq_stats_desc[j].offset;
-				data[idx + j] = *(u64 *)(stats_base + offset);
+				p = (const u64_stats_t *)(stats_base + offset);
+				data[idx + j] = u64_stats_read(p);
 			}
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 		idx += VIRTNET_SQ_STATS_LEN;
@@ -3898,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.tx_timeouts++;
+	u64_stats_inc(&sq->stats.tx_timeouts);
 	u64_stats_update_end(&sq->stats.syncp);
 
 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
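
For context, the conversion relies on the u64_stats_t helpers from <linux/u64_stats_sync.h>. Below is a minimal sketch (illustrative only, not part of the patch; the struct and function names are made up) of the writer/reader pattern the driver now follows:

/* Hypothetical per-queue counters, mirroring the virtnet_sq_stats layout. */
#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
};

/* Writer side: a single writer per queue brackets updates with begin/end. */
static void example_update(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_add(&s->bytes, len);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: the fetch/retry loop yields a consistent snapshot on 32-bit. */
static void example_read(struct example_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->packets);
		*bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}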