Skip to content

Commit 61217d8

Browse files
edumazet authored and davem330 committed
virtio_net: use u64_stats_t infra to avoid data-races
syzbot reported a data-race in virtnet_poll / virtnet_stats [1] u64_stats_t infra has very nice accessors that must be used to avoid potential load-store tearing. [1] BUG: KCSAN: data-race in virtnet_poll / virtnet_stats read-write to 0xffff88810271b1a0 of 8 bytes by interrupt on cpu 0: virtnet_receive drivers/net/virtio_net.c:2102 [inline] virtnet_poll+0x6c8/0xb40 drivers/net/virtio_net.c:2148 __napi_poll+0x60/0x3b0 net/core/dev.c:6527 napi_poll net/core/dev.c:6594 [inline] net_rx_action+0x32b/0x750 net/core/dev.c:6727 __do_softirq+0xc1/0x265 kernel/softirq.c:553 invoke_softirq kernel/softirq.c:427 [inline] __irq_exit_rcu kernel/softirq.c:632 [inline] irq_exit_rcu+0x3b/0x90 kernel/softirq.c:644 common_interrupt+0x7f/0x90 arch/x86/kernel/irq.c:247 asm_common_interrupt+0x26/0x40 arch/x86/include/asm/idtentry.h:636 __sanitizer_cov_trace_const_cmp8+0x0/0x80 kernel/kcov.c:306 jbd2_write_access_granted fs/jbd2/transaction.c:1174 [inline] jbd2_journal_get_write_access+0x94/0x1c0 fs/jbd2/transaction.c:1239 __ext4_journal_get_write_access+0x154/0x3f0 fs/ext4/ext4_jbd2.c:241 ext4_reserve_inode_write+0x14e/0x200 fs/ext4/inode.c:5745 __ext4_mark_inode_dirty+0x8e/0x440 fs/ext4/inode.c:5919 ext4_evict_inode+0xaf0/0xdc0 fs/ext4/inode.c:299 evict+0x1aa/0x410 fs/inode.c:664 iput_final fs/inode.c:1775 [inline] iput+0x42c/0x5b0 fs/inode.c:1801 do_unlinkat+0x2b9/0x4f0 fs/namei.c:4405 __do_sys_unlink fs/namei.c:4446 [inline] __se_sys_unlink fs/namei.c:4444 [inline] __x64_sys_unlink+0x30/0x40 fs/namei.c:4444 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd read to 0xffff88810271b1a0 of 8 bytes by task 2814 on cpu 1: virtnet_stats+0x1b3/0x340 drivers/net/virtio_net.c:2564 dev_get_stats+0x6d/0x860 net/core/dev.c:10511 rtnl_fill_stats+0x45/0x320 net/core/rtnetlink.c:1261 rtnl_fill_ifinfo+0xd0e/0x1120 net/core/rtnetlink.c:1867 rtnl_dump_ifinfo+0x7f9/0xc20 net/core/rtnetlink.c:2240 
netlink_dump+0x390/0x720 net/netlink/af_netlink.c:2266 netlink_recvmsg+0x425/0x780 net/netlink/af_netlink.c:1992 sock_recvmsg_nosec net/socket.c:1027 [inline] sock_recvmsg net/socket.c:1049 [inline] ____sys_recvmsg+0x156/0x310 net/socket.c:2760 ___sys_recvmsg net/socket.c:2802 [inline] __sys_recvmsg+0x1ea/0x270 net/socket.c:2832 __do_sys_recvmsg net/socket.c:2842 [inline] __se_sys_recvmsg net/socket.c:2839 [inline] __x64_sys_recvmsg+0x46/0x50 net/socket.c:2839 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd value changed: 0x000000000045c334 -> 0x000000000045c376 Fixes: 3fa2a1d ("virtio-net: per cpu 64 bit stats (v2)") Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Michael S. Tsirkin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent bc4c48e commit 61217d8

File tree

1 file changed

+65
-59
lines changed

1 file changed

+65
-59
lines changed

drivers/net/virtio_net.c

Lines changed: 65 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -81,24 +81,24 @@ struct virtnet_stat_desc {
8181

8282
struct virtnet_sq_stats {
8383
struct u64_stats_sync syncp;
84-
u64 packets;
85-
u64 bytes;
86-
u64 xdp_tx;
87-
u64 xdp_tx_drops;
88-
u64 kicks;
89-
u64 tx_timeouts;
84+
u64_stats_t packets;
85+
u64_stats_t bytes;
86+
u64_stats_t xdp_tx;
87+
u64_stats_t xdp_tx_drops;
88+
u64_stats_t kicks;
89+
u64_stats_t tx_timeouts;
9090
};
9191

9292
struct virtnet_rq_stats {
9393
struct u64_stats_sync syncp;
94-
u64 packets;
95-
u64 bytes;
96-
u64 drops;
97-
u64 xdp_packets;
98-
u64 xdp_tx;
99-
u64 xdp_redirects;
100-
u64 xdp_drops;
101-
u64 kicks;
94+
u64_stats_t packets;
95+
u64_stats_t bytes;
96+
u64_stats_t drops;
97+
u64_stats_t xdp_packets;
98+
u64_stats_t xdp_tx;
99+
u64_stats_t xdp_redirects;
100+
u64_stats_t xdp_drops;
101+
u64_stats_t kicks;
102102
};
103103

104104
#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
@@ -775,8 +775,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
775775
return;
776776

777777
u64_stats_update_begin(&sq->stats.syncp);
778-
sq->stats.bytes += bytes;
779-
sq->stats.packets += packets;
778+
u64_stats_add(&sq->stats.bytes, bytes);
779+
u64_stats_add(&sq->stats.packets, packets);
780780
u64_stats_update_end(&sq->stats.syncp);
781781
}
782782

@@ -975,11 +975,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
975975
}
976976
out:
977977
u64_stats_update_begin(&sq->stats.syncp);
978-
sq->stats.bytes += bytes;
979-
sq->stats.packets += packets;
980-
sq->stats.xdp_tx += n;
981-
sq->stats.xdp_tx_drops += n - nxmit;
982-
sq->stats.kicks += kicks;
978+
u64_stats_add(&sq->stats.bytes, bytes);
979+
u64_stats_add(&sq->stats.packets, packets);
980+
u64_stats_add(&sq->stats.xdp_tx, n);
981+
u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
982+
u64_stats_add(&sq->stats.kicks, kicks);
983983
u64_stats_update_end(&sq->stats.syncp);
984984

985985
virtnet_xdp_put_sq(vi, sq);
@@ -1011,14 +1011,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
10111011
u32 act;
10121012

10131013
act = bpf_prog_run_xdp(xdp_prog, xdp);
1014-
stats->xdp_packets++;
1014+
u64_stats_inc(&stats->xdp_packets);
10151015

10161016
switch (act) {
10171017
case XDP_PASS:
10181018
return act;
10191019

10201020
case XDP_TX:
1021-
stats->xdp_tx++;
1021+
u64_stats_inc(&stats->xdp_tx);
10221022
xdpf = xdp_convert_buff_to_frame(xdp);
10231023
if (unlikely(!xdpf)) {
10241024
netdev_dbg(dev, "convert buff to frame failed for xdp\n");
@@ -1036,7 +1036,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
10361036
return act;
10371037

10381038
case XDP_REDIRECT:
1039-
stats->xdp_redirects++;
1039+
u64_stats_inc(&stats->xdp_redirects);
10401040
err = xdp_do_redirect(dev, xdp, xdp_prog);
10411041
if (err)
10421042
return XDP_DROP;
@@ -1232,9 +1232,9 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
12321232
return skb;
12331233

12341234
err_xdp:
1235-
stats->xdp_drops++;
1235+
u64_stats_inc(&stats->xdp_drops);
12361236
err:
1237-
stats->drops++;
1237+
u64_stats_inc(&stats->drops);
12381238
put_page(page);
12391239
xdp_xmit:
12401240
return NULL;
@@ -1253,7 +1253,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
12531253
struct sk_buff *skb;
12541254

12551255
len -= vi->hdr_len;
1256-
stats->bytes += len;
1256+
u64_stats_add(&stats->bytes, len);
12571257

12581258
if (unlikely(len > GOOD_PACKET_LEN)) {
12591259
pr_debug("%s: rx error: len %u exceeds max size %d\n",
@@ -1282,7 +1282,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
12821282
return skb;
12831283

12841284
err:
1285-
stats->drops++;
1285+
u64_stats_inc(&stats->drops);
12861286
put_page(page);
12871287
return NULL;
12881288
}
@@ -1298,14 +1298,14 @@ static struct sk_buff *receive_big(struct net_device *dev,
12981298
struct sk_buff *skb =
12991299
page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
13001300

1301-
stats->bytes += len - vi->hdr_len;
1301+
u64_stats_add(&stats->bytes, len - vi->hdr_len);
13021302
if (unlikely(!skb))
13031303
goto err;
13041304

13051305
return skb;
13061306

13071307
err:
1308-
stats->drops++;
1308+
u64_stats_inc(&stats->drops);
13091309
give_pages(rq, page);
13101310
return NULL;
13111311
}
@@ -1326,7 +1326,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
13261326
DEV_STATS_INC(dev, rx_length_errors);
13271327
break;
13281328
}
1329-
stats->bytes += len;
1329+
u64_stats_add(&stats->bytes, len);
13301330
page = virt_to_head_page(buf);
13311331
put_page(page);
13321332
}
@@ -1436,7 +1436,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
14361436
goto err;
14371437
}
14381438

1439-
stats->bytes += len;
1439+
u64_stats_add(&stats->bytes, len);
14401440
page = virt_to_head_page(buf);
14411441
offset = buf - page_address(page);
14421442

@@ -1600,8 +1600,8 @@ static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
16001600
put_page(page);
16011601
mergeable_buf_free(rq, num_buf, dev, stats);
16021602

1603-
stats->xdp_drops++;
1604-
stats->drops++;
1603+
u64_stats_inc(&stats->xdp_drops);
1604+
u64_stats_inc(&stats->drops);
16051605
return NULL;
16061606
}
16071607

@@ -1625,7 +1625,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
16251625
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
16261626

16271627
head_skb = NULL;
1628-
stats->bytes += len - vi->hdr_len;
1628+
u64_stats_add(&stats->bytes, len - vi->hdr_len);
16291629

16301630
if (unlikely(len > truesize - room)) {
16311631
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
@@ -1666,7 +1666,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
16661666
goto err_buf;
16671667
}
16681668

1669-
stats->bytes += len;
1669+
u64_stats_add(&stats->bytes, len);
16701670
page = virt_to_head_page(buf);
16711671

16721672
truesize = mergeable_ctx_to_truesize(ctx);
@@ -1718,7 +1718,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
17181718
mergeable_buf_free(rq, num_buf, dev, stats);
17191719

17201720
err_buf:
1721-
stats->drops++;
1721+
u64_stats_inc(&stats->drops);
17221722
dev_kfree_skb(head_skb);
17231723
return NULL;
17241724
}
@@ -1985,7 +1985,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
19851985
unsigned long flags;
19861986

19871987
flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
1988-
rq->stats.kicks++;
1988+
u64_stats_inc(&rq->stats.kicks);
19891989
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
19901990
}
19911991

@@ -2065,22 +2065,23 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
20652065
struct virtnet_info *vi = rq->vq->vdev->priv;
20662066
struct virtnet_rq_stats stats = {};
20672067
unsigned int len;
2068+
int packets = 0;
20682069
void *buf;
20692070
int i;
20702071

20712072
if (!vi->big_packets || vi->mergeable_rx_bufs) {
20722073
void *ctx;
20732074

2074-
while (stats.packets < budget &&
2075+
while (packets < budget &&
20752076
(buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
20762077
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2077-
stats.packets++;
2078+
packets++;
20782079
}
20792080
} else {
2080-
while (stats.packets < budget &&
2081+
while (packets < budget &&
20812082
(buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
20822083
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2083-
stats.packets++;
2084+
packets++;
20842085
}
20852086
}
20862087

@@ -2093,17 +2094,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
20932094
}
20942095
}
20952096

2097+
u64_stats_set(&stats.packets, packets);
20962098
u64_stats_update_begin(&rq->stats.syncp);
20972099
for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
20982100
size_t offset = virtnet_rq_stats_desc[i].offset;
2099-
u64 *item;
2101+
u64_stats_t *item, *src;
21002102

2101-
item = (u64 *)((u8 *)&rq->stats + offset);
2102-
*item += *(u64 *)((u8 *)&stats + offset);
2103+
item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2104+
src = (u64_stats_t *)((u8 *)&stats + offset);
2105+
u64_stats_add(item, u64_stats_read(src));
21032106
}
21042107
u64_stats_update_end(&rq->stats.syncp);
21052108

2106-
return stats.packets;
2109+
return packets;
21072110
}
21082111

21092112
static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -2158,7 +2161,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
21582161
sq = virtnet_xdp_get_sq(vi);
21592162
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
21602163
u64_stats_update_begin(&sq->stats.syncp);
2161-
sq->stats.kicks++;
2164+
u64_stats_inc(&sq->stats.kicks);
21622165
u64_stats_update_end(&sq->stats.syncp);
21632166
}
21642167
virtnet_xdp_put_sq(vi, sq);
@@ -2370,7 +2373,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
23702373
if (kick || netif_xmit_stopped(txq)) {
23712374
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
23722375
u64_stats_update_begin(&sq->stats.syncp);
2373-
sq->stats.kicks++;
2376+
u64_stats_inc(&sq->stats.kicks);
23742377
u64_stats_update_end(&sq->stats.syncp);
23752378
}
23762379
}
@@ -2553,16 +2556,16 @@ static void virtnet_stats(struct net_device *dev,
25532556

25542557
do {
25552558
start = u64_stats_fetch_begin(&sq->stats.syncp);
2556-
tpackets = sq->stats.packets;
2557-
tbytes = sq->stats.bytes;
2558-
terrors = sq->stats.tx_timeouts;
2559+
tpackets = u64_stats_read(&sq->stats.packets);
2560+
tbytes = u64_stats_read(&sq->stats.bytes);
2561+
terrors = u64_stats_read(&sq->stats.tx_timeouts);
25592562
} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
25602563

25612564
do {
25622565
start = u64_stats_fetch_begin(&rq->stats.syncp);
2563-
rpackets = rq->stats.packets;
2564-
rbytes = rq->stats.bytes;
2565-
rdrops = rq->stats.drops;
2566+
rpackets = u64_stats_read(&rq->stats.packets);
2567+
rbytes = u64_stats_read(&rq->stats.bytes);
2568+
rdrops = u64_stats_read(&rq->stats.drops);
25662569
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
25672570

25682571
tot->rx_packets += rpackets;
@@ -3191,17 +3194,19 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
31913194
struct virtnet_info *vi = netdev_priv(dev);
31923195
unsigned int idx = 0, start, i, j;
31933196
const u8 *stats_base;
3197+
const u64_stats_t *p;
31943198
size_t offset;
31953199

31963200
for (i = 0; i < vi->curr_queue_pairs; i++) {
31973201
struct receive_queue *rq = &vi->rq[i];
31983202

3199-
stats_base = (u8 *)&rq->stats;
3203+
stats_base = (const u8 *)&rq->stats;
32003204
do {
32013205
start = u64_stats_fetch_begin(&rq->stats.syncp);
32023206
for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
32033207
offset = virtnet_rq_stats_desc[j].offset;
3204-
data[idx + j] = *(u64 *)(stats_base + offset);
3208+
p = (const u64_stats_t *)(stats_base + offset);
3209+
data[idx + j] = u64_stats_read(p);
32053210
}
32063211
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
32073212
idx += VIRTNET_RQ_STATS_LEN;
@@ -3210,12 +3215,13 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
32103215
for (i = 0; i < vi->curr_queue_pairs; i++) {
32113216
struct send_queue *sq = &vi->sq[i];
32123217

3213-
stats_base = (u8 *)&sq->stats;
3218+
stats_base = (const u8 *)&sq->stats;
32143219
do {
32153220
start = u64_stats_fetch_begin(&sq->stats.syncp);
32163221
for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
32173222
offset = virtnet_sq_stats_desc[j].offset;
3218-
data[idx + j] = *(u64 *)(stats_base + offset);
3223+
p = (const u64_stats_t *)(stats_base + offset);
3224+
data[idx + j] = u64_stats_read(p);
32193225
}
32203226
} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
32213227
idx += VIRTNET_SQ_STATS_LEN;
@@ -3898,7 +3904,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
38983904
struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
38993905

39003906
u64_stats_update_begin(&sq->stats.syncp);
3901-
sq->stats.tx_timeouts++;
3907+
u64_stats_inc(&sq->stats.tx_timeouts);
39023908
u64_stats_update_end(&sq->stats.syncp);
39033909

39043910
netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",

0 commit comments

Comments
 (0)