Skip to content

Commit 411c0ea

Browse files
author
Paolo Abeni
committed
Merge branch 'af_unix-fix-lockless-access-of-sk-sk_state-and-others-fields'
Kuniyuki Iwashima says: ==================== af_unix: Fix lockless access of sk->sk_state and other fields. Patch 1 fixes a bug where SOCK_DGRAM's sk->sk_state is changed to TCP_CLOSE even if the socket is connect()ed to another socket. The rest of this series annotates lockless accesses to the following fields. * sk->sk_state * sk->sk_sndbuf * net->unx.sysctl_max_dgram_qlen * sk->sk_receive_queue.qlen * sk->sk_shutdown Note that with this series there is skb_queue_empty() left in unix_dgram_disconnected() that needs to be changed to the lockless version, and the unix_peer(other) access there should be protected by unix_state_lock(). This will require some refactoring, so another series will follow. Changes: v2: * Patch 1: Fix wrong double lock v1: https://lore.kernel.org/netdev/[email protected]/ ==================== Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Paolo Abeni <[email protected]>
2 parents b0c9a26 + efaf24e commit 411c0ea

File tree

2 files changed

+50
-52
lines changed

2 files changed

+50
-52
lines changed

net/unix/af_unix.c

Lines changed: 44 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
221221
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
222222
}
223223

224-
static inline int unix_recvq_full(const struct sock *sk)
225-
{
226-
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
227-
}
228-
229224
static inline int unix_recvq_full_lockless(const struct sock *sk)
230225
{
231-
return skb_queue_len_lockless(&sk->sk_receive_queue) >
232-
READ_ONCE(sk->sk_max_ack_backlog);
226+
return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
233227
}
234228

235229
struct sock *unix_peer_get(struct sock *s)
@@ -530,18 +524,18 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
530524
return 0;
531525
}
532526

533-
static int unix_writable(const struct sock *sk)
527+
static int unix_writable(const struct sock *sk, unsigned char state)
534528
{
535-
return sk->sk_state != TCP_LISTEN &&
536-
(refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
529+
return state != TCP_LISTEN &&
530+
(refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
537531
}
538532

539533
static void unix_write_space(struct sock *sk)
540534
{
541535
struct socket_wq *wq;
542536

543537
rcu_read_lock();
544-
if (unix_writable(sk)) {
538+
if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
545539
wq = rcu_dereference(sk->sk_wq);
546540
if (skwq_has_sleeper(wq))
547541
wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
570564
sk_error_report(other);
571565
}
572566
}
573-
other->sk_state = TCP_CLOSE;
574567
}
575568

576569
static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
617610
u->path.dentry = NULL;
618611
u->path.mnt = NULL;
619612
state = sk->sk_state;
620-
sk->sk_state = TCP_CLOSE;
613+
WRITE_ONCE(sk->sk_state, TCP_CLOSE);
621614

622615
skpair = unix_peer(sk);
623616
unix_peer(sk) = NULL;
@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
638631
unix_state_lock(skpair);
639632
/* No more writes */
640633
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
641-
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
634+
if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
642635
WRITE_ONCE(skpair->sk_err, ECONNRESET);
643636
unix_state_unlock(skpair);
644637
skpair->sk_state_change(skpair);
@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
739732
if (backlog > sk->sk_max_ack_backlog)
740733
wake_up_interruptible_all(&u->peer_wait);
741734
sk->sk_max_ack_backlog = backlog;
742-
sk->sk_state = TCP_LISTEN;
735+
WRITE_ONCE(sk->sk_state, TCP_LISTEN);
736+
743737
/* set credentials so connect can copy them */
744738
init_peercred(sk);
745739
err = 0;
@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
976970
sk->sk_hash = unix_unbound_hash(sk);
977971
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
978972
sk->sk_write_space = unix_write_space;
979-
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
973+
sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
980974
sk->sk_destruct = unix_sock_destructor;
981975
u = unix_sk(sk);
982976
u->listener = NULL;
@@ -1402,7 +1396,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
14021396
if (err)
14031397
goto out_unlock;
14041398

1405-
sk->sk_state = other->sk_state = TCP_ESTABLISHED;
1399+
WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
1400+
WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
14061401
} else {
14071402
/*
14081403
* 1003.1g breaking connected state with AF_UNSPEC
@@ -1419,13 +1414,20 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
14191414

14201415
unix_peer(sk) = other;
14211416
if (!other)
1422-
sk->sk_state = TCP_CLOSE;
1417+
WRITE_ONCE(sk->sk_state, TCP_CLOSE);
14231418
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
14241419

14251420
unix_state_double_unlock(sk, other);
14261421

1427-
if (other != old_peer)
1422+
if (other != old_peer) {
14281423
unix_dgram_disconnected(sk, old_peer);
1424+
1425+
unix_state_lock(old_peer);
1426+
if (!unix_peer(old_peer))
1427+
WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
1428+
unix_state_unlock(old_peer);
1429+
}
1430+
14291431
sock_put(old_peer);
14301432
} else {
14311433
unix_peer(sk) = other;
@@ -1473,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
14731475
struct sk_buff *skb = NULL;
14741476
long timeo;
14751477
int err;
1476-
int st;
14771478

14781479
err = unix_validate_addr(sunaddr, addr_len);
14791480
if (err)
@@ -1538,7 +1539,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
15381539
if (other->sk_shutdown & RCV_SHUTDOWN)
15391540
goto out_unlock;
15401541

1541-
if (unix_recvq_full(other)) {
1542+
if (unix_recvq_full_lockless(other)) {
15421543
err = -EAGAIN;
15431544
if (!timeo)
15441545
goto out_unlock;
@@ -1563,9 +1564,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
15631564
15641565
Well, and we have to recheck the state after socket locked.
15651566
*/
1566-
st = sk->sk_state;
1567-
1568-
switch (st) {
1567+
switch (READ_ONCE(sk->sk_state)) {
15691568
case TCP_CLOSE:
15701569
/* This is ok... continue with connect */
15711570
break;
@@ -1580,7 +1579,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
15801579

15811580
unix_state_lock_nested(sk, U_LOCK_SECOND);
15821581

1583-
if (sk->sk_state != st) {
1582+
if (sk->sk_state != TCP_CLOSE) {
15841583
unix_state_unlock(sk);
15851584
unix_state_unlock(other);
15861585
sock_put(other);
@@ -1633,7 +1632,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
16331632
copy_peercred(sk, other);
16341633

16351634
sock->state = SS_CONNECTED;
1636-
sk->sk_state = TCP_ESTABLISHED;
1635+
WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
16371636
sock_hold(newsk);
16381637

16391638
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1705,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
17051704
goto out;
17061705

17071706
arg->err = -EINVAL;
1708-
if (sk->sk_state != TCP_LISTEN)
1707+
if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
17091708
goto out;
17101709

17111710
/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1962,7 +1961,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
19621961
}
19631962

19641963
err = -EMSGSIZE;
1965-
if (len > sk->sk_sndbuf - 32)
1964+
if (len > READ_ONCE(sk->sk_sndbuf) - 32)
19661965
goto out;
19671966

19681967
if (len > SKB_MAX_ALLOC) {
@@ -2044,7 +2043,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
20442043
unix_peer(sk) = NULL;
20452044
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
20462045

2047-
sk->sk_state = TCP_CLOSE;
2046+
WRITE_ONCE(sk->sk_state, TCP_CLOSE);
20482047
unix_state_unlock(sk);
20492048

20502049
unix_dgram_disconnected(sk, other);
@@ -2221,7 +2220,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
22212220
}
22222221

22232222
if (msg->msg_namelen) {
2224-
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2223+
err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
22252224
goto out_err;
22262225
} else {
22272226
err = -ENOTCONN;
@@ -2242,7 +2241,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
22422241
&err, 0);
22432242
} else {
22442243
/* Keep two messages in the pipe so it schedules better */
2245-
size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
2244+
size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
22462245

22472246
/* allow fallback to order-0 allocations */
22482247
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2335,7 +2334,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
23352334
if (err)
23362335
return err;
23372336

2338-
if (sk->sk_state != TCP_ESTABLISHED)
2337+
if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
23392338
return -ENOTCONN;
23402339

23412340
if (msg->msg_namelen)
@@ -2349,7 +2348,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
23492348
{
23502349
struct sock *sk = sock->sk;
23512350

2352-
if (sk->sk_state != TCP_ESTABLISHED)
2351+
if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
23532352
return -ENOTCONN;
23542353

23552354
return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2654,7 +2653,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
26542653

26552654
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
26562655
{
2657-
if (unlikely(sk->sk_state != TCP_ESTABLISHED))
2656+
if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
26582657
return -ENOTCONN;
26592658

26602659
return unix_read_skb(sk, recv_actor);
@@ -2678,7 +2677,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
26782677
size_t size = state->size;
26792678
unsigned int last_len;
26802679

2681-
if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2680+
if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
26822681
err = -EINVAL;
26832682
goto out;
26842683
}
@@ -3009,7 +3008,7 @@ long unix_inq_len(struct sock *sk)
30093008
struct sk_buff *skb;
30103009
long amount = 0;
30113010

3012-
if (sk->sk_state == TCP_LISTEN)
3011+
if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
30133012
return -EINVAL;
30143013

30153014
spin_lock(&sk->sk_receive_queue.lock);
@@ -3121,12 +3120,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
31213120
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
31223121
{
31233122
struct sock *sk = sock->sk;
3123+
unsigned char state;
31243124
__poll_t mask;
31253125
u8 shutdown;
31263126

31273127
sock_poll_wait(file, sock, wait);
31283128
mask = 0;
31293129
shutdown = READ_ONCE(sk->sk_shutdown);
3130+
state = READ_ONCE(sk->sk_state);
31303131

31313132
/* exceptional events? */
31323133
if (READ_ONCE(sk->sk_err))
@@ -3148,14 +3149,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
31483149

31493150
/* Connection-based need to check for termination and startup */
31503151
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
3151-
sk->sk_state == TCP_CLOSE)
3152+
state == TCP_CLOSE)
31523153
mask |= EPOLLHUP;
31533154

31543155
/*
31553156
* we set writable also when the other side has shut down the
31563157
* connection. This prevents stuck sockets.
31573158
*/
3158-
if (unix_writable(sk))
3159+
if (unix_writable(sk, state))
31593160
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
31603161

31613162
return mask;
@@ -3166,12 +3167,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
31663167
{
31673168
struct sock *sk = sock->sk, *other;
31683169
unsigned int writable;
3170+
unsigned char state;
31693171
__poll_t mask;
31703172
u8 shutdown;
31713173

31723174
sock_poll_wait(file, sock, wait);
31733175
mask = 0;
31743176
shutdown = READ_ONCE(sk->sk_shutdown);
3177+
state = READ_ONCE(sk->sk_state);
31753178

31763179
/* exceptional events? */
31773180
if (READ_ONCE(sk->sk_err) ||
@@ -3191,19 +3194,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
31913194
mask |= EPOLLIN | EPOLLRDNORM;
31923195

31933196
/* Connection-based need to check for termination and startup */
3194-
if (sk->sk_type == SOCK_SEQPACKET) {
3195-
if (sk->sk_state == TCP_CLOSE)
3196-
mask |= EPOLLHUP;
3197-
/* connection hasn't started yet? */
3198-
if (sk->sk_state == TCP_SYN_SENT)
3199-
return mask;
3200-
}
3197+
if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
3198+
mask |= EPOLLHUP;
32013199

32023200
/* No write status requested, avoid expensive OUT tests. */
32033201
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
32043202
return mask;
32053203

3206-
writable = unix_writable(sk);
3204+
writable = unix_writable(sk, state);
32073205
if (writable) {
32083206
unix_state_lock(sk);
32093207

net/unix/diag.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
6565
u32 *buf;
6666
int i;
6767

68-
if (sk->sk_state == TCP_LISTEN) {
68+
if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
6969
spin_lock(&sk->sk_receive_queue.lock);
7070

7171
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
103103
{
104104
struct unix_diag_rqlen rql;
105105

106-
if (sk->sk_state == TCP_LISTEN) {
107-
rql.udiag_rqueue = sk->sk_receive_queue.qlen;
106+
if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
107+
rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
108108
rql.udiag_wqueue = sk->sk_max_ack_backlog;
109109
} else {
110110
rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
136136
rep = nlmsg_data(nlh);
137137
rep->udiag_family = AF_UNIX;
138138
rep->udiag_type = sk->sk_type;
139-
rep->udiag_state = sk->sk_state;
139+
rep->udiag_state = READ_ONCE(sk->sk_state);
140140
rep->pad = 0;
141141
rep->udiag_ino = sk_ino;
142142
sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
165165
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
166166
goto out_nlmsg_trim;
167167

168-
if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
168+
if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
169169
goto out_nlmsg_trim;
170170

171171
if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
215215
sk_for_each(sk, &net->unx.table.buckets[slot]) {
216216
if (num < s_num)
217217
goto next;
218-
if (!(req->udiag_states & (1 << sk->sk_state)))
218+
if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
219219
goto next;
220220
if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
221221
NETLINK_CB(cb->skb).portid,

0 commit comments

Comments
 (0)