Skip to content

Commit 38f3cd3

Browse files
committed
Merge branch 'net-avoid-ehash-lookup-races'
Xuanqiang Luo says: ==================== net: Avoid ehash lookup races After replacing R/W locks with RCU in commit 3ab5aee ("net: Convert TCP & DCCP hash tables to use RCU / hlist_nulls"), a race window emerged during the switch from reqsk/sk to sk/tw. Now that both the timewait sock (tw) and the full sock (sk) reside on the same ehash chain, it is appropriate to introduce hlist_nulls replace operations to eliminate the race conditions caused by this window. Before this series of patches, I sent another version of the patch, attempting to avoid the issue using a lock mechanism. However, it turned out that approach has some problems, so I've switched to the "replace" method in the current patches to resolve the issue. For details, refer to: https://lore.kernel.org/netdev/[email protected]/ When I encountered this type of issue recently, I found that there had been several historical discussions about it. Therefore, I'm adding this background information for those interested to reference: 1. https://lore.kernel.org/lkml/[email protected]/ 2. https://lore.kernel.org/netdev/[email protected]/ ==================== Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 1c17f43 + b8ec80b commit 38f3cd3

File tree

4 files changed

+90
-25
lines changed

4 files changed

+90
-25
lines changed

include/linux/rculist_nulls.h

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,13 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
5252
#define hlist_nulls_next_rcu(node) \
5353
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
5454

55+
/**
 * hlist_nulls_pprev_rcu - returns the dereferenced pprev of @node.
 * @node: element of the list.
 *
 * Yields an lvalue suitable for rcu_assign_pointer(): the slot that
 * currently points at @node (either a head pointer or the previous
 * element's ->next), seen through the __rcu address space.
 */
#define hlist_nulls_pprev_rcu(node) \
	(*((struct hlist_nulls_node __rcu __force **)(node)->pprev))
61+
5562
/**
5663
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
5764
* @n: the element to delete from the hash list.
@@ -152,6 +159,58 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
152159
n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
153160
}
154161

162+
/**
 * hlist_nulls_replace_rcu - replace an old entry by a new one
 * @old: the element to be replaced
 * @new: the new element to insert
 *
 * Description:
 * Replace the old entry with the new one in a RCU-protected hlist_nulls, while
 * permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary (such as holding
 * appropriate locks) to avoid racing with another list-mutation primitive, such
 * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
 * list. However, it is perfectly legal to run concurrently with the _rcu
 * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_replace_rcu(struct hlist_nulls_node *old,
					   struct hlist_nulls_node *new)
{
	struct hlist_nulls_node *next = old->next;

	/* Fully initialize @new before it is published: concurrent readers
	 * must never observe it with stale link fields.
	 */
	WRITE_ONCE(new->next, next);
	WRITE_ONCE(new->pprev, old->pprev);
	/* Publication point: rcu_assign_pointer() orders the stores above
	 * before the predecessor's pointer is switched from @old to @new.
	 * Racing traversals see either @old or @new, never a torn state.
	 */
	rcu_assign_pointer(hlist_nulls_pprev_rcu(new), new);
	/* @next may be a nulls marker (end of chain) rather than a node. */
	if (!is_a_nulls(next))
		WRITE_ONCE(next->pprev, &new->next);
}
188+
189+
/**
 * hlist_nulls_replace_init_rcu - replace an old entry by a new one and
 * initialize the old
 * @old: the element to be replaced
 * @new: the new element to insert
 *
 * Description:
 * Replace the old entry with the new one in a RCU-protected hlist_nulls, while
 * permitting racing traversals, and reinitialize the old entry.
 *
 * Note: @old must be hashed.
 *
 * The caller must take whatever precautions are necessary (such as holding
 * appropriate locks) to avoid racing with another list-mutation primitive, such
 * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
 * list. However, it is perfectly legal to run concurrently with the _rcu
 * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_replace_init_rcu(struct hlist_nulls_node *old,
						struct hlist_nulls_node *new)
{
	hlist_nulls_replace_rcu(old, new);
	/* NULL pprev marks @old as unhashed; its ->next is left intact so
	 * a reader still traversing through @old can finish its walk.
	 */
	WRITE_ONCE(old->pprev, NULL);
}
213+
155214
/**
156215
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
157216
* @tpos: the type * to use as a loop cursor.

include/net/sock.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -856,6 +856,19 @@ static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
856856
return rc;
857857
}
858858

859+
static inline bool sk_nulls_replace_node_init_rcu(struct sock *old,
860+
struct sock *new)
861+
{
862+
if (sk_hashed(old)) {
863+
hlist_nulls_replace_init_rcu(&old->sk_nulls_node,
864+
&new->sk_nulls_node);
865+
__sock_put(old);
866+
return true;
867+
}
868+
869+
return false;
870+
}
871+
859872
static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
860873
{
861874
hlist_add_head(&sk->sk_node, list);

net/ipv4/inet_hashtables.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -720,8 +720,11 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
720720
spin_lock(lock);
721721
if (osk) {
722722
WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
723-
ret = sk_nulls_del_node_init_rcu(osk);
724-
} else if (found_dup_sk) {
723+
ret = sk_nulls_replace_node_init_rcu(osk, sk);
724+
goto unlock;
725+
}
726+
727+
if (found_dup_sk) {
725728
*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
726729
if (*found_dup_sk)
727730
ret = false;
@@ -730,6 +733,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
730733
if (ret)
731734
__sk_nulls_add_node_rcu(sk, list);
732735

736+
unlock:
733737
spin_unlock(lock);
734738

735739
return ret;

net/ipv4/inet_timewait_sock.c

Lines changed: 12 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -88,12 +88,6 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
8888
}
8989
EXPORT_SYMBOL_GPL(inet_twsk_put);
9090

91-
static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
92-
struct hlist_nulls_head *list)
93-
{
94-
hlist_nulls_add_head_rcu(&tw->tw_node, list);
95-
}
96-
9791
static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
9892
{
9993
__inet_twsk_schedule(tw, timeo, false);
@@ -113,13 +107,12 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
113107
{
114108
const struct inet_sock *inet = inet_sk(sk);
115109
const struct inet_connection_sock *icsk = inet_csk(sk);
116-
struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
117110
spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
118111
struct inet_bind_hashbucket *bhead, *bhead2;
119112

120-
/* Step 1: Put TW into bind hash. Original socket stays there too.
121-
Note, that any socket with inet->num != 0 MUST be bound in
122-
binding cache, even if it is closed.
113+
/* Put TW into bind hash. Original socket stays there too.
114+
* Note, that any socket with inet->num != 0 MUST be bound in
115+
* binding cache, even if it is closed.
123116
*/
124117
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
125118
hashinfo->bhash_size)];
@@ -141,19 +134,6 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
141134

142135
spin_lock(lock);
143136

144-
/* Step 2: Hash TW into tcp ehash chain */
145-
inet_twsk_add_node_rcu(tw, &ehead->chain);
146-
147-
/* Step 3: Remove SK from hash chain */
148-
if (__sk_nulls_del_node_init_rcu(sk))
149-
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
150-
151-
152-
/* Ensure above writes are committed into memory before updating the
153-
* refcount.
154-
* Provides ordering vs later refcount_inc().
155-
*/
156-
smp_wmb();
157137
/* tw_refcnt is set to 3 because we have :
158138
* - one reference for bhash chain.
159139
* - one reference for ehash chain.
@@ -163,6 +143,15 @@ void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
163143
*/
164144
refcount_set(&tw->tw_refcnt, 3);
165145

146+
/* Ensure tw_refcnt has been set before tw is published.
147+
* smp_wmb() provides the necessary memory barrier to enforce this
148+
* ordering.
149+
*/
150+
smp_wmb();
151+
152+
hlist_nulls_replace_init_rcu(&sk->sk_nulls_node, &tw->tw_node);
153+
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
154+
166155
inet_twsk_schedule(tw, timeo);
167156

168157
spin_unlock(lock);

0 commit comments

Comments
 (0)