@@ -159,8 +159,11 @@ static bool inet_use_bhash2_on_bind(const struct sock *sk)
 	if (sk->sk_family == AF_INET6) {
 		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
 
-		return addr_type != IPV6_ADDR_ANY &&
-		       addr_type != IPV6_ADDR_MAPPED;
+		if (addr_type == IPV6_ADDR_ANY)
+			return false;
+
+		if (addr_type != IPV6_ADDR_MAPPED)
+			return true;
 	}
 #endif
 	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
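
Note: the rewrite splits the old compound return into the three cases it encodes: an IPv6 wildcard can never use bhash2, a plain IPv6 address always can, and a v4-mapped address falls through to the IPv4 wildcard test. A minimal userspace sketch of that decision table, with the kernel helpers stubbed out and all names below being illustrative stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

enum v6_kind { V6_ANY, V6_MAPPED, V6_OTHER };	/* stand-in for ipv6_addr_type() */

/* true when a per-address (bhash2) lookup alone can decide bind conflicts */
bool use_bhash2(bool is_v6, enum v6_kind kind, uint32_t v4_saddr)
{
	if (is_v6) {
		if (kind == V6_ANY)
			return false;	/* wildcard: must scan the whole port */
		if (kind != V6_MAPPED)
			return true;	/* real v6 saddr: bhash2 suffices */
		/* v4-mapped: fall through to the IPv4 wildcard test */
	}
	return v4_saddr != htonl(INADDR_ANY);
}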
@@ -213,18 +216,9 @@ static bool inet_bhash2_conflict(const struct sock *sk,
 				  bool relax, bool reuseport_cb_ok,
 				  bool reuseport_ok)
 {
-	struct inet_timewait_sock *tw2;
 	struct sock *sk2;
 
-	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
-		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
-					   reuseport_cb_ok, reuseport_ok))
-			return true;
-	}
-
-	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
-		sk2 = (struct sock *)tw2;
-
+	sk_for_each_bound(sk2, &tb2->owners) {
 		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
 					   reuseport_cb_ok, reuseport_ok))
 			return true;
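
Note: this hunk assumes an earlier patch in the series already moved timewait sockets onto the same owners list as full sockets, which is why the separate deathrow walk and the tw2 cast can go. The cast the old loop relied on is the usual shared-leading-layout idiom; a self-contained illustration with stand-in types (not the kernel's):

#include <stdio.h>

/* Both socket flavours begin with the same common header, so a timewait
 * socket can be walked and compared through a full-socket pointer. */
struct common { int port; };
struct full_sock { struct common c; };
struct tw_sock { struct common c; };

int main(void)
{
	struct tw_sock tw = { .c = { .port = 8080 } };
	struct full_sock *sk2 = (struct full_sock *)&tw;	/* same leading layout */

	printf("port %d\n", sk2->c.port);	/* prints: port 8080 */
	return 0;
}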
@@ -233,48 +227,50 @@ static bool inet_bhash2_conflict(const struct sock *sk,
 	return false;
 }
 
+#define sk_for_each_bound_bhash(__sk, __tb2, __tb) \
+	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node) \
+		sk_for_each_bound(sk2, &(__tb2)->owners)
+
 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
 static int inet_csk_bind_conflict(const struct sock *sk,
 				  const struct inet_bind_bucket *tb,
 				  const struct inet_bind2_bucket *tb2, /* may be null */
 				  bool relax, bool reuseport_ok)
 {
-	bool reuseport_cb_ok;
-	struct sock_reuseport *reuseport_cb;
 	kuid_t uid = sock_i_uid((struct sock *)sk);
+	struct sock_reuseport *reuseport_cb;
+	bool reuseport_cb_ok;
+	struct sock *sk2;
 
 	rcu_read_lock();
 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
 	rcu_read_unlock();
 
-	/*
-	 * Unlike other sk lookup places we do not check
+	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
+	 * ipv4) should have been checked already. We need to do these two
+	 * checks separately because their spinlocks have to be acquired/released
+	 * independently of each other, to prevent possible deadlocks
+	 */
+	if (inet_use_bhash2_on_bind(sk))
+		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
+						   reuseport_cb_ok, reuseport_ok);
+
+	/* Unlike other sk lookup places we do not check
 	 * for sk_net here, since _all_ the socks listed
 	 * in tb->owners and tb2->owners list belong
 	 * to the same net - the one this bucket belongs to.
 	 */
+	sk_for_each_bound_bhash(sk2, tb2, tb) {
+		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
+			continue;
 
-	if (!inet_use_bhash2_on_bind(sk)) {
-		struct sock *sk2;
-
-		sk_for_each_bound(sk2, &tb->owners)
-			if (inet_bind_conflict(sk, sk2, uid, relax,
-					       reuseport_cb_ok, reuseport_ok) &&
-			    inet_rcv_saddr_equal(sk, sk2, true))
-				return true;
-
-		return false;
+		if (inet_rcv_saddr_equal(sk, sk2, true))
+			return true;
 	}
 
-	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
-	 * ipv4) should have been checked already. We need to do these two
-	 * checks separately because their spinlocks have to be acquired/released
-	 * independently of each other, to prevent possible deadlocks
-	 */
-	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
-					   reuseport_ok);
+	return false;
 }
 
 /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
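
Note: the new sk_for_each_bound_bhash() helper nests two list walks: every bhash2 bucket hanging off the bhash bucket, then every owner in that bucket. Its body hard-codes sk2 rather than using the __sk parameter, so it expands correctly only where the caller's cursor variable is literally named sk2, as in inet_csk_bind_conflict() above. Expanded by hand, the conflict scan reads roughly:

	hlist_for_each_entry(tb2, &tb->bhash2, bhash_node)	/* each tb2 under tb */
		sk_for_each_bound(sk2, &tb2->owners) {		/* each owner in tb2 */
			if (!inet_bind_conflict(sk, sk2, uid, relax,
						reuseport_cb_ok, reuseport_ok))
				continue;
			if (inet_rcv_saddr_equal(sk, sk2, true))
				return true;
		}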
@@ -457,7 +453,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
 	kuid_t uid = sock_i_uid(sk);
 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 
-	if (hlist_empty(&tb->owners)) {
+	if (hlist_empty(&tb->bhash2)) {
 		tb->fastreuse = reuse;
 		if (sk->sk_reuseport) {
 			tb->fastreuseport = FASTREUSEPORT_ANY;
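
Note: the emptiness test changes because a bind bucket no longer tracks sockets directly; it is empty exactly when it has no bhash2 children left. The resulting two-level layout, sketched with only the fields this diff touches (all other members omitted):

	struct inet_bind_bucket {		/* keyed by (net, port, l3mdev) */
		struct hlist_head bhash2;	/* children: one tb2 per bound address */
	};

	struct inet_bind2_bucket {		/* keyed by (net, port, l3mdev, saddr) */
		struct hlist_node bhash_node;	/* links into the parent tb->bhash2 */
		struct hlist_head owners;	/* sockets (timewait included) bound here */
	};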
@@ -549,7 +545,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 	}
 
 	if (!found_port) {
-		if (!hlist_empty(&tb->owners)) {
+		if (!hlist_empty(&tb->bhash2)) {
 			if (sk->sk_reuse == SK_FORCE_REUSE ||
 			    (tb->fastreuse > 0 && reuse) ||
 			    sk_reuseport_match(tb, sk))
@@ -569,7 +565,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 
 	if (!tb2) {
 		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
-					       net, head2, port, l3mdev, sk);
+					       net, head2, tb, sk);
 		if (!tb2)
 			goto fail_unlock;
 		bhash2_created = true;
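
Note: passing tb instead of port and l3mdev works because the bind2 bucket can derive both from its parent, and it must also link itself under tb for the hlist_empty(&tb->bhash2) tests above to mean anything. The matching constructor change lives outside this hunk; its essential effect is presumably along these lines (an assumption, sketched, not the kernel code):

	/* sketch: inside bind2 bucket initialization (assumed, not shown here) */
	tb2->port   = tb->port;		/* derived from the parent... */
	tb2->l3mdev = tb->l3mdev;	/* ...instead of passed in separately */
	hlist_add_head(&tb2->bhash_node, &tb->bhash2);	/* link tb2 under tb */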
@@ -591,11 +587,10 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 
 fail_unlock:
 	if (ret) {
+		if (bhash2_created)
+			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
 		if (bhash_created)
 			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
-		if (bhash2_created)
-			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
-						  tb2);
 	}
 	if (head2_lock_acquired)
 		spin_unlock(&head2->lock);
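
Note: the error path now destroys tb2 before tb, not after. The order matters once tb2 is linked under tb: destroying tb2 unlinks it from tb->bhash2, and, judging by the new emptiness checks above, tb is presumably freed only once tb->bhash2 is empty; freeing tb first would leave tb2's bhash_node pointing into freed memory. A sketch of that assumed dependency (these are not the kernel functions):

	static void destroy_tb2(struct kmem_cache *cachep, struct inet_bind2_bucket *tb2)
	{
		hlist_del(&tb2->bhash_node);	/* unlink from tb->bhash2 first */
		kmem_cache_free(cachep, tb2);
	}

	static void destroy_tb(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
	{
		if (hlist_empty(&tb->bhash2))	/* safe only after tb2 is gone */
			kmem_cache_free(cachep, tb);
	}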