@@ -368,6 +368,43 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
368
368
}
369
369
}
370
370
371
/* Tear down a single neighbour entry: unlink it from the hash table and
 * the per-device list, stop its timer, mark it dead, and drop the table's
 * reference via neigh_cleanup_and_release().
 *
 * NOTE(review): callers walk the table under tbl->lock (see the
 * lockdep_is_held(&tbl->lock) in neigh_flush_table()); this function
 * assumes that lock is held — confirm against all call sites.
 */
static void neigh_flush_one(struct neighbour *n)
{
	/* Unlink first so no new lookups can find the entry; RCU readers
	 * already traversing may still hold a reference.
	 */
	hlist_del_rcu(&n->hash);
	hlist_del_rcu(&n->dev_list);

	write_lock(&n->lock);

	neigh_del_timer(n);
	neigh_mark_dead(n);

	if (refcount_read(&n->refcnt) != 1) {
		/* The most unpleasant situation.
		 * We must destroy neighbour entry,
		 * but someone still uses it.
		 *
		 * The destroy will be delayed until
		 * the last user releases us, but
		 * we must kill timers etc. and move
		 * it to safe state.
		 */
		__skb_queue_purge(&n->arp_queue);
		n->arp_queue_len_bytes = 0;
		WRITE_ONCE(n->output, neigh_blackhole);

		if (n->nud_state & NUD_VALID)
			n->nud_state = NUD_NOARP;
		else
			n->nud_state = NUD_NONE;

		neigh_dbg(2, "neigh %p is stray\n", n);
	}

	write_unlock(&n->lock);

	neigh_cleanup_and_release(n);
}
371
408
static void neigh_flush_dev (struct neigh_table * tbl , struct net_device * dev ,
372
409
bool skip_perm )
373
410
{
@@ -381,32 +418,24 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
381
418
if (skip_perm && n -> nud_state & NUD_PERMANENT )
382
419
continue ;
383
420
384
- hlist_del_rcu (& n -> hash );
385
- hlist_del_rcu (& n -> dev_list );
386
- write_lock (& n -> lock );
387
- neigh_del_timer (n );
388
- neigh_mark_dead (n );
389
- if (refcount_read (& n -> refcnt ) != 1 ) {
390
- /* The most unpleasant situation.
391
- * We must destroy neighbour entry,
392
- * but someone still uses it.
393
- *
394
- * The destroy will be delayed until
395
- * the last user releases us, but
396
- * we must kill timers etc. and move
397
- * it to safe state.
398
- */
399
- __skb_queue_purge (& n -> arp_queue );
400
- n -> arp_queue_len_bytes = 0 ;
401
- WRITE_ONCE (n -> output , neigh_blackhole );
402
- if (n -> nud_state & NUD_VALID )
403
- n -> nud_state = NUD_NOARP ;
404
- else
405
- n -> nud_state = NUD_NONE ;
406
- neigh_dbg (2 , "neigh %p is stray\n" , n );
407
- }
408
- write_unlock (& n -> lock );
409
- neigh_cleanup_and_release (n );
421
+ neigh_flush_one (n );
422
+ }
423
+ }
424
+
425
+ static void neigh_flush_table (struct neigh_table * tbl )
426
+ {
427
+ struct neigh_hash_table * nht ;
428
+ int i ;
429
+
430
+ nht = rcu_dereference_protected (tbl -> nht ,
431
+ lockdep_is_held (& tbl -> lock ));
432
+
433
+ for (i = 0 ; i < (1 << nht -> hash_shift ); i ++ ) {
434
+ struct hlist_node * tmp ;
435
+ struct neighbour * n ;
436
+
437
+ neigh_for_each_in_bucket_safe (n , tmp , & nht -> hash_heads [i ])
438
+ neigh_flush_one (n );
410
439
}
411
440
}
412
441
@@ -422,7 +451,12 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
422
451
bool skip_perm )
423
452
{
424
453
write_lock_bh (& tbl -> lock );
425
- neigh_flush_dev (tbl , dev , skip_perm );
454
+ if (likely (dev )) {
455
+ neigh_flush_dev (tbl , dev , skip_perm );
456
+ } else {
457
+ DEBUG_NET_WARN_ON_ONCE (skip_perm );
458
+ neigh_flush_table (tbl );
459
+ }
426
460
pneigh_ifdown_and_unlock (tbl , dev );
427
461
pneigh_queue_purge (& tbl -> proxy_queue , dev ? dev_net (dev ) : NULL ,
428
462
tbl -> family );
0 commit comments