@@ -5180,8 +5180,9 @@ static void napi_schedule_rps(struct softnet_data *sd)
         __napi_schedule_irqoff(&mysd->backlog);
 }
 
-void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+void kick_defer_list_purge(unsigned int cpu)
 {
+        struct softnet_data *sd = &per_cpu(softnet_data, cpu);
         unsigned long flags;
 
         if (use_backlog_threads()) {
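
kick_defer_list_purge() now derives the softnet_data instance from the CPU number itself via per_cpu(), so callers only hand over the CPU. A minimal before/after sketch of a call site under that assumption (the surrounding caller, e.g. skb_attempt_defer_free(), is not part of this hunk):

    /* Before: each caller resolved sd only to pass it back in. */
    struct softnet_data *sd = &per_cpu(softnet_data, cpu);
    kick_defer_list_purge(sd, cpu);

    /* After: the per_cpu() lookup lives inside kick_defer_list_purge(). */
    kick_defer_list_purge(cpu);
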
@@ -6715,24 +6716,24 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 }
 EXPORT_SYMBOL(napi_complete_done);
 
-static void skb_defer_free_flush(struct softnet_data *sd)
+static void skb_defer_free_flush(void)
 {
+        struct llist_node *free_list;
         struct sk_buff *skb, *next;
+        struct skb_defer_node *sdn;
+        int node;
 
-        /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
-        if (!READ_ONCE(sd->defer_list))
-                return;
+        for_each_node(node) {
+                sdn = this_cpu_ptr(net_hotdata.skb_defer_nodes) + node;
 
-        spin_lock(&sd->defer_lock);
-        skb = sd->defer_list;
-        sd->defer_list = NULL;
-        sd->defer_count = 0;
-        spin_unlock(&sd->defer_lock);
+                if (llist_empty(&sdn->defer_list))
+                        continue;
+                atomic_long_set(&sdn->defer_count, 0);
+                free_list = llist_del_all(&sdn->defer_list);
 
-        while (skb != NULL) {
-                next = skb->next;
-                napi_consume_skb(skb, 1);
-                skb = next;
+                llist_for_each_entry_safe(skb, next, free_list, ll_node) {
+                        napi_consume_skb(skb, 1);
+                }
         }
 }
 
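The rewritten flush walks one lock-free list per NUMA node, but the pieces it leans on — struct skb_defer_node, the sk_buff ll_node member, and the producer that fills the lists — are defined outside this diff. A sketch of how they plausibly fit together, assuming a two-field node structure and an llist_add() producer; skb_defer_push() is a hypothetical name and the exact layout is an assumption:

    /* Assumed layout: one slot per (cpu, node) pair, reached from the
     * flush side via this_cpu_ptr(net_hotdata.skb_defer_nodes) + node. */
    struct skb_defer_node {
            struct llist_head defer_list;   /* lock-free list of deferred skbs */
            atomic_long_t     defer_count;  /* producer-maintained fill estimate */
    } ____cacheline_aligned_in_smp;

    /* Hypothetical producer, mirroring what skb_attempt_defer_free() must do:
     * push onto the target CPU's slot for the local node, no spinlock held. */
    static void skb_defer_push(struct sk_buff *skb, unsigned int cpu)
    {
            struct skb_defer_node *sdn;

            sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
            atomic_long_inc(&sdn->defer_count);
            /* llist_add() is safe against a concurrent llist_del_all()
             * in skb_defer_free_flush(). */
            llist_add(&skb->ll_node, &sdn->defer_list);
    }

Note that the flush zeroes defer_count before llist_del_all(), presumably so a producer racing with the drain errs toward scheduling one extra purge rather than leaving skbs stranded behind a stale count.
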
@@ -6860,7 +6861,7 @@ static void __napi_busy_loop(unsigned int napi_id,
                 if (work > 0)
                         __NET_ADD_STATS(dev_net(napi->dev),
                                         LINUX_MIB_BUSYPOLLRXPACKETS, work);
-                skb_defer_free_flush(this_cpu_ptr(&softnet_data));
+                skb_defer_free_flush();
                 bpf_net_ctx_clear(bpf_net_ctx);
                 local_bh_enable();
 
@@ -7719,7 +7720,7 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
                         local_irq_disable();
                         net_rps_action_and_irq_enable(sd);
                 }
-                skb_defer_free_flush(sd);
+                skb_defer_free_flush();
                 bpf_net_ctx_clear(bpf_net_ctx);
                 local_bh_enable();
 
@@ -7761,7 +7762,7 @@ static __latent_entropy void net_rx_action(void)
         for (;;) {
                 struct napi_struct *n;
 
-                skb_defer_free_flush(sd);
+                skb_defer_free_flush();
 
                 if (list_empty(&list)) {
                         if (list_empty(&repoll)) {
@@ -12995,7 +12996,6 @@ static int __init net_dev_init(void)
                 sd->cpu = i;
 #endif
                 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
-                spin_lock_init(&sd->defer_lock);
 
                 gro_init(&sd->backlog.gro);
                 sd->backlog.poll = process_backlog;
@@ -13005,6 +13005,11 @@ static int __init net_dev_init(void)
                 if (net_page_pool_create(i))
                         goto out;
         }
+        net_hotdata.skb_defer_nodes =
+                __alloc_percpu(sizeof(struct skb_defer_node) * nr_node_ids,
+                               __alignof__(struct skb_defer_node));
+        if (!net_hotdata.skb_defer_nodes)
+                goto out;
         if (use_backlog_threads())
                 smpboot_register_percpu_thread(&backlog_threads);
 
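
The allocation above hands every CPU a contiguous block of nr_node_ids skb_defer_node slots, which is what lets skb_defer_free_flush() index with this_cpu_ptr(net_hotdata.skb_defer_nodes) + node. A small sketch of the resulting addressing; skb_defer_node_of() is a hypothetical helper, not part of the patch:

    /* Hypothetical accessor: the per-cpu block is an array of nr_node_ids
     * entries, so node picks the slot within a given CPU's block. */
    static inline struct skb_defer_node *skb_defer_node_of(int cpu, int node)
    {
            return per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + node;
    }

Sizing the block per node rather than keeping one list per CPU presumably reduces cross-node contention on the llist and keeps deferred freeing NUMA-aware.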