@@ -5180,8 +5180,9 @@ static void napi_schedule_rps(struct softnet_data *sd)
 	__napi_schedule_irqoff(&mysd->backlog);
 }
 
-void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+void kick_defer_list_purge(unsigned int cpu)
 {
+	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
 	unsigned long flags;
 
 	if (use_backlog_threads()) {
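With the softnet_data lookup moved inside the callee, a remote producer only needs the target CPU id. A minimal before/after sketch of a call site (hypothetical caller; the real producer sits outside this diff):

	/* Before: the caller had to resolve the target's softnet_data itself. */
	kick_defer_list_purge(&per_cpu(softnet_data, cpu), cpu);

	/* After: the cpu id alone is enough; per_cpu() runs in the callee. */
	kick_defer_list_purge(cpu);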
@@ -6715,18 +6716,24 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 }
 EXPORT_SYMBOL(napi_complete_done);
 
-static void skb_defer_free_flush(struct softnet_data *sd)
+static void skb_defer_free_flush(void)
 {
 	struct llist_node *free_list;
 	struct sk_buff *skb, *next;
+	struct skb_defer_node *sdn;
+	int node;
 
-	if (llist_empty(&sd->defer_list))
-		return;
-	atomic_long_set(&sd->defer_count, 0);
-	free_list = llist_del_all(&sd->defer_list);
+	for_each_node(node) {
+		sdn = this_cpu_ptr(net_hotdata.skb_defer_nodes) + node;
+
+		if (llist_empty(&sdn->defer_list))
+			continue;
+		atomic_long_set(&sdn->defer_count, 0);
+		free_list = llist_del_all(&sdn->defer_list);
 
-	llist_for_each_entry_safe(skb, next, free_list, ll_node) {
-		napi_consume_skb(skb, 1);
+		llist_for_each_entry_safe(skb, next, free_list, ll_node) {
+			napi_consume_skb(skb, 1);
+		}
 	}
 }
 
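skb_defer_free_flush() now walks one slot per NUMA node in a per-CPU array. A sketch of the structure this implies, assuming only the two fields the loop touches (the real definition, and any alignment attribute on it, lives in a header outside this diff):

	struct skb_defer_node {
		struct llist_head	defer_list;	/* lockless list of deferred skbs */
		atomic_long_t		defer_count;	/* producers bump it, the flusher resets it */
	};

Splitting the list per node presumably reduces cross-node cache-line contention on a single llist head when CPUs on several nodes defer skbs toward the same target CPU.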
@@ -6854,7 +6861,7 @@ static void __napi_busy_loop(unsigned int napi_id,
 		if (work > 0)
 			__NET_ADD_STATS(dev_net(napi->dev),
 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
-		skb_defer_free_flush(this_cpu_ptr(&softnet_data));
+		skb_defer_free_flush();
 		bpf_net_ctx_clear(bpf_net_ctx);
 		local_bh_enable();
 
@@ -7713,7 +7720,7 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
 			local_irq_disable();
 			net_rps_action_and_irq_enable(sd);
 		}
-		skb_defer_free_flush(sd);
+		skb_defer_free_flush();
 		bpf_net_ctx_clear(bpf_net_ctx);
 		local_bh_enable();
 
@@ -7755,7 +7762,7 @@ static __latent_entropy void net_rx_action(void)
 	for (;;) {
 		struct napi_struct *n;
 
-		skb_defer_free_flush(sd);
+		skb_defer_free_flush();
 
 		if (list_empty(&list)) {
 			if (list_empty(&repoll)) {
@@ -12989,7 +12996,6 @@ static int __init net_dev_init(void)
 		sd->cpu = i;
 #endif
 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
-		init_llist_head(&sd->defer_list);
 
 		gro_init(&sd->backlog.gro);
 		sd->backlog.poll = process_backlog;
@@ -12999,6 +13005,11 @@ static int __init net_dev_init(void)
 		if (net_page_pool_create(i))
 			goto out;
 	}
+	net_hotdata.skb_defer_nodes =
+		__alloc_percpu(sizeof(struct skb_defer_node) * nr_node_ids,
+			       __alignof__(struct skb_defer_node));
+	if (!net_hotdata.skb_defer_nodes)
+		goto out;
 	if (use_backlog_threads())
 		smpboot_register_percpu_thread(&backlog_threads);
 
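__alloc_percpu() sizes each CPU's chunk as nr_node_ids consecutive skb_defer_node entries, so a (cpu, node) slot is reached by plain pointer arithmetic. A hedged accessor sketch (the helper name is hypothetical; it merely mirrors the this_cpu_ptr(...) + node expression in skb_defer_free_flush() above):

	/* Hypothetical accessor: the defer slot for @node in @cpu's array. */
	static struct skb_defer_node *skb_defer_node_ptr(unsigned int cpu, int node)
	{
		return per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + node;
	}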