@@ -36,6 +36,13 @@
 #define TX_BATCH_SIZE 32
 #define MAX_PER_SOCKET_BUDGET 32
 
+struct xsk_addrs {
+	u32 num_descs;
+	u64 addrs[MAX_SKB_FRAGS + 1];
+};
+
+static struct kmem_cache *xsk_tx_generic_cache;
+
 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
 {
 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -532,25 +539,39 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
 }
 
-static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
+static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
 {
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&pool->cq_lock, flags);
-	ret = xskq_prod_reserve_addr(pool->cq, addr);
+	ret = xskq_prod_reserve(pool->cq);
 	spin_unlock_irqrestore(&pool->cq_lock, flags);
 
 	return ret;
 }
 
-static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
+static void xsk_cq_submit_addr_locked(struct xdp_sock *xs,
+				       struct sk_buff *skb)
 {
+	struct xsk_buff_pool *pool = xs->pool;
+	struct xsk_addrs *xsk_addrs;
 	unsigned long flags;
+	u32 num_desc, i;
+	u32 idx;
+
+	xsk_addrs = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+	num_desc = xsk_addrs->num_descs;
 
 	spin_lock_irqsave(&pool->cq_lock, flags);
-	xskq_prod_submit_n(pool->cq, n);
+	idx = xskq_get_prod(pool->cq);
+
+	for (i = 0; i < num_desc; i++)
+		xskq_prod_write_addr(pool->cq, idx + i, xsk_addrs->addrs[i]);
+	xskq_prod_submit_n(pool->cq, num_desc);
+
 	spin_unlock_irqrestore(&pool->cq_lock, flags);
+	kmem_cache_free(xsk_tx_generic_cache, xsk_addrs);
 }
 
 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
@@ -562,11 +583,6 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
 	spin_unlock_irqrestore(&pool->cq_lock, flags);
 }
 
-static u32 xsk_get_num_desc(struct sk_buff *skb)
-{
-	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
-}
-
 static void xsk_destruct_skb(struct sk_buff *skb)
 {
 	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
@@ -576,21 +592,37 @@ static void xsk_destruct_skb(struct sk_buff *skb)
 		*compl->tx_timestamp = ktime_get_tai_fast_ns();
 	}
 
-	xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
+	xsk_cq_submit_addr_locked(xdp_sk(skb->sk), skb);
 	sock_wfree(skb);
 }
 
-static void xsk_set_destructor_arg(struct sk_buff *skb)
+static u32 xsk_get_num_desc(struct sk_buff *skb)
 {
-	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
+	struct xsk_addrs *addrs;
 
-	skb_shinfo(skb)->destructor_arg = (void *)num;
+	addrs = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+	return addrs->num_descs;
+}
+
+static void xsk_set_destructor_arg(struct sk_buff *skb, struct xsk_addrs *addrs)
+{
+	skb_shinfo(skb)->destructor_arg = (void *)addrs;
+}
+
+static void xsk_inc_skb_descs(struct sk_buff *skb)
+{
+	struct xsk_addrs *addrs;
+
+	addrs = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+	addrs->num_descs++;
 }
 
 static void xsk_consume_skb(struct sk_buff *skb)
 {
 	struct xdp_sock *xs = xdp_sk(skb->sk);
 
+	kmem_cache_free(xsk_tx_generic_cache,
+			(struct xsk_addrs *)skb_shinfo(skb)->destructor_arg);
 	skb->destructor = sock_wfree;
 	xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
 	/* Free skb without triggering the perf drop trace */
@@ -609,6 +641,7 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
 {
 	struct xsk_buff_pool *pool = xs->pool;
 	u32 hr, len, ts, offset, copy, copied;
+	struct xsk_addrs *addrs = NULL;
 	struct sk_buff *skb = xs->skb;
 	struct page *page;
 	void *buffer;
@@ -623,6 +656,12 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
 			return ERR_PTR(err);
 
 		skb_reserve(skb, hr);
+
+		addrs = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
+		if (!addrs)
+			return ERR_PTR(-ENOMEM);
+
+		xsk_set_destructor_arg(skb, addrs);
 	}
 
 	addr = desc->addr;
@@ -662,6 +701,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 {
 	struct xsk_tx_metadata *meta = NULL;
 	struct net_device *dev = xs->dev;
+	struct xsk_addrs *addrs = NULL;
 	struct sk_buff *skb = xs->skb;
 	bool first_frag = false;
 	int err;
@@ -694,6 +734,15 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 		err = skb_store_bits(skb, 0, buffer, len);
 		if (unlikely(err))
 			goto free_err;
+
+		addrs = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
+		if (!addrs) {
+			err = -ENOMEM;
+			goto free_err;
+		}
+
+		xsk_set_destructor_arg(skb, addrs);
+
 	} else {
 		int nr_frags = skb_shinfo(skb)->nr_frags;
 		struct page *page;
@@ -759,7 +808,9 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 	skb->mark = READ_ONCE(xs->sk.sk_mark);
 	skb->destructor = xsk_destruct_skb;
 	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
-	xsk_set_destructor_arg(skb);
+
+	addrs = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+	addrs->addrs[addrs->num_descs++] = desc->addr;
 
 	return skb;
 
@@ -769,7 +820,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 
 	if (err == -EOVERFLOW) {
 		/* Drop the packet */
-		xsk_set_destructor_arg(xs->skb);
+		xsk_inc_skb_descs(xs->skb);
 		xsk_drop_skb(xs->skb);
 		xskq_cons_release(xs->tx);
 	} else {
@@ -812,7 +863,7 @@ static int __xsk_generic_xmit(struct sock *sk)
 		 * if there is space in it. This avoids having to implement
 		 * any buffering in the Tx path.
 		 */
-		err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+		err = xsk_cq_reserve_locked(xs->pool);
 		if (err) {
 			err = -EAGAIN;
 			goto out;
@@ -1815,6 +1866,14 @@ static int __init xsk_init(void)
 	if (err)
 		goto out_pernet;
 
+	xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
+						 sizeof(struct xsk_addrs), 0,
+						 SLAB_HWCACHE_ALIGN, NULL);
+	if (!xsk_tx_generic_cache) {
+		err = -ENOMEM;
+		goto out_pernet;
+	}
+
 	return 0;
 
 out_pernet:
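
Below is a simplified userspace model of the bookkeeping this diff introduces; it is an illustrative sketch, not the kernel code above. At send time the completion queue only gets a slot reserved, while each descriptor's address is recorded in a per-skb container (mirroring struct xsk_addrs); only the skb destructor writes the recorded addresses into the ring and frees the container. All names below are stand-ins, and a plain array takes the place of the real xskq_* producer ring.

/* Userspace sketch of the per-skb address container pattern; not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_RING_SIZE 64
#define MODEL_MAX_FRAGS 17			/* stands in for MAX_SKB_FRAGS + 1 */

struct model_xsk_addrs {
	uint32_t num_descs;			/* descriptors covered by this skb */
	uint64_t addrs[MODEL_MAX_FRAGS];	/* their UMEM addresses, in order */
};

struct model_cq {
	uint64_t ring[MODEL_RING_SIZE];
	uint32_t prod;				/* producer index */
	uint32_t reserved;			/* slots reserved, not yet submitted */
};

/* Send path: reserve a slot only; the address is not written yet. */
static int model_cq_reserve(struct model_cq *cq)
{
	if (cq->reserved >= MODEL_RING_SIZE)	/* no consumer in this model */
		return -1;			/* queue full: back off, like -EAGAIN */
	cq->reserved++;
	return 0;
}

/* Destructor path: write all recorded addresses back-to-back and submit. */
static void model_cq_submit_addrs(struct model_cq *cq, struct model_xsk_addrs *xa)
{
	for (uint32_t i = 0; i < xa->num_descs; i++)
		cq->ring[(cq->prod + i) % MODEL_RING_SIZE] = xa->addrs[i];
	cq->prod += xa->num_descs;
	free(xa);				/* kmem_cache_free() in the kernel */
}

int main(void)
{
	struct model_cq cq = { 0 };
	struct model_xsk_addrs *xa = calloc(1, sizeof(*xa));	/* kmem_cache_zalloc() */

	if (!xa)
		return 1;

	/* One multi-buffer skb spanning three Tx descriptors. */
	for (uint64_t addr = 0x1000; addr <= 0x3000; addr += 0x1000) {
		if (model_cq_reserve(&cq))
			return 1;		/* backpressure: no Tx-side buffering */
		xa->addrs[xa->num_descs++] = addr;
	}

	/* The destructor runs only once the driver has consumed the skb. */
	model_cq_submit_addrs(&cq, xa);
	printf("completed %u descriptors, first addr 0x%llx\n",
	       (unsigned int)cq.prod, (unsigned long long)cq.ring[0]);
	return 0;
}

The split matters because the old xsk_cq_reserve_addr_locked() wrote desc.addr into the ring at reserve time, before the skb had actually been sent; keeping the addresses in the per-skb container until the destructor runs means completions are only published for buffers the stack has truly finished with.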