@@ -359,25 +359,24 @@ static int nsim_poll(struct napi_struct *napi, int budget)
 	return done;
 }
 
-static int nsim_create_page_pool(struct nsim_rq *rq)
+static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi)
 {
-	struct page_pool_params p = {
+	struct page_pool_params params = {
 		.order = 0,
 		.pool_size = NSIM_RING_SIZE,
 		.nid = NUMA_NO_NODE,
-		.dev = &rq->napi.dev->dev,
-		.napi = &rq->napi,
+		.dev = &napi->dev->dev,
+		.napi = napi,
 		.dma_dir = DMA_BIDIRECTIONAL,
-		.netdev = rq->napi.dev,
+		.netdev = napi->dev,
 	};
+	struct page_pool *pool;
 
-	rq->page_pool = page_pool_create(&p);
-	if (IS_ERR(rq->page_pool)) {
-		int err = PTR_ERR(rq->page_pool);
+	pool = page_pool_create(&params);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
 
-		rq->page_pool = NULL;
-		return err;
-	}
+	*p = pool;
 
 	return 0;
 }
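A note on the refactor above, not part of the patch: the helper now writes through the out-parameter only on success, so callers can zero-initialize the destination and clean up unconditionally, because page_pool_destroy() is a no-op on a NULL pool. The queue management code below relies on exactly this for qmem->pp, which is only populated in reset mode 1. An illustrative fragment (the local variable and error message are hypothetical):

	/* Illustrative only: *p is untouched on failure, so a NULL-initialized
	 * destination stays NULL and unconditional cleanup is safe.
	 */
	struct page_pool *pp = NULL;
	int err;

	err = nsim_create_page_pool(&pp, &rq->napi);
	if (err)
		pr_warn("netdevsim: pool creation failed: %d\n", err);

	page_pool_destroy(pp);	/* safe either way: no-op when pp is NULL */
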
@@ -396,7 +395,7 @@ static int nsim_init_napi(struct netdevsim *ns)
 	for (i = 0; i < dev->num_rx_queues; i++) {
 		rq = ns->rq[i];
 
-		err = nsim_create_page_pool(rq);
+		err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
 		if (err)
 			goto err_pp_destroy;
 	}
@@ -613,6 +612,116 @@ static void nsim_queue_free(struct nsim_rq *rq)
 	kfree(rq);
 }
 
+/* Queue reset mode is controlled by ns->rq_reset_mode.
+ * - normal - new NAPI new pool (old NAPI enabled when new added)
+ * - mode 1 - allocate new pool (NAPI is only disabled / enabled)
+ * - mode 2 - new NAPI new pool (old NAPI removed before new added)
+ * - mode 3 - new NAPI new pool (old NAPI disabled when new added)
+ */
+struct nsim_queue_mem {
+	struct nsim_rq *rq;
+	struct page_pool *pp;
+};
+
+static int
+nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
+{
+	struct nsim_queue_mem *qmem = per_queue_mem;
+	struct netdevsim *ns = netdev_priv(dev);
+	int err;
+
+	if (ns->rq_reset_mode > 3)
+		return -EINVAL;
+
+	if (ns->rq_reset_mode == 1)
+		return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+
+	qmem->rq = nsim_queue_alloc();
+	if (!qmem->rq)
+		return -ENOMEM;
+
+	err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
+	if (err)
+		goto err_free;
+
+	if (!ns->rq_reset_mode)
+		netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+
+	return 0;
+
+err_free:
+	nsim_queue_free(qmem->rq);
+	return err;
+}
+
+static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
+{
+	struct nsim_queue_mem *qmem = per_queue_mem;
+	struct netdevsim *ns = netdev_priv(dev);
+
+	page_pool_destroy(qmem->pp);
+	if (qmem->rq) {
+		if (!ns->rq_reset_mode)
+			netif_napi_del(&qmem->rq->napi);
+		page_pool_destroy(qmem->rq->page_pool);
+		nsim_queue_free(qmem->rq);
+	}
+}
+
+static int
+nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
+{
+	struct nsim_queue_mem *qmem = per_queue_mem;
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (ns->rq_reset_mode == 1) {
+		ns->rq[idx]->page_pool = qmem->pp;
+		napi_enable(&ns->rq[idx]->napi);
+		return 0;
+	}
+
+	/* netif_napi_add()/_del() should normally be called from alloc/free,
+	 * here we want to test various call orders.
+	 */
+	if (ns->rq_reset_mode == 2) {
+		netif_napi_del(&ns->rq[idx]->napi);
+		netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+	} else if (ns->rq_reset_mode == 3) {
+		netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+		netif_napi_del(&ns->rq[idx]->napi);
+	}
+
+	ns->rq[idx] = qmem->rq;
+	napi_enable(&ns->rq[idx]->napi);
+
+	return 0;
+}
+
+static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
+{
+	struct nsim_queue_mem *qmem = per_queue_mem;
+	struct netdevsim *ns = netdev_priv(dev);
+
+	napi_disable(&ns->rq[idx]->napi);
+
+	if (ns->rq_reset_mode == 1) {
+		qmem->pp = ns->rq[idx]->page_pool;
+		page_pool_disable_direct_recycling(qmem->pp);
+	} else {
+		qmem->rq = ns->rq[idx];
+	}
+
+	return 0;
+}
+
+static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = {
+	.ndo_queue_mem_size	= sizeof(struct nsim_queue_mem),
+	.ndo_queue_mem_alloc	= nsim_queue_mem_alloc,
+	.ndo_queue_mem_free	= nsim_queue_mem_free,
+	.ndo_queue_start	= nsim_queue_start,
+	.ndo_queue_stop		= nsim_queue_stop,
+};
+
 static ssize_t
 nsim_pp_hold_read(struct file *file, char __user *data,
 		  size_t count, loff_t *ppos)
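For orientation, not part of this diff: these five callbacks are driven by the core queue API when a queue is restarted. The sketch below is a simplified, assumption-laden rendering of the netdev_rx_queue_restart() flow in net/core/netdev_rx_queue.c; the function name and the error unwinding here are illustrative (the real code also restarts the old queue if starting the new one fails, and skips stop/start when the device is down):

	/* Simplified sketch of the core restart sequence for rx queue @rxq. */
	static int rxq_restart_sketch(struct net_device *dev, unsigned int rxq)
	{
		const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
		void *new_mem, *old_mem;
		int err;

		new_mem = kvzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
		old_mem = kvzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
		if (!new_mem || !old_mem) {
			err = -ENOMEM;
			goto out_free;
		}

		/* Allocate the new queue's resources before touching the live
		 * queue, so an allocation failure leaves traffic flowing.
		 */
		err = ops->ndo_queue_mem_alloc(dev, new_mem, rxq);
		if (err)
			goto out_free;

		/* Stop fills old_mem with the outgoing queue's state ... */
		err = ops->ndo_queue_stop(dev, old_mem, rxq);
		if (err)
			goto out_free_new;

		/* ... and start installs the staged state from new_mem. */
		err = ops->ndo_queue_start(dev, new_mem, rxq);
		if (err)
			goto out_free_new;

		ops->ndo_queue_mem_free(dev, old_mem);
		goto out_free;

	out_free_new:
		ops->ndo_queue_mem_free(dev, new_mem);
	out_free:
		kvfree(new_mem);
		kvfree(old_mem);
		return err;
	}

This ordering is why nsim_queue_mem_alloc() may create NAPIs and pools without touching ns->rq[idx]: the swap only happens in ndo_queue_start(), after the old queue has been quiesced.
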
@@ -739,6 +848,7 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
 	ns->phc = phc;
 	ns->netdev->netdev_ops = &nsim_netdev_ops;
 	ns->netdev->stat_ops = &nsim_stat_ops;
+	ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops;
 
 	err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
 	if (err)
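A follow-up note on the registration above: once queue_mgmt_ops is set, a per-queue restart can be requested from kernel context, which is how the reset modes get exercised. A minimal hedged sketch, assuming netdev_rx_queue_restart() from net/core/netdev_rx_queue.c is available and must be called under rtnl; the trigger function itself is hypothetical (netdevsim would wire something like it to a debugfs knob):

	/* Hypothetical trigger for illustration: restart rx queue @idx. */
	static int nsim_trigger_qreset(struct netdevsim *ns, unsigned int idx)
	{
		int err;

		rtnl_lock();
		err = netdev_rx_queue_restart(ns->netdev, idx);
		rtnl_unlock();

		return err;
	}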