@@ -386,6 +386,38 @@ static int otx2_set_real_num_queues(struct net_device *netdev,
 	return err;
 }
 
+static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+{
+	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
+	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
+	int qidx = cq_poll->cint_idx;
+
+	/* Disable interrupts.
+	 *
+	 * Completion interrupts behave in a level-triggered
+	 * fashion, and hence have to be cleared only after they
+	 * are serviced.
+	 */
+	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+	/* Schedule NAPI */
+	napi_schedule_irqoff(&cq_poll->napi);
+
+	return IRQ_HANDLED;
+}
+
+static void otx2_disable_napi(struct otx2_nic *pf)
+{
+	struct otx2_qset *qset = &pf->qset;
+	struct otx2_cq_poll *cq_poll;
+	int qidx;
+
+	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+		cq_poll = &qset->napi[qidx];
+		napi_disable(&cq_poll->napi);
+		netif_napi_del(&cq_poll->napi);
+	}
+}
+
 static void otx2_free_cq_res(struct otx2_nic *pf)
 {
 	struct otx2_qset *qset = &pf->qset;
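The handler above only masks the CINT and hands off to NAPI; re-arming is the poll callback's job. otx2_napi_handler itself is not part of this diff, so the sketch below is illustrative only (the name example_napi_poll and its body are assumptions), but it shows the level-triggered contract: clear the pending bit and set ENA_W1S only once the CQs have been drained below budget.

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct otx2_cq_poll *cq_poll =
		container_of(napi, struct otx2_cq_poll, napi);
	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
	int workdone = 0;

	/* ... drain up to 'budget' entries from the CQs listed in
	 * cq_poll->cq_ids[], skipping CINT_INVALID_CQ slots ...
	 */

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* Clear any pending bit (W1C), then re-arm the CINT */
		otx2_write64(pf, NIX_LF_CINTX_INT(cq_poll->cint_idx),
			     BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}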
@@ -564,12 +596,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
 static int otx2_open(struct net_device *netdev)
 {
 	struct otx2_nic *pf = netdev_priv(netdev);
+	struct otx2_cq_poll *cq_poll = NULL;
 	struct otx2_qset *qset = &pf->qset;
-	int err = 0;
+	int err = 0, qidx, vec;
+	char *irq_name;
 
 	netif_carrier_off(netdev);
 
 	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+	/* RQ and SQs are mapped to different CQs,
+	 * so find out max CQ IRQs (i.e. CINTs) needed.
+	 */
+	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
+	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+	if (!qset->napi)
+		return -ENOMEM;
 
 	/* CQ size of RQ */
 	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
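Since qset->napi above is sized by cint_cnt = max(rx_queues, tx_queues), each CINT can service one RX CQ and one TX CQ, and whichever queue type has fewer queues leaves its slot invalid on the higher CINTs. Here is a standalone worked example of the mapping the next hunk builds, using hypothetical queue counts (4 RX, 2 TX); CINT_INVALID_CQ is redefined locally just for the illustration.

#include <stdio.h>

#define CINT_INVALID_CQ	(-1)

int main(void)
{
	int rx_queues = 4, tx_queues = 2;	/* hypothetical counts */
	int cint_cnt = rx_queues > tx_queues ? rx_queues : tx_queues;
	int qidx;

	/* Mirrors otx2_open(): RX CQs occupy ids 0..rx_queues-1,
	 * TX CQs follow at rx_queues..rx_queues+tx_queues-1.
	 */
	for (qidx = 0; qidx < cint_cnt; qidx++) {
		int rx_cq = qidx < rx_queues ? qidx : CINT_INVALID_CQ;
		int tx_cq = qidx < tx_queues ?
			    qidx + rx_queues : CINT_INVALID_CQ;
		printf("CINT%d: rx_cq=%d tx_cq=%d\n", qidx, rx_cq, tx_cq);
	}
	return 0;
}

With these counts, CINT0 and CINT1 each carry an RX and a TX CQ (tx_cq 4 and 5), while CINT2 and CINT3 carry only an RX CQ.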
@@ -591,23 +632,100 @@ static int otx2_open(struct net_device *netdev)
 	if (err)
 		goto err_free_mem;
 
+	/* Register NAPI handler */
+	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+		cq_poll = &qset->napi[qidx];
+		cq_poll->cint_idx = qidx;
+		/* RQ0 & SQ0 are mapped to CINT0 and so on..
+		 * 'cq_ids[0]' points to RQ's CQ and
+		 * 'cq_ids[1]' points to SQ's CQ.
+		 */
+		cq_poll->cq_ids[CQ_RX] =
+			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
+		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
+				qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+		cq_poll->dev = (void *)pf;
+		netif_napi_add(netdev, &cq_poll->napi,
+			       otx2_napi_handler, NAPI_POLL_WEIGHT);
+		napi_enable(&cq_poll->napi);
+	}
+
+	/* Register CQ IRQ handlers */
+	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+
+		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
+			 qidx);
+
+		err = request_irq(pci_irq_vector(pf->pdev, vec),
+				  otx2_cq_intr_handler, 0, irq_name,
+				  &qset->napi[qidx]);
+		if (err) {
+			dev_err(pf->dev,
+				"RVUPF%d: IRQ registration failed for CQ%d\n",
+				rvu_get_pf(pf->pcifunc), qidx);
+			goto err_free_cints;
+		}
+		vec++;
+
+		otx2_config_irq_coalescing(pf, qidx);
+
+		/* Enable CQ IRQ */
+		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+	}
+
+	otx2_set_cints_affinity(pf);
+
 	return 0;
+
+err_free_cints:
+	otx2_free_cints(pf, qidx);
+	otx2_disable_napi(pf);
+	otx2_free_hw_resources(pf);
 err_free_mem:
 	kfree(qset->sq);
 	kfree(qset->cq);
+	kfree(qset->napi);
 	return err;
 }
 
 static int otx2_stop(struct net_device *netdev)
 {
 	struct otx2_nic *pf = netdev_priv(netdev);
+	struct otx2_cq_poll *cq_poll = NULL;
 	struct otx2_qset *qset = &pf->qset;
+	int qidx, vec;
+
+	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
+
+	/* Cleanup CQ NAPI and IRQ */
+	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
+		/* Disable interrupt */
+		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+		synchronize_irq(pci_irq_vector(pf->pdev, vec));
+
+		cq_poll = &qset->napi[qidx];
+		napi_synchronize(&cq_poll->napi);
+		vec++;
+	}
+
+	netif_tx_disable(netdev);
+
 	otx2_free_hw_resources(pf);
+	otx2_free_cints(pf, pf->hw.cint_cnt);
+	otx2_disable_napi(pf);
+
+	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
 
 	kfree(qset->sq);
 	kfree(qset->cq);
-
+	kfree(qset->napi);
 	/* Do not clear RQ/SQ ringsize settings */
 	memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
 	       sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
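otx2_free_cints() is called from the error path above with the number of IRQs that were successfully requested (qidx stops at the failing vector), and from otx2_stop() with the full cint_cnt. Its body is not part of this diff; a minimal sketch, assuming it simply unwinds what the request_irq() loop set up (the name example_free_cints is a placeholder):

static void example_free_cints(struct otx2_nic *pf, int n)
{
	struct otx2_qset *qset = &pf->qset;
	int vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	int qidx;

	for (qidx = 0; qidx < n; qidx++, vec++) {
		int irq = pci_irq_vector(pf->pdev, vec);

		/* Mask the CINT before giving the vector back */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
		synchronize_irq(irq);
		free_irq(irq, &qset->napi[qidx]);
	}
}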
@@ -646,7 +764,6 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
 	 * upto NIX vector offset.
 	 */
 	num_vec = hw->nix_msixoff;
-#define NIX_LF_CINT_VEC_START 0x40
 	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
 
 	otx2_disable_mbox_intr(pf);
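The #define removed here was local to this function; with CQ interrupts now requested in otx2_open(), NIX_LF_CINT_VEC_START is presumably shared via a header. The arithmetic is easy to check by hand with hypothetical values (neither number appears in this diff): if nix_msixoff = 0x80 and max_queues = 16, then num_vec = 0x80 + 0x40 + 16 = 0xD0 (208), i.e. enough vectors that every CINT vector (nix_msixoff + NIX_LF_CINT_VEC_START + qidx) used by otx2_open() is covered.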
@@ -769,6 +886,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_detach_rsrc;
 
+	otx2_setup_dev_hw_settings(pf);
+
 	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
 	 * HW allocates buffer pointer from stack and uses it for DMA'ing
 	 * ingress packet. In some scenarios HW can free back allocated buffer