@@ -357,48 +357,10 @@ static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
 	return ret;
 }
 
-static void cptvf_disable_msix(struct cpt_vf *cptvf)
+static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
 {
-	if (cptvf->msix_enabled) {
-		pci_disable_msix(cptvf->pdev);
-		cptvf->msix_enabled = 0;
-	}
-}
-
-static int cptvf_enable_msix(struct cpt_vf *cptvf)
-{
-	int i, ret;
-
-	for (i = 0; i < CPT_VF_MSIX_VECTORS; i++)
-		cptvf->msix_entries[i].entry = i;
-
-	ret = pci_enable_msix(cptvf->pdev, cptvf->msix_entries,
-			      CPT_VF_MSIX_VECTORS);
-	if (ret) {
-		dev_err(&cptvf->pdev->dev, "Request for #%d msix vectors failed\n",
-			CPT_VF_MSIX_VECTORS);
-		return ret;
-	}
-
-	cptvf->msix_enabled = 1;
-	/* Mark MSIX enabled */
-	cptvf->flags |= CPT_FLAG_MSIX_ENABLED;
-
-	return 0;
-}
-
-static void cptvf_free_all_interrupts(struct cpt_vf *cptvf)
-{
-	int irq;
-
-	for (irq = 0; irq < CPT_VF_MSIX_VECTORS; irq++) {
-		if (cptvf->irq_allocated[irq])
-			irq_set_affinity_hint(cptvf->msix_entries[irq].vector,
-					      NULL);
-		free_cpumask_var(cptvf->affinity_mask[irq]);
-		free_irq(cptvf->msix_entries[irq].vector, cptvf);
-		cptvf->irq_allocated[irq] = false;
-	}
+	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
+	free_cpumask_var(cptvf->affinity_mask[vec]);
 }
 
 static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
@@ -650,85 +612,23 @@ static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
 	return IRQ_HANDLED;
 }
 
-static int cptvf_register_misc_intr(struct cpt_vf *cptvf)
-{
-	struct pci_dev *pdev = cptvf->pdev;
-	int ret;
-
-	/* Register misc interrupt handlers */
-	ret = request_irq(cptvf->msix_entries[CPT_VF_INT_VEC_E_MISC].vector,
-			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
-			  cptvf);
-	if (ret)
-		goto fail;
-
-	cptvf->irq_allocated[CPT_VF_INT_VEC_E_MISC] = true;
-
-	/* Enable mailbox interrupt */
-	cptvf_enable_mbox_interrupts(cptvf);
-	cptvf_enable_swerr_interrupts(cptvf);
-
-	return 0;
-
-fail:
-	dev_err(&pdev->dev, "Request misc irq failed");
-	cptvf_free_all_interrupts(cptvf);
-	return ret;
-}
-
-static int cptvf_register_done_intr(struct cpt_vf *cptvf)
-{
-	struct pci_dev *pdev = cptvf->pdev;
-	int ret;
-
-	/* Register DONE interrupt handlers */
-	ret = request_irq(cptvf->msix_entries[CPT_VF_INT_VEC_E_DONE].vector,
-			  cptvf_done_intr_handler, 0, "CPT VF done intr",
-			  cptvf);
-	if (ret)
-		goto fail;
-
-	cptvf->irq_allocated[CPT_VF_INT_VEC_E_DONE] = true;
-
-	/* Enable mailbox interrupt */
-	cptvf_enable_done_interrupts(cptvf);
-	return 0;
-
-fail:
-	dev_err(&pdev->dev, "Request done irq failed\n");
-	cptvf_free_all_interrupts(cptvf);
-	return ret;
-}
-
-static void cptvf_unregister_interrupts(struct cpt_vf *cptvf)
-{
-	cptvf_free_all_interrupts(cptvf);
-	cptvf_disable_msix(cptvf);
-}
-
-static void cptvf_set_irq_affinity(struct cpt_vf *cptvf)
+static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
 {
 	struct pci_dev *pdev = cptvf->pdev;
-	int vec, cpu;
-	int irqnum;
-
-	for (vec = 0; vec < CPT_VF_MSIX_VECTORS; vec++) {
-		if (!cptvf->irq_allocated[vec])
-			continue;
-
-		if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
-					GFP_KERNEL)) {
-			dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
-				cptvf->vfid);
-			return;
-		}
+	int cpu;
 
-		cpu = cptvf->vfid % num_online_cpus();
-		cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
-				cptvf->affinity_mask[vec]);
-		irqnum = cptvf->msix_entries[vec].vector;
-		irq_set_affinity_hint(irqnum, cptvf->affinity_mask[vec]);
+	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
+				GFP_KERNEL)) {
+		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
+			cptvf->vfid);
+		return;
 	}
+
+	cpu = cptvf->vfid % num_online_cpus();
+	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
+			cptvf->affinity_mask[vec]);
+	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
+			      cptvf->affinity_mask[vec]);
 }
 
 static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
@@ -809,36 +709,46 @@ static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	cptvf->node = dev_to_node(&pdev->dev);
-	/* Enable MSI-X */
-	err = cptvf_enable_msix(cptvf);
-	if (err) {
-		dev_err(dev, "cptvf_enable_msix() failed");
+	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
+				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
+	if (err < 0) {
+		dev_err(dev, "Request for #%d msix vectors failed\n",
+			CPT_VF_MSIX_VECTORS);
 		goto cptvf_err_release_regions;
 	}
 
-	/* Register mailbox interrupts */
-	cptvf_register_misc_intr(cptvf);
+	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
+			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
+			  cptvf);
+	if (err) {
+		dev_err(dev, "Request misc irq failed");
+		goto cptvf_free_vectors;
+	}
+
+	/* Enable mailbox interrupt */
+	cptvf_enable_mbox_interrupts(cptvf);
+	cptvf_enable_swerr_interrupts(cptvf);
 
 	/* Check ready with PF */
 	/* Gets chip ID / device Id from PF if ready */
 	err = cptvf_check_pf_ready(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to READY msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 
 	/* CPT VF software resources initialization */
 	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
 	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
 	if (err) {
 		dev_err(dev, "cptvf_sw_init() failed");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 	/* Convey VQ LEN to PF */
 	err = cptvf_send_vq_size_msg(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to QLEN msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 
 	/* CPT VF device initialization */
@@ -848,37 +758,50 @@ static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = cptvf_send_vf_to_grp_msg(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to VF_GRP msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
 	}
 
 	cptvf->priority = 1;
 	err = cptvf_send_vf_priority_msg(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to VF_PRIO msg");
-		goto cptvf_err_release_regions;
+		goto cptvf_free_misc_irq;
+	}
+
+	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
+			  cptvf_done_intr_handler, 0, "CPT VF done intr",
+			  cptvf);
+	if (err) {
+		dev_err(dev, "Request done irq failed\n");
+		goto cptvf_free_misc_irq;
 	}
-	/* Register DONE interrupts */
-	err = cptvf_register_done_intr(cptvf);
-	if (err)
-		goto cptvf_err_release_regions;
+
+	/* Enable mailbox interrupt */
+	cptvf_enable_done_interrupts(cptvf);
 
 	/* Set irq affinity masks */
-	cptvf_set_irq_affinity(cptvf);
-	/* Convey UP to PF */
+	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+
 	err = cptvf_send_vf_up(cptvf);
 	if (err) {
 		dev_err(dev, "PF not responding to UP msg");
-		goto cptvf_up_fail;
+		goto cptvf_free_irq_affinity;
 	}
 	err = cvm_crypto_init(cptvf);
 	if (err) {
 		dev_err(dev, "Algorithm register failed\n");
-		goto cptvf_up_fail;
+		goto cptvf_free_irq_affinity;
 	}
 	return 0;
 
-cptvf_up_fail:
-	cptvf_unregister_interrupts(cptvf);
+cptvf_free_irq_affinity:
+	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+cptvf_free_misc_irq:
+	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+cptvf_free_vectors:
+	pci_free_irq_vectors(cptvf->pdev);
 cptvf_err_release_regions:
 	pci_release_regions(pdev);
 cptvf_err_disable_device:
@@ -899,7 +822,11 @@ static void cptvf_remove(struct pci_dev *pdev)
 	if (cptvf_send_vf_down(cptvf)) {
 		dev_err(&pdev->dev, "PF not responding to DOWN msg");
 	} else {
-		cptvf_unregister_interrupts(cptvf);
+		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
+		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
+		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
+		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
+		pci_free_irq_vectors(cptvf->pdev);
 		cptvf_sw_cleanup(cptvf);
 		pci_set_drvdata(pdev, NULL);
 		pci_release_regions(pdev);
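For reference, the new code path drops the driver-managed msix_entries[] and irq_allocated[] bookkeeping in favour of the generic PCI IRQ vector API: pci_alloc_irq_vectors() reserves the MSI-X vectors, pci_irq_vector() maps a vector index to a Linux IRQ number for request_irq()/free_irq(), and pci_free_irq_vectors() releases them. The sketch below illustrates that pattern in isolation; it is not the CPT driver's code, and "struct my_dev", "NR_VECS" and the handler name are illustrative assumptions.

/*
 * Minimal sketch of the pci_alloc_irq_vectors() usage pattern this commit
 * adopts. Names marked below are hypothetical, not from the driver.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

#define NR_VECS 2			/* illustrative vector count */

struct my_dev {				/* hypothetical per-device state */
	struct pci_dev *pdev;
};

static irqreturn_t my_misc_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_dev_setup_irqs(struct my_dev *md)
{
	struct pci_dev *pdev = md->pdev;
	int ret;

	/* Ask the PCI core for exactly NR_VECS MSI-X vectors. */
	ret = pci_alloc_irq_vectors(pdev, NR_VECS, NR_VECS, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	/* Translate vector index 0 to a Linux IRQ number and hook a handler. */
	ret = request_irq(pci_irq_vector(pdev, 0), my_misc_handler, 0,
			  "my_dev misc", md);
	if (ret)
		goto free_vectors;

	return 0;

free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void my_dev_teardown_irqs(struct my_dev *md)
{
	/* Teardown mirrors allocation in reverse: free the IRQ, then the vectors. */
	free_irq(pci_irq_vector(md->pdev, 0), md);
	pci_free_irq_vectors(md->pdev);
}

The unwind labels added to cptvf_probe() follow the same reverse order: affinity hints, then the requested IRQ, then the vectors, so each error path only releases what has already been set up.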