@@ -6,6 +6,7 @@
 
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -68,6 +69,8 @@ enum vmd_features {
 	VMD_FEAT_CAN_BYPASS_MSI_REMAP		= (1 << 4),
 };
 
+static DEFINE_IDA(vmd_instance_ida);
+
 /*
  * Lock for manipulating VMD IRQ lists.
  */
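Note: the new vmd_instance_ida hands each bound VMD controller a small unique number, which vmd_probe() below turns into a per-device name ("vmd0", "vmd1", ...) for its IRQ handlers, so multiple controllers can be told apart in /proc/interrupts. A minimal sketch of the pattern in isolation (the example_* names are hypothetical, not driver code):

	#include <linux/idr.h>
	#include <linux/slab.h>

	static DEFINE_IDA(example_ida);

	static int example_setup(void)
	{
		int instance;
		char *name;

		/* Grab the lowest free id; ids are recycled after removal. */
		instance = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
		if (instance < 0)
			return instance;

		/* Build a heap-allocated per-instance name: "dev0", "dev1", ... */
		name = kasprintf(GFP_KERNEL, "dev%d", instance);
		if (!name) {
			ida_simple_remove(&example_ida, instance);
			return -ENOMEM;
		}

		/* ... use name, e.g. as the devname argument of request_irq() ... */

		kfree(name);
		ida_simple_remove(&example_ida, instance);
		return 0;
	}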
@@ -118,6 +121,8 @@ struct vmd_dev {
 	struct pci_bus		*bus;
 	u8			busn_start;
 	u8			first_vec;
+	char			*name;
+	int			instance;
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -648,7 +653,7 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
 		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
 		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
 				       vmd_irq, IRQF_NO_THREAD,
-				       "vmd", &vmd->irqs[i]);
+				       vmd->name, &vmd->irqs[i]);
 		if (err)
 			return err;
 	}
@@ -759,7 +764,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 	 * acceptable because the guest is usually CPU-limited and MSI
 	 * remapping doesn't become a performance bottleneck.
 	 */
-	if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
+	if (iommu_capable(vmd->dev->dev.bus, IOMMU_CAP_INTR_REMAP) ||
+	    !(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
 	    offset[0] || offset[1]) {
 		ret = vmd_alloc_irqs(vmd);
 		if (ret)
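Note: the added iommu_capable() clause defers to the platform IOMMU: when interrupt remapping is already active there, VMD keeps its own MSI-X remapping enabled instead of letting child devices' MSIs bypass it. Restated as a standalone predicate (the helper itself is hypothetical; the bus-argument form of iommu_capable() matches the call in this diff):

	#include <linux/iommu.h>
	#include <linux/pci.h>

	/* Bypassing VMD's MSI-X remapping is only safe when the hardware
	 * advertises the feature, no vector offsets are programmed, and the
	 * IOMMU is not already remapping interrupts.
	 */
	static bool vmd_can_bypass_msi_remap(struct pci_dev *dev,
					     unsigned long features,
					     resource_size_t offset[2])
	{
		if (iommu_capable(dev->dev.bus, IOMMU_CAP_INTR_REMAP))
			return false;

		return (features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) &&
		       !offset[0] && !offset[1];
	}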
@@ -832,18 +838,32 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return -ENOMEM;
 
 	vmd->dev = dev;
+	vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
+	if (vmd->instance < 0)
+		return vmd->instance;
+
+	vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance);
+	if (!vmd->name) {
+		err = -ENOMEM;
+		goto out_release_instance;
+	}
+
 	err = pcim_enable_device(dev);
 	if (err < 0)
-		return err;
+		goto out_release_instance;
 
 	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
-	if (!vmd->cfgbar)
-		return -ENOMEM;
+	if (!vmd->cfgbar) {
+		err = -ENOMEM;
+		goto out_release_instance;
+	}
 
 	pci_set_master(dev);
 	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
-	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
-		return -ENODEV;
+	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+		err = -ENODEV;
+		goto out_release_instance;
+	}
 
 	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
 		vmd->first_vec = 1;
@@ -852,11 +872,16 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	pci_set_drvdata(dev, vmd);
 	err = vmd_enable_domain(vmd, features);
 	if (err)
-		return err;
+		goto out_release_instance;
 
 	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
 		 vmd->sysdata.domain);
 	return 0;
+
+ out_release_instance:
+	ida_simple_remove(&vmd_instance_ida, vmd->instance);
+	kfree(vmd->name);
+	return err;
 }
 
 static void vmd_cleanup_srcu(struct vmd_dev *vmd)
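Note: a single out_release_instance label suffices here because vmd comes from devm_kzalloc() earlier in probe, so vmd->name is NULL until kasprintf() succeeds and kfree(NULL) is a harmless no-op; every failure point after ida_simple_get() can share the same unwind. A compressed sketch of that shape (struct example and do_more_setup() are hypothetical stand-ins):

	struct example {
		int instance;
		char *name;
	};

	static int example_probe(struct example *ex)
	{
		int err;

		ex->instance = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
		if (ex->instance < 0)
			return ex->instance;

		ex->name = kasprintf(GFP_KERNEL, "dev%d", ex->instance);
		if (!ex->name) {
			err = -ENOMEM;
			goto out_release_instance;
		}

		err = do_more_setup(ex);	/* hypothetical later probe step */
		if (err)
			goto out_release_instance;

		return 0;

	 out_release_instance:
		ida_simple_remove(&example_ida, ex->instance);
		kfree(ex->name);	/* NULL, hence a no-op, if kasprintf() failed */
		return err;
	}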
@@ -877,6 +902,8 @@ static void vmd_remove(struct pci_dev *dev)
 	vmd_cleanup_srcu(vmd);
 	vmd_detach_resources(vmd);
 	vmd_remove_irq_domain(vmd);
+	ida_simple_remove(&vmd_instance_ida, vmd->instance);
+	kfree(vmd->name);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -901,7 +928,7 @@ static int vmd_resume(struct device *dev)
 	for (i = 0; i < vmd->msix_count; i++) {
 		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
 				       vmd_irq, IRQF_NO_THREAD,
-				       "vmd", &vmd->irqs[i]);
+				       vmd->name, &vmd->irqs[i]);
 		if (err)
 			return err;
 	}