@@ -733,3 +733,141 @@ void intel_pasid_teardown_sm_context(struct device *dev)
 	pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
 }
+
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+	unsigned long pds, max_pde;
+
+	max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+	pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
+	if (pds < 7)
+		return 0;
+
+	return pds - 7;
+}
+
+static int context_entry_set_pasid_table(struct context_entry *context,
+					 struct device *dev)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct pasid_table *table = info->pasid_table;
+	struct intel_iommu *iommu = info->iommu;
+	unsigned long pds;
+
+	context_clear_entry(context);
+
+	pds = context_get_sm_pds(table);
+	context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
+	context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
+
+	if (info->ats_supported)
+		context_set_sm_dte(context);
+	if (info->pri_supported)
+		context_set_sm_pre(context);
+	if (info->pasid_supported)
+		context_set_pasid(context);
+
+	context_set_fault_enable(context);
+	context_set_present(context);
+	__iommu_flush_cache(iommu, context, sizeof(*context));
+
+	return 0;
+}
+
+static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct context_entry *context;
+
+	spin_lock(&iommu->lock);
+	context = iommu_context_addr(iommu, bus, devfn, true);
+	if (!context) {
+		spin_unlock(&iommu->lock);
+		return -ENOMEM;
+	}
+
+	if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+		spin_unlock(&iommu->lock);
+		return 0;
+	}
+
+	if (context_copied(iommu, bus, devfn)) {
+		context_clear_entry(context);
+		__iommu_flush_cache(iommu, context, sizeof(*context));
+
+		/*
+		 * For kdump cases, old valid entries may be cached due to
+		 * the in-flight DMA and copied pgtable, but there is no
+		 * unmapping behaviour for them, thus we need explicit cache
+		 * flushes for all affected domain IDs and PASIDs used in
+		 * the copied PASID table. Given that we have no idea about
+		 * which domain IDs and PASIDs were used in the copied tables,
+		 * upgrade them to global PASID and IOTLB cache invalidation.
+		 */
+		iommu->flush.flush_context(iommu, 0,
+					   PCI_DEVID(bus, devfn),
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+		devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+
+		/*
+		 * At this point, the device is supposed to finish reset at
+		 * its driver probe stage, so no in-flight DMA will exist,
+		 * and we don't need to worry anymore hereafter.
+		 */
+		clear_context_copied(iommu, bus, devfn);
+	}
+
+	context_entry_set_pasid_table(context, dev);
+	spin_unlock(&iommu->lock);
+
+	/*
+	 * It's a non-present to present mapping. If hardware doesn't cache
+	 * non-present entry we don't need to flush the caches. If it does
+	 * cache non-present entries, then it does so in the special
+	 * domain #0, which we have to flush:
+	 */
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   PCI_DEVID(bus, devfn),
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+	}
+
+	return 0;
+}
+
+static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct device *dev = data;
+
+	if (dev != &pdev->dev)
+		return 0;
+
+	return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+/*
+ * Set the device's PASID table to its context table entry.
+ *
+ * The PASID table is set to the context entries of both device itself
+ * and its alias requester ID for DMA.
+ */
+int intel_pasid_setup_sm_context(struct device *dev)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+	if (!dev_is_pci(dev))
+		return device_pasid_table_setup(dev, info->bus, info->devfn);
+
+	return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
+}
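
The PDTS encoding described in the comment above context_get_sm_pds() can be checked with a short standalone program. The sketch below is an illustration, not driver code: it assumes PASID_PDE_SHIFT is 6 (one directory entry per 64 PASID-table entries) and that max_pasid is a power-of-two PASID count, and it stands in for the kernel's find_first_bit() with the compiler builtin __builtin_ctzl(); the function name sm_pds() is made up for the example.

/*
 * Standalone sketch of the PDTS arithmetic in context_get_sm_pds().
 * Assumptions (not part of this diff): PASID_PDE_SHIFT == 6, and
 * max_pasid is a power of two such as 1 << 20 for 20-bit PASIDs.
 */
#include <stdio.h>

#define PASID_PDE_SHIFT	6

static unsigned long sm_pds(unsigned long max_pasid)
{
	unsigned long max_pde = max_pasid >> PASID_PDE_SHIFT;
	unsigned long pds;

	if (!max_pde)	/* simplification: fewer PASIDs than one PDE covers */
		return 0;

	/* stand-in for find_first_bit(): lowest set bit of a power of two */
	pds = (unsigned long)__builtin_ctzl(max_pde);

	return pds < 7 ? 0 : pds - 7;
}

int main(void)
{
	/* 2^20 PASIDs -> 2^14 directory entries -> PDTS X = 14 - 7 = 7 */
	printf("PDTS = %lu\n", sm_pds(1UL << 20));	/* prints 7 */
	/* hardware reads X = 7 as a directory of 2^(7 + 7) = 2^14 entries */
	return 0;
}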
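A note on the alias handling: pci_for_each_dma_alias() hands pci_pasid_table_setup() a 16-bit requester ID with the bus number in the high byte and the devfn in the low byte, which is what PCI_BUS_NUM(alias) and alias & 0xff unpack before calling device_pasid_table_setup(). A minimal standalone illustration, with the two helper macros mirrored locally from include/linux/pci.h and a made-up alias value:

#include <stdio.h>

/* Local mirrors of the kernel helpers from include/linux/pci.h */
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)
#define PCI_DEVID(bus, devfn)	((((unsigned short)(bus)) << 8) | (devfn))

int main(void)
{
	unsigned short alias = PCI_DEVID(0x3a, 0x10);	/* hypothetical alias RID */

	/* the same unpacking device_pasid_table_setup() receives */
	printf("bus=0x%02x devfn=0x%02x\n",
	       PCI_BUS_NUM(alias), alias & 0xff);	/* bus=0x3a devfn=0x10 */
	return 0;
}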