@@ -694,25 +694,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 	spin_unlock(&iommu->lock);
-
-	/*
-	 * Cache invalidation for changes to a scalable-mode context table
-	 * entry.
-	 *
-	 * Section 6.5.3.3 of the VT-d spec:
-	 * - Device-selective context-cache invalidation;
-	 * - Domain-selective PASID-cache invalidation to affected domains
-	 *   (can be skipped if all PASID entries were not-present);
-	 * - Domain-selective IOTLB invalidation to affected domains;
-	 * - Global Device-TLB invalidation to affected functions.
-	 *
-	 * The iommu has been parked in the blocking state. All domains have
-	 * been detached from the device or PASID. The PASID and IOTLB caches
-	 * have been invalidated during the domain detach path.
-	 */
-	iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
-				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
-	devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+	intel_context_flush_present(info, context, false);
 }
 
 static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -874,3 +856,89 @@ int intel_pasid_setup_sm_context(struct device *dev)
 
 	return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
 }
+
+/*
+ * Global Device-TLB invalidation following changes in a context entry which
+ * was present.
+ */
+static void __context_flush_dev_iotlb(struct device_domain_info *info)
+{
+	if (!info->ats_enabled)
+		return;
+
+	qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
+			   info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
+
+	/*
+	 * There is no guarantee that the device DMA is stopped when it reaches
+	 * here. Therefore, always attempt the extra device TLB invalidation
+	 * quirk. The impact on performance is acceptable since this is not a
+	 * performance-critical path.
+	 */
+	quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
+				  info->ats_qdep);
+}
+
+/*
+ * Cache invalidations after change in a context table entry that was present
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
+ * IOMMU is in scalable mode and all PASID table entries of the device were
+ * non-present, set flush_domains to false. Otherwise, true.
+ */
+void intel_context_flush_present(struct device_domain_info *info,
+				 struct context_entry *context,
+				 bool flush_domains)
+{
+	struct intel_iommu *iommu = info->iommu;
+	u16 did = context_domain_id(context);
+	struct pasid_entry *pte;
+	int i;
+
+	/*
+	 * Device-selective context-cache invalidation. The Domain-ID field
+	 * of the Context-cache Invalidate Descriptor is ignored by hardware
+	 * when operating in scalable mode. Therefore the @did value doesn't
+	 * matter in scalable mode.
+	 */
+	iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
+				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+
+	/*
+	 * For legacy mode:
+	 * - Domain-selective IOTLB invalidation
+	 * - Global Device-TLB invalidation to all affected functions
+	 */
+	if (!sm_supported(iommu)) {
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		__context_flush_dev_iotlb(info);
+
+		return;
+	}
+
+	/*
+	 * For scalable mode:
+	 * - Domain-selective PASID-cache invalidation to affected domains
+	 * - Domain-selective IOTLB invalidation to affected domains
+	 * - Global Device-TLB invalidation to affected functions
+	 */
+	if (flush_domains) {
+		/*
+		 * If the IOMMU is running in scalable mode and there might
+		 * be potential PASID translations, the caller should hold
+		 * the lock to ensure that context changes and cache flushes
+		 * are atomic.
+		 */
+		assert_spin_locked(&iommu->lock);
+		for (i = 0; i < info->pasid_table->max_pasid; i++) {
+			pte = intel_pasid_get_entry(info->dev, i);
+			if (!pte || !pasid_pte_is_present(pte))
+				continue;
+
+			did = pasid_get_domain_id(pte);
+			qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
+			iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		}
+	}
+
+	__context_flush_dev_iotlb(info);
+}
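
For reference, a minimal caller sketch for the flush_domains == true case (not part of this patch; the function name and teardown flow are illustrative assumptions). It shows the usage the assert_spin_locked() in intel_context_flush_present() expects: when scalable mode is active and some PASID-table entries may still be present, the context-entry change and the cache flushes happen together under iommu->lock.

/*
 * Illustrative sketch only -- not from the patch. A hypothetical caller
 * clears a context entry while PASID translations may still be cached,
 * so it keeps iommu->lock held across the entry update and the call to
 * intel_context_flush_present() with flush_domains == true.
 */
static void example_teardown_with_pasids(struct device_domain_info *info,
					 struct context_entry *context)
{
	struct intel_iommu *iommu = info->iommu;

	spin_lock(&iommu->lock);
	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	intel_context_flush_present(info, context, true);
	spin_unlock(&iommu->lock);
}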