@@ -1396,6 +1396,24 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+/*
+ * The extra devTLB flush quirk impacts those QAT devices with PCI device
+ * IDs ranging from 0x4940 to 0x4943. It is exempted from the risky_device()
+ * check because it applies only to the built-in QAT devices and it doesn't
+ * grant additional privileges.
+ */
+#define BUGGY_QAT_DEVID_MASK 0x4940
+static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
+{
+	if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+		return false;
+
+	if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK)
+		return false;
+
+	return true;
+}
+
 static void iommu_enable_pci_caps(struct device_domain_info *info)
 {
 	struct pci_dev *pdev;
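
Aside: the mask test folds all four buggy device IDs onto a single value because 0x4940 through 0x4943 differ only in their low two bits, which the 0xfffc mask clears. A minimal user-space sketch to check that arithmetic (the harness below is illustrative, not part of the patch; only the constant mirrors the code above):

#include <assert.h>
#include <stdio.h>

#define BUGGY_QAT_DEVID_MASK 0x4940	/* as in the patch */

int main(void)
{
	unsigned int id;

	/* Every buggy ID folds onto the mask value... */
	for (id = 0x4940; id <= 0x4943; id++)
		assert((id & 0xfffc) == BUGGY_QAT_DEVID_MASK);

	/* ...while the next ID up does not. */
	assert((0x4944 & 0xfffc) != BUGGY_QAT_DEVID_MASK);

	printf("0x4940-0x4943 all match 0x%04x\n", BUGGY_QAT_DEVID_MASK);
	return 0;
}
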
@@ -1478,6 +1496,7 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
 	qdep = info->ats_qdep;
 	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
 			   qdep, addr, mask);
+	quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -3854,8 +3873,10 @@ static inline bool has_external_pci(void)
 	struct pci_dev *pdev = NULL;
 
 	for_each_pci_dev(pdev)
-		if (pdev->external_facing)
+		if (pdev->external_facing) {
+			pci_dev_put(pdev);
 			return true;
+		}
 
 	return false;
 }
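
Aside: the hunk above fixes a device reference leak. for_each_pci_dev() iterates via pci_get_device(), which holds a reference on the device for the current iteration and only drops it when the loop advances; returning out of the loop therefore keeps that reference unless the caller releases it. A hedged sketch of the two idiomatic exits (find_first_display() is a made-up example, not kernel code):

/* Early exit that transfers ownership: the caller must pci_dev_put(). */
static struct pci_dev *find_first_display(void)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev)
		if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
			return pdev;	/* reference travels with the pointer */

	return NULL;	/* loop ran to completion: nothing is held */
}

Predicates such as has_external_pci() have nothing to hand back, so they must drop the reference themselves before the early return, which is exactly what the added pci_dev_put() does.
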
@@ -4490,9 +4511,10 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 	if (dev_is_pci(dev)) {
 		if (ecap_dev_iotlb_support(iommu->ecap) &&
 		    pci_ats_supported(pdev) &&
-		    dmar_ats_supported(pdev, iommu))
+		    dmar_ats_supported(pdev, iommu)) {
 			info->ats_supported = 1;
-
+			info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev);
+		}
 		if (sm_supported(iommu)) {
 			if (pasid_supported(iommu)) {
 				int features = pci_pasid_features(pdev);
@@ -4931,3 +4953,48 @@ static void __init check_tylersburg_isoch(void)
 	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
 		vtisochctrl);
 }
+
+/*
+ * Here we deal with a device TLB defect where the device may inadvertently
+ * issue an ATS invalidation completion before posted writes initiated with
+ * a translated address that utilized translations matching the invalidation
+ * address range, violating the invalidation completion ordering.
+ * Therefore, any use case that cannot guarantee DMA is stopped before unmap
+ * is vulnerable to this defect. In other words, any dTLB invalidation
+ * initiated not under the control of the trusted/privileged host device
+ * driver must use this quirk.
+ * Device TLBs are invalidated under the following six conditions:
+ * 1. Device driver unmaps an IOVA via the DMA API
+ * 2. Device driver unbinds a PASID from a process, sva_unbind_device()
+ * 3. PASID is torn down after the PASID cache is flushed, e.g. process
+ *    exit_mmap() due to a crash
+ * 4. Under SVA usage, called by mmu_notifier.invalidate_range() where
+ *    the VM has to free pages that were unmapped
+ * 5. Userspace driver unmaps a DMA buffer
+ * 6. Cache invalidation in vSVA usage (upcoming)
+ *
+ * For #1 and #2, device drivers are responsible for stopping DMA traffic
+ * before unmap/unbind. For #3, the iommu driver gets an mmu_notifier to
+ * invalidate the TLB the same way as a normal user unmap, which will use
+ * this quirk. The dTLB invalidation after a PASID cache flush does not need it.
+ *
+ * As a reminder, #6 will *NEED* this quirk as we enable nested translation.
+ */
+void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
+			       unsigned long address, unsigned long mask,
+			       u32 pasid, u16 qdep)
+{
+	u16 sid;
+
+	if (likely(!info->dtlb_extra_inval))
+		return;
+
+	sid = PCI_DEVID(info->bus, info->devfn);
+	if (pasid == PASID_RID2PASID) {
+		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+				   qdep, address, mask);
+	} else {
+		qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
+					 pasid, qdep, address, mask);
+	}
+}
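
Aside: callers on PASID-granular paths are expected to chain the quirk after their normal invalidation, mirroring the RID2PASID call site added to __iommu_flush_dev_iotlb() above. A hedged sketch of that pattern (flush_user_range() and its parameters are illustrative; only qi_flush_dev_iotlb_pasid() and quirk_extra_dev_tlb_flush() come from the driver and this patch):

static void flush_user_range(struct device_domain_info *info,
			     unsigned long address, unsigned long mask,
			     u32 pasid, u16 qdep)
{
	u16 sid = PCI_DEVID(info->bus, info->devfn);

	/* Normal PASID-based device TLB invalidation first... */
	qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
				 pasid, qdep, address, mask);

	/* ...then the extra flush, which is a no-op unless
	 * info->dtlb_extra_inval was set at probe time.
	 */
	quirk_extra_dev_tlb_flush(info, address, mask, pasid, qdep);
}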