Skip to content

Commit f90584f

Browse files
Lu Baolu authored and Will Deacon committed
iommu/vt-d: Add helper to flush caches for context change
This helper is used to flush the related caches following a change in a context table entry that was previously present. The VT-d specification provides guidance for such invalidations in section 6.5.3.3. This helper replaces the existing open code in the code paths where a present context entry is being torn down. Signed-off-by: Lu Baolu <[email protected]> Reviewed-by: Kevin Tian <[email protected]> Link: https://lore.kernel.org/r/[email protected] Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent 2b989ab commit f90584f

File tree

3 files changed

+92
-50
lines changed

3 files changed

+92
-50
lines changed

drivers/iommu/intel/iommu.c

Lines changed: 1 addition & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1359,21 +1359,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info)
13591359
}
13601360
}
13611361

1362-
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
1363-
u64 addr, unsigned int mask)
1364-
{
1365-
u16 sid, qdep;
1366-
1367-
if (!info || !info->ats_enabled)
1368-
return;
1369-
1370-
sid = info->bus << 8 | info->devfn;
1371-
qdep = info->ats_qdep;
1372-
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1373-
qdep, addr, mask);
1374-
quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
1375-
}
1376-
13771362
static void intel_flush_iotlb_all(struct iommu_domain *domain)
13781363
{
13791364
cache_tag_flush_all(to_dmar_domain(domain));
@@ -1959,7 +1944,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
19591944
{
19601945
struct intel_iommu *iommu = info->iommu;
19611946
struct context_entry *context;
1962-
u16 did_old;
19631947

19641948
spin_lock(&iommu->lock);
19651949
context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1968,24 +1952,10 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
19681952
return;
19691953
}
19701954

1971-
did_old = context_domain_id(context);
1972-
19731955
context_clear_entry(context);
19741956
__iommu_flush_cache(iommu, context, sizeof(*context));
19751957
spin_unlock(&iommu->lock);
1976-
iommu->flush.flush_context(iommu,
1977-
did_old,
1978-
(((u16)bus) << 8) | devfn,
1979-
DMA_CCMD_MASK_NOBIT,
1980-
DMA_CCMD_DEVICE_INVL);
1981-
1982-
iommu->flush.flush_iotlb(iommu,
1983-
did_old,
1984-
0,
1985-
0,
1986-
DMA_TLB_DSI_FLUSH);
1987-
1988-
__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
1958+
intel_context_flush_present(info, context, true);
19891959
}
19901960

19911961
static int domain_setup_first_level(struct intel_iommu *iommu,

drivers/iommu/intel/iommu.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1143,6 +1143,10 @@ void cache_tag_flush_all(struct dmar_domain *domain);
11431143
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
11441144
unsigned long end);
11451145

1146+
void intel_context_flush_present(struct device_domain_info *info,
1147+
struct context_entry *context,
1148+
bool affect_domains);
1149+
11461150
#ifdef CONFIG_INTEL_IOMMU_SVM
11471151
void intel_svm_check(struct intel_iommu *iommu);
11481152
int intel_svm_enable_prq(struct intel_iommu *iommu);

drivers/iommu/intel/pasid.c

Lines changed: 87 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -694,25 +694,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
694694
context_clear_entry(context);
695695
__iommu_flush_cache(iommu, context, sizeof(*context));
696696
spin_unlock(&iommu->lock);
697-
698-
/*
699-
* Cache invalidation for changes to a scalable-mode context table
700-
* entry.
701-
*
702-
* Section 6.5.3.3 of the VT-d spec:
703-
* - Device-selective context-cache invalidation;
704-
* - Domain-selective PASID-cache invalidation to affected domains
705-
* (can be skipped if all PASID entries were not-present);
706-
* - Domain-selective IOTLB invalidation to affected domains;
707-
* - Global Device-TLB invalidation to affected functions.
708-
*
709-
* The iommu has been parked in the blocking state. All domains have
710-
* been detached from the device or PASID. The PASID and IOTLB caches
711-
* have been invalidated during the domain detach path.
712-
*/
713-
iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
714-
DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
715-
devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
697+
intel_context_flush_present(info, context, false);
716698
}
717699

718700
static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -874,3 +856,89 @@ int intel_pasid_setup_sm_context(struct device *dev)
874856

875857
return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
876858
}
859+
860+
/*
861+
* Global Device-TLB invalidation following changes in a context entry which
862+
* was present.
863+
*/
864+
static void __context_flush_dev_iotlb(struct device_domain_info *info)
865+
{
866+
if (!info->ats_enabled)
867+
return;
868+
869+
qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
870+
info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
871+
872+
/*
873+
* There is no guarantee that the device DMA is stopped when it reaches
874+
* here. Therefore, always attempt the extra device TLB invalidation
875+
* quirk. The impact on performance is acceptable since this is not a
876+
* performance-critical path.
877+
*/
878+
quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
879+
info->ats_qdep);
880+
}
881+
882+
/*
883+
* Cache invalidations after change in a context table entry that was present
884+
* according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
885+
* IOMMU is in scalable mode and all PASID table entries of the device were
886+
* non-present, set flush_domains to false. Otherwise, true.
887+
*/
888+
void intel_context_flush_present(struct device_domain_info *info,
889+
struct context_entry *context,
890+
bool flush_domains)
891+
{
892+
struct intel_iommu *iommu = info->iommu;
893+
u16 did = context_domain_id(context);
894+
struct pasid_entry *pte;
895+
int i;
896+
897+
/*
898+
* Device-selective context-cache invalidation. The Domain-ID field
899+
* of the Context-cache Invalidate Descriptor is ignored by hardware
900+
* when operating in scalable mode. Therefore the @did value doesn't
901+
* matter in scalable mode.
902+
*/
903+
iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
904+
DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
905+
906+
/*
907+
* For legacy mode:
908+
* - Domain-selective IOTLB invalidation
909+
* - Global Device-TLB invalidation to all affected functions
910+
*/
911+
if (!sm_supported(iommu)) {
912+
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
913+
__context_flush_dev_iotlb(info);
914+
915+
return;
916+
}
917+
918+
/*
919+
* For scalable mode:
920+
* - Domain-selective PASID-cache invalidation to affected domains
921+
* - Domain-selective IOTLB invalidation to affected domains
922+
* - Global Device-TLB invalidation to affected functions
923+
*/
924+
if (flush_domains) {
925+
/*
926+
* If the IOMMU is running in scalable mode and there might
927+
* be potential PASID translations, the caller should hold
928+
* the lock to ensure that context changes and cache flushes
929+
* are atomic.
930+
*/
931+
assert_spin_locked(&iommu->lock);
932+
for (i = 0; i < info->pasid_table->max_pasid; i++) {
933+
pte = intel_pasid_get_entry(info->dev, i);
934+
if (!pte || !pasid_pte_is_present(pte))
935+
continue;
936+
937+
did = pasid_get_domain_id(pte);
938+
qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
939+
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
940+
}
941+
}
942+
943+
__context_flush_dev_iotlb(info);
944+
}

0 commit comments

Comments
 (0)