
Commit 0455d31

yiliu1765 authored and joergroedel committed
iommu/vt-d: Add __iommu_flush_iotlb_psi()
Add __iommu_flush_iotlb_psi() to do the psi iotlb flush with a DID input
rather than calculating it within the helper. This is useful when flushing
the cache for a parent domain which reuses the DIDs of its nested domains.

Signed-off-by: Yi Liu <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 85ce8e1 commit 0455d31
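
The parent-domain use case in the commit message suggests a call pattern along these lines. This is a hedged illustrative sketch, not code from this commit: flush_parent_for_nested() and the s1_domains/domain_link fields are assumed names for how a parent domain might track its nested domains; only __iommu_flush_iotlb_psi(), domain_id_iommu(), struct intel_iommu, and struct dmar_domain are from the kernel tree.

/*
 * Illustrative only (kernel context assumed): entries walked through
 * the parent's page table may be cached under the DIDs of its nested
 * domains, so issue one PSI flush per nested domain's DID instead of
 * deriving a DID from the parent domain itself.
 */
static void flush_parent_for_nested(struct intel_iommu *iommu,
				    struct dmar_domain *parent,
				    unsigned long pfn, unsigned int pages)
{
	struct dmar_domain *s1;

	/* s1_domains/domain_link are assumed fields for illustration */
	list_for_each_entry(s1, &parent->s1_domains, domain_link)
		__iommu_flush_iotlb_psi(iommu, domain_id_iommu(s1, iommu),
					pfn, pages, 0);
}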

File tree

1 file changed: +43 −35 lines


drivers/iommu/intel/iommu.c

Lines changed: 43 additions & 35 deletions
@@ -1368,6 +1368,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+				    unsigned long pfn, unsigned int pages,
+				    int ih)
+{
+	unsigned int aligned_pages = __roundup_pow_of_two(pages);
+	unsigned long bitmask = aligned_pages - 1;
+	unsigned int mask = ilog2(aligned_pages);
+	u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+	/*
+	 * PSI masks the low order bits of the base address. If the
+	 * address isn't aligned to the mask, then compute a mask value
+	 * needed to ensure the target range is flushed.
+	 */
+	if (unlikely(bitmask & pfn)) {
+		unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+		/*
+		 * Since end_pfn <= pfn + bitmask, the only way bits
+		 * higher than bitmask can differ in pfn and end_pfn is
+		 * by carrying. This means after masking out bitmask,
+		 * high bits starting with the first set bit in
+		 * shared_bits are all equal in both pfn and end_pfn.
+		 */
+		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+		mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+	}
+
+	/*
+	 * Fallback to domain selective flush if no PSI support or
+	 * the size is too big.
+	 */
+	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+		iommu->flush.flush_iotlb(iommu, did, 0, 0,
+					 DMA_TLB_DSI_FLUSH);
+	else
+		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+					 DMA_TLB_PSI_FLUSH);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 				  struct dmar_domain *domain,
 				  unsigned long pfn, unsigned int pages,
@@ -1384,42 +1424,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 	if (ih)
 		ih = 1 << 6;
 
-	if (domain->use_first_level) {
+	if (domain->use_first_level)
 		domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
-	} else {
-		unsigned long bitmask = aligned_pages - 1;
-
-		/*
-		 * PSI masks the low order bits of the base address. If the
-		 * address isn't aligned to the mask, then compute a mask value
-		 * needed to ensure the target range is flushed.
-		 */
-		if (unlikely(bitmask & pfn)) {
-			unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
-			/*
-			 * Since end_pfn <= pfn + bitmask, the only way bits
-			 * higher than bitmask can differ in pfn and end_pfn is
-			 * by carrying. This means after masking out bitmask,
-			 * high bits starting with the first set bit in
-			 * shared_bits are all equal in both pfn and end_pfn.
-			 */
-			shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
-			mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
-		}
-
-		/*
-		 * Fallback to domain selective flush if no PSI support or
-		 * the size is too big.
-		 */
-		if (!cap_pgsel_inv(iommu->cap) ||
-		    mask > cap_max_amask_val(iommu->cap))
-			iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						 DMA_TLB_DSI_FLUSH);
-		else
-			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-						 DMA_TLB_PSI_FLUSH);
-	}
+	else
+		__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
 
 	/*
 	 * In caching mode, changes of pages from non-present to present require
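
To make the mask computation in __iommu_flush_iotlb_psi() concrete, here is a standalone userspace sketch of the same bit math; compiler builtins stand in for the kernel's ilog2() and __ffs(), and the input values are an assumed example, not from the commit. For pfn = 0x3ff and pages = 2, the range crosses the 2-page alignment boundary, so shared_bits widens the mask to 11: one flush of 2^11 pages rooted at pfn 0 covers both 0x3ff and 0x400.

/* mask_demo.c - userspace illustration of the shared_bits math above */
#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x3ff;	/* base pfn, not aligned to 2 pages */
	unsigned int pages = 2;		/* flush pfns 0x3ff and 0x400 */
	unsigned int aligned_pages = 2;	/* __roundup_pow_of_two(pages) */
	unsigned long bitmask = aligned_pages - 1;
	unsigned int mask = __builtin_ctz(aligned_pages);	/* ilog2() */

	if (bitmask & pfn) {		/* base not aligned: widen the mask */
		unsigned long end_pfn = pfn + pages - 1, shared_bits;

		/*
		 * pfn = 0b011_1111_1111 and end_pfn = 0b100_0000_0000
		 * differ in bits 0..10 (the carry ripples up), so the
		 * lowest set bit of shared_bits is bit 11.
		 */
		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
		mask = shared_bits ? __builtin_ctzl(shared_bits) /* __ffs() */
				   : 8 * sizeof(unsigned long);
	}

	/* prints: flush mask = 11 -> flush 2^11 pages */
	printf("flush mask = %u -> flush 2^%u pages\n", mask, mask);
	return 0;
}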
