Skip to content

Commit b1d99dc

Browse files
willdeacon authored and joergroedel committed
iommu: Hook up '->unmap_pages' driver callback
Extend iommu_pgsize() to populate an optional 'count' parameter so that we can direct unmapping operation to the ->unmap_pages callback if it has been provided by the driver. Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Isaac J. Manjarres <[email protected]> Signed-off-by: Georgi Djakov <[email protected]> Reviewed-by: Lu Baolu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 89d5b96 commit b1d99dc

File tree

1 file changed

+50
-9
lines changed

1 file changed

+50
-9
lines changed

drivers/iommu/iommu.c

Lines changed: 50 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2376,11 +2376,11 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
23762376
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
23772377

23782378
static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2379-
phys_addr_t paddr, size_t size)
2379+
phys_addr_t paddr, size_t size, size_t *count)
23802380
{
2381-
unsigned int pgsize_idx;
2381+
unsigned int pgsize_idx, pgsize_idx_next;
23822382
unsigned long pgsizes;
2383-
size_t pgsize;
2383+
size_t offset, pgsize, pgsize_next;
23842384
unsigned long addr_merge = paddr | iova;
23852385

23862386
/* Page sizes supported by the hardware and small enough for @size */
@@ -2396,7 +2396,36 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
23962396
/* Pick the biggest page size remaining */
23972397
pgsize_idx = __fls(pgsizes);
23982398
pgsize = BIT(pgsize_idx);
2399+
if (!count)
2400+
return pgsize;
23992401

2402+
/* Find the next biggest support page size, if it exists */
2403+
pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2404+
if (!pgsizes)
2405+
goto out_set_count;
2406+
2407+
pgsize_idx_next = __ffs(pgsizes);
2408+
pgsize_next = BIT(pgsize_idx_next);
2409+
2410+
/*
2411+
* There's no point trying a bigger page size unless the virtual
2412+
* and physical addresses are similarly offset within the larger page.
2413+
*/
2414+
if ((iova ^ paddr) & (pgsize_next - 1))
2415+
goto out_set_count;
2416+
2417+
/* Calculate the offset to the next page size alignment boundary */
2418+
offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2419+
2420+
/*
2421+
* If size is big enough to accommodate the larger page, reduce
2422+
* the number of smaller pages.
2423+
*/
2424+
if (offset + pgsize_next <= size)
2425+
size = offset;
2426+
2427+
out_set_count:
2428+
*count = size >> pgsize_idx;
24002429
return pgsize;
24012430
}
24022431

@@ -2434,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
24342463
pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
24352464

24362465
while (size) {
2437-
size_t pgsize = iommu_pgsize(domain, iova, paddr, size);
2466+
size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL);
24382467

24392468
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
24402469
iova, &paddr, pgsize);
@@ -2485,6 +2514,19 @@ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
24852514
}
24862515
EXPORT_SYMBOL_GPL(iommu_map_atomic);
24872516

2517+
static size_t __iommu_unmap_pages(struct iommu_domain *domain,
2518+
unsigned long iova, size_t size,
2519+
struct iommu_iotlb_gather *iotlb_gather)
2520+
{
2521+
const struct iommu_ops *ops = domain->ops;
2522+
size_t pgsize, count;
2523+
2524+
pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2525+
return ops->unmap_pages ?
2526+
ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
2527+
ops->unmap(domain, iova, pgsize, iotlb_gather);
2528+
}
2529+
24882530
static size_t __iommu_unmap(struct iommu_domain *domain,
24892531
unsigned long iova, size_t size,
24902532
struct iommu_iotlb_gather *iotlb_gather)
@@ -2494,7 +2536,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
24942536
unsigned long orig_iova = iova;
24952537
unsigned int min_pagesz;
24962538

2497-
if (unlikely(ops->unmap == NULL ||
2539+
if (unlikely(!(ops->unmap || ops->unmap_pages) ||
24982540
domain->pgsize_bitmap == 0UL))
24992541
return 0;
25002542

@@ -2522,10 +2564,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
25222564
* or we hit an area that isn't mapped.
25232565
*/
25242566
while (unmapped < size) {
2525-
size_t pgsize;
2526-
2527-
pgsize = iommu_pgsize(domain, iova, iova, size - unmapped);
2528-
unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2567+
unmapped_page = __iommu_unmap_pages(domain, iova,
2568+
size - unmapped,
2569+
iotlb_gather);
25292570
if (!unmapped_page)
25302571
break;
25312572

0 commit comments

Comments
 (0)