Skip to content

Commit b577f7e

Browse files
rmurphy-arm authored and joergroedel committed
iommu/mediatek-v1: Update to {map,unmap}_pages
Now that the core API has a proper notion of multi-page mappings, clean up the old pgsize_bitmap hack by implementing the new interfaces instead. This also brings a slight simplification since we no longer need to worry about rolling back partial mappings on failure. Signed-off-by: Robin Murphy <[email protected]> Acked-by: Will Deacon <[email protected]> Link: https://lore.kernel.org/r/768e90ff0c2d61e4723049c1349d8bac58daa437.1668100209.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel <[email protected]>
1 parent 01657bc commit b577f7e

File tree

1 file changed

+14
-16
lines changed

1 file changed

+14
-16
lines changed

drivers/iommu/mtk_iommu_v1.c

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -327,44 +327,42 @@ static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct devic
327327
}
328328

329329
/*
 * mtk_iommu_v1_map - install @pgcount consecutive entries in the flat M4U
 * page table (iommu_domain_ops::map_pages implementation)
 * @domain:  IOMMU domain whose preallocated flat table is updated
 * @iova:    starting I/O virtual address of the range to map
 * @paddr:   starting physical address; truncated to 32 bits below, so the
 *           hardware can only address a 32-bit physical space
 * @pgsize:  size of each page — unused here; NOTE(review): presumably always
 *           MT2701_IOMMU_PAGE_SIZE given the driver's single-size
 *           pgsize_bitmap, confirm against the ops registration
 * @pgcount: number of pages of @pgsize to map
 * @prot:    IOMMU protection flags — unused by this hardware
 * @gfp:     allocation flags — unused; no allocation happens on this path
 * @mapped:  out-parameter: number of bytes actually mapped
 *
 * Returns 0 when all @pgcount entries were written, or -EEXIST if an
 * already-valid entry was hit first. On the error path *mapped still holds
 * the partial progress, so the core API can unwind without this driver
 * having to roll back the mapping itself.
 */
static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	unsigned int i;
	/* One u32 descriptor per page: index the flat table by page number */
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;

	/* irqsave: the page table may also be touched from IRQ context */
	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < pgcount; i++) {
		/* Refuse to overwrite a live entry; report -EEXIST below */
		if (pgt_base_iova[i])
			break;
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	/*
	 * i pages were written whether or not we completed; expose that via
	 * *mapped and only flush the TLB for the range actually touched.
	 */
	*mapped = i * MT2701_IOMMU_PAGE_SIZE;
	mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped);

	return i == pgcount ? 0 : -EEXIST;
}
357354

358355
static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
359-
size_t size, struct iommu_iotlb_gather *gather)
356+
size_t pgsize, size_t pgcount,
357+
struct iommu_iotlb_gather *gather)
360358
{
361359
struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
362360
unsigned long flags;
363361
u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
364-
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
362+
size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE;
365363

366364
spin_lock_irqsave(&dom->pgtlock, flags);
367-
memset(pgt_base_iova, 0, page_num * sizeof(u32));
365+
memset(pgt_base_iova, 0, pgcount * sizeof(u32));
368366
spin_unlock_irqrestore(&dom->pgtlock, flags);
369367

370368
mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);
@@ -586,13 +584,13 @@ static const struct iommu_ops mtk_iommu_v1_ops = {
586584
.release_device = mtk_iommu_v1_release_device,
587585
.def_domain_type = mtk_iommu_v1_def_domain_type,
588586
.device_group = generic_device_group,
589-
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
587+
.pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE,
590588
.owner = THIS_MODULE,
591589
.default_domain_ops = &(const struct iommu_domain_ops) {
592590
.attach_dev = mtk_iommu_v1_attach_device,
593591
.detach_dev = mtk_iommu_v1_detach_device,
594-
.map = mtk_iommu_v1_map,
595-
.unmap = mtk_iommu_v1_unmap,
592+
.map_pages = mtk_iommu_v1_map,
593+
.unmap_pages = mtk_iommu_v1_unmap,
596594
.iova_to_phys = mtk_iommu_v1_iova_to_phys,
597595
.free = mtk_iommu_v1_domain_free,
598596
}

0 commit comments

Comments
 (0)