
Commit 8b35cdc

rmurphy-arm authored and joergroedel committed
iommu/msm: Update to {map,unmap}_pages
Update map/unmap to the new multi-page interfaces, which is dead easy
since we just pass them through to io-pgtable anyway.

Signed-off-by: Robin Murphy <[email protected]>
Acked-by: Will Deacon <[email protected]>
Link: https://lore.kernel.org/r/24a8f522710ddd6bbac4da154aa28799e939ebe4.1668100209.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <[email protected]>
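For context, the "multi-page interfaces" referred to here are the map_pages/unmap_pages members of struct iommu_domain_ops, which take a page size plus a page count instead of a single length, and let the driver report how much was actually mapped. A paraphrased sketch of those callback signatures (based on include/linux/iommu.h of this era, not a verbatim copy):

	/* Sketch of the multi-page callbacks in struct iommu_domain_ops;
	 * paraphrased for illustration, not verbatim kernel source.
	 */
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

pgsize * pgcount replaces the old single len argument, and *mapped lets the driver report partial progress back to the IOMMU core.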
1 parent 8563738 commit 8b35cdc

File tree

1 file changed (+11, -7 lines)


drivers/iommu/msm_iommu.c

Lines changed: 11 additions & 7 deletions
@@ -471,14 +471,16 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
+			 phys_addr_t pa, size_t pgsize, size_t pgcount,
+			 int prot, gfp_t gfp, size_t *mapped)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&priv->pgtlock, flags);
-	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
+	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
+				   GFP_ATOMIC, mapped);
 	spin_unlock_irqrestore(&priv->pgtlock, flags);
 
 	return ret;
@@ -493,16 +495,18 @@ static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			      size_t len, struct iommu_iotlb_gather *gather)
+			      size_t pgsize, size_t pgcount,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
+	size_t ret;
 
 	spin_lock_irqsave(&priv->pgtlock, flags);
-	len = priv->iop->unmap(priv->iop, iova, len, gather);
+	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
 	spin_unlock_irqrestore(&priv->pgtlock, flags);
 
-	return len;
+	return ret;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -679,8 +683,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = msm_iommu_attach_dev,
 		.detach_dev = msm_iommu_detach_dev,
-		.map = msm_iommu_map,
-		.unmap = msm_iommu_unmap,
+		.map_pages = msm_iommu_map,
+		.unmap_pages = msm_iommu_unmap,
 		/*
 		 * Nothing is needed here, the barrier to guarantee
 		 * completion of the tlb sync operation is implicitly
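The driver simply forwards the new arguments to the matching io-pgtable hooks under its page-table lock, which is why the conversion is so small. For reference, the io_pgtable_ops callbacks being called above have signatures along these lines (a sketch based on include/linux/io-pgtable.h from this period, not verbatim):

	/* Sketch of the io-pgtable multi-page hooks the driver now calls;
	 * paraphrased for illustration, not verbatim kernel source.
	 */
	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);

Since the arguments match the iommu_domain_ops callbacks one for one, msm_iommu_map() and msm_iommu_unmap() need no translation logic of their own.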
