@@ -711,7 +711,8 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
 }
 
 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			 int prot, gfp_t gfp, size_t *mapped)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
@@ -720,17 +721,17 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		paddr |= BIT_ULL(32);
 
 	/* Synchronize with the tlb_lock */
-	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
+	return dom->iop->map_pages(dom->iop, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
 }
 
 static size_t mtk_iommu_unmap(struct iommu_domain *domain,
-			      unsigned long iova, size_t size,
+			      unsigned long iova, size_t pgsize, size_t pgcount,
 			      struct iommu_iotlb_gather *gather)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-	iommu_iotlb_gather_add_range(gather, iova, size);
-	return dom->iop->unmap(dom->iop, iova, size, gather);
+	iommu_iotlb_gather_add_range(gather, iova, pgsize * pgcount);
+	return dom->iop->unmap_pages(dom->iop, iova, pgsize, pgcount, gather);
 }
 
 static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
@@ -938,8 +939,8 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= mtk_iommu_attach_device,
 		.detach_dev	= mtk_iommu_detach_device,
-		.map		= mtk_iommu_map,
-		.unmap		= mtk_iommu_unmap,
+		.map_pages	= mtk_iommu_map,
+		.unmap_pages	= mtk_iommu_unmap,
 		.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
 		.iotlb_sync	= mtk_iommu_iotlb_sync,
 		.iotlb_sync_map	= mtk_iommu_sync_map,