Skip to content

Commit 6b080c4

Browse files
hegdevasant authored and joergroedel committed
iommu/amd: Add map/unmap_pages() iommu_domain_ops callback support
Implement the map_pages() and unmap_pages() callback for the AMD IOMMU driver to allow calls from iommu core to map and unmap multiple pages. Also deprecate map/unmap callbacks. Finally gatherer is not updated by iommu_v1_unmap_pages(). Hence pass NULL instead of gather to iommu_v1_unmap_pages. Suggested-by: Robin Murphy <[email protected]> Signed-off-by: Vasant Hegde <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 251c4db commit 6b080c4

File tree

1 file changed

+16
-13
lines changed

1 file changed

+16
-13
lines changed

drivers/iommu/amd/iommu.c

Lines changed: 16 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -2174,13 +2174,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
21742174
struct protection_domain *domain = to_pdomain(dom);
21752175
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
21762176

2177-
if (ops->map)
2177+
if (ops->map_pages)
21782178
domain_flush_np_cache(domain, iova, size);
21792179
}
21802180

2181-
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
2182-
phys_addr_t paddr, size_t page_size, int iommu_prot,
2183-
gfp_t gfp)
2181+
static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
2182+
phys_addr_t paddr, size_t pgsize, size_t pgcount,
2183+
int iommu_prot, gfp_t gfp, size_t *mapped)
21842184
{
21852185
struct protection_domain *domain = to_pdomain(dom);
21862186
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2196,8 +2196,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
21962196
if (iommu_prot & IOMMU_WRITE)
21972197
prot |= IOMMU_PROT_IW;
21982198

2199-
if (ops->map)
2200-
ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
2199+
if (ops->map_pages) {
2200+
ret = ops->map_pages(ops, iova, paddr, pgsize,
2201+
pgcount, prot, gfp, mapped);
2202+
}
22012203

22022204
return ret;
22032205
}
@@ -2223,9 +2225,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
22232225
iommu_iotlb_gather_add_range(gather, iova, size);
22242226
}
22252227

2226-
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2227-
size_t page_size,
2228-
struct iommu_iotlb_gather *gather)
2228+
static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
2229+
size_t pgsize, size_t pgcount,
2230+
struct iommu_iotlb_gather *gather)
22292231
{
22302232
struct protection_domain *domain = to_pdomain(dom);
22312233
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2235,9 +2237,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
22352237
(domain->iop.mode == PAGE_MODE_NONE))
22362238
return 0;
22372239

2238-
r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
2240+
r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
22392241

2240-
amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
2242+
if (r)
2243+
amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
22412244

22422245
return r;
22432246
}
@@ -2399,8 +2402,8 @@ const struct iommu_ops amd_iommu_ops = {
23992402
.default_domain_ops = &(const struct iommu_domain_ops) {
24002403
.attach_dev = amd_iommu_attach_device,
24012404
.detach_dev = amd_iommu_detach_device,
2402-
.map = amd_iommu_map,
2403-
.unmap = amd_iommu_unmap,
2405+
.map_pages = amd_iommu_map_pages,
2406+
.unmap_pages = amd_iommu_unmap_pages,
24042407
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
24052408
.iova_to_phys = amd_iommu_iova_to_phys,
24062409
.flush_iotlb_all = amd_iommu_flush_iotlb_all,

0 commit comments

Comments (0)