Commit 8cc233d

hegdevasant authored and joergroedel committed
iommu/amd/io-pgtable: Implement map_pages io_pgtable_ops callback
Implement the io_pgtable_ops->map_pages() callback for the AMD driver, and deprecate the io_pgtable_ops->map() callback.

Suggested-by: Robin Murphy <[email protected]>
Signed-off-by: Vasant Hegde <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 7e18e42 commit 8cc233d
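
For context, here is an abridged sketch of the struct io_pgtable_ops callbacks involved, approximating include/linux/io-pgtable.h around the time of this commit (other members and documentation omitted). The map_pages() signature below is the one the new iommu_v1_map_pages() implements:

	struct io_pgtable_ops {
		/* legacy single-mapping callback, being deprecated */
		int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
		/* maps pgcount pages of pgsize each; reports progress via *mapped */
		int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
				 phys_addr_t paddr, size_t pgsize, size_t pgcount,
				 int prot, gfp_t gfp, size_t *mapped);
		size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
				size_t size, struct iommu_iotlb_gather *gather);
		phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
					    unsigned long iova);
	};

The key difference from map() is the (pgsize, pgcount) pair plus the *mapped out-parameter, which let a driver map a whole run of equally sized pages per call and report partial progress on failure.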

1 file changed: +34 -25 lines

drivers/iommu/amd/io_pgtable.c

@@ -360,48 +360,57 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
 	int ret, i, count;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));
 
 	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
 		goto out;
 
-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
 
-	ret = -ENOMEM;
-	if (!pte)
-		goto out;
+		ret = -ENOMEM;
+		if (!pte)
+			goto out;
 
-	for (i = 0; i < count; ++i)
-		free_clear_pte(&pte[i], pte[i], &freelist);
+		for (i = 0; i < count; ++i)
+			free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
-		updated = true;
+		if (!list_empty(&freelist))
+			updated = true;
 
-	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
-		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-	} else
-		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		if (count > 1) {
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		} else
+			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
-	if (prot & IOMMU_PROT_IR)
-		__pte |= IOMMU_PTE_IR;
-	if (prot & IOMMU_PROT_IW)
-		__pte |= IOMMU_PTE_IW;
+		if (prot & IOMMU_PROT_IR)
+			__pte |= IOMMU_PTE_IR;
+		if (prot & IOMMU_PROT_IW)
+			__pte |= IOMMU_PTE_IW;
 
-	for (i = 0; i < count; ++i)
-		pte[i] = __pte;
+		for (i = 0; i < count; ++i)
+			pte[i] = __pte;
+
+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+		if (mapped)
+			*mapped += pgsize;
+	}
 
 	ret = 0;
 
@@ -514,7 +523,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb = &v1_flush_ops;
 
-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
 	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
 
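A minimal usage sketch, assuming a hypothetical caller with dom_ops pointing at the domain's io_pgtable_ops and iova/paddr already aligned (illustrative only, not part of this commit):

	size_t mapped = 0;
	int ret;

	/* Map a 64KiB region as sixteen 4KiB pages in a single call */
	ret = dom_ops->map_pages(dom_ops, iova, paddr,
				 SZ_4K, 16,	/* pgsize, pgcount */
				 IOMMU_PROT_IR | IOMMU_PROT_IW,
				 GFP_KERNEL, &mapped);
	/*
	 * On error, "mapped" reports how many bytes were actually mapped,
	 * so the caller can tear down the partial range before bailing out.
	 */

Note that the AMD implementation above still walks the range one pgsize chunk at a time internally; the gain is that the core can hand the driver a whole run of pages in one callback instead of issuing one map() call per page.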