@@ -360,48 +360,57 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	LIST_HEAD(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
 	int ret, i, count;
 
-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));
 
 	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
 		goto out;
 
-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte   = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
 
-	ret = -ENOMEM;
-	if (!pte)
-		goto out;
+		ret = -ENOMEM;
+		if (!pte)
+			goto out;
 
-	for (i = 0; i < count; ++i)
-		free_clear_pte(&pte[i], pte[i], &freelist);
+		for (i = 0; i < count; ++i)
+			free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
-		updated = true;
+		if (!list_empty(&freelist))
+			updated = true;
 
-	if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
-		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-	} else
-		__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		if (count > 1) {
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+		} else
+			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
-	if (prot & IOMMU_PROT_IR)
-		__pte |= IOMMU_PTE_IR;
-	if (prot & IOMMU_PROT_IW)
-		__pte |= IOMMU_PTE_IW;
+		if (prot & IOMMU_PROT_IR)
+			__pte |= IOMMU_PTE_IR;
+		if (prot & IOMMU_PROT_IW)
+			__pte |= IOMMU_PTE_IW;
 
-	for (i = 0; i < count; ++i)
-		pte[i] = __pte;
+		for (i = 0; i < count; ++i)
+			pte[i] = __pte;
+
+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+		if (mapped)
+			*mapped += pgsize;
+	}
 
 	ret = 0;
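
The hunk above changes the callback contract from one page per call to a (pgsize, pgcount) batch, with progress reported back through *mapped. A minimal caller sketch of that contract follows; demo_map and its error-unwind policy are illustrative assumptions, not part of this commit:

/* Hypothetical illustration (not from this commit): map pgcount pages of
 * pgsize bytes each through the new callback. On failure, *mapped holds
 * how many bytes were actually installed, so the caller can tear down
 * exactly that prefix instead of guessing.
 */
static int demo_map(struct io_pgtable_ops *ops, unsigned long iova,
		    phys_addr_t paddr, size_t pgsize, size_t pgcount,
		    int prot, gfp_t gfp)
{
	size_t mapped = 0;
	int ret;

	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount,
			     prot, gfp, &mapped);
	if (ret && mapped)
		/* assumes unmap() accepts the partially mapped length */
		ops->unmap(ops, iova, mapped, NULL);

	return ret;
}
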
@@ -514,7 +523,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb            = &v1_flush_ops;
 
-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
 	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
 	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
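
For context, the slots populated in this hunk belong to struct io_pgtable_ops from include/linux/io-pgtable.h. A trimmed sketch of the members this commit touches (other members elided; see the header for the authoritative definition):

/* Trimmed sketch of the relevant io_pgtable_ops members. */
struct io_pgtable_ops {
	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
	/* ... other members elided ... */
};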