@@ -378,8 +378,7 @@ static void tce_iommu_release(void *iommu_data)
 	kfree(container);
 }
 
-static void tce_iommu_unuse_page(struct tce_container *container,
-		unsigned long hpa)
+static void tce_iommu_unuse_page(unsigned long hpa)
 {
 	struct page *page;
 
@@ -474,7 +473,7 @@ static int tce_iommu_clear(struct tce_container *container,
 			continue;
 		}
 
-		tce_iommu_unuse_page(container, oldhpa);
+		tce_iommu_unuse_page(oldhpa);
 	}
 
 	iommu_tce_kill(tbl, firstentry, pages);
@@ -524,15 +523,15 @@ static long tce_iommu_build(struct tce_container *container,
 		ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
 				&hpa, &dirtmp);
 		if (ret) {
-			tce_iommu_unuse_page(container, hpa);
+			tce_iommu_unuse_page(hpa);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);
 			break;
 		}
 
 		if (dirtmp != DMA_NONE)
-			tce_iommu_unuse_page(container, hpa);
+			tce_iommu_unuse_page(hpa);
 
 		tce += IOMMU_PAGE_SIZE(tbl);
 	}
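For context, a hedged sketch of what the simplified helper could look like after this change. Only the new declaration and the `struct page *page;` line appear in the hunks above; the rest of the body below is an assumption, shown to illustrate why the unused container argument can be dropped without affecting behaviour:

/*
 * Sketch only (assumed body, not taken from this commit): the helper
 * resolves the host physical address to its struct page and releases
 * the reference taken when the page was pinned. Nothing here needs the
 * tce_container, which is why the parameter is removed in the diff above.
 */
static void tce_iommu_unuse_page(unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	unpin_user_page(page);	/* assumed: drop the pin on the page */
}

With the parameter gone, every call site in tce_iommu_clear() and tce_iommu_build() passes only the host physical address, as the hunks above show.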