@@ -388,43 +388,23 @@ static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
 	return ret;
 }
 
-static int msm_iommu_add_device(struct device *dev)
+static struct iommu_device *msm_iommu_probe_device(struct device *dev)
 {
 	struct msm_iommu_dev *iommu;
-	struct iommu_group *group;
 	unsigned long flags;
 
 	spin_lock_irqsave(&msm_iommu_lock, flags);
 	iommu = find_iommu_for_dev(dev);
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 
-	if (iommu)
-		iommu_device_link(&iommu->iommu, dev);
-	else
-		return -ENODEV;
-
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
-
-	iommu_group_put(group);
+	if (!iommu)
+		return ERR_PTR(-ENODEV);
 
-	return 0;
+	return &iommu->iommu;
 }
 
-static void msm_iommu_remove_device(struct device *dev)
+static void msm_iommu_release_device(struct device *dev)
 {
-	struct msm_iommu_dev *iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&msm_iommu_lock, flags);
-	iommu = find_iommu_for_dev(dev);
-	spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
-	if (iommu)
-		iommu_device_unlink(&iommu->iommu, dev);
-
-	iommu_group_remove_device(dev);
 }
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -708,8 +688,8 @@ static struct iommu_ops msm_iommu_ops = {
 	 */
 	.iotlb_sync = NULL,
 	.iova_to_phys = msm_iommu_iova_to_phys,
-	.add_device = msm_iommu_add_device,
-	.remove_device = msm_iommu_remove_device,
+	.probe_device = msm_iommu_probe_device,
+	.release_device = msm_iommu_release_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
0 commit comments