@@ -788,6 +788,29 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
788
788
return 0 ;
789
789
}
790
790
791
+ static void viommu_detach_dev (struct viommu_endpoint * vdev )
792
+ {
793
+ int i ;
794
+ struct virtio_iommu_req_detach req ;
795
+ struct viommu_domain * vdomain = vdev -> vdomain ;
796
+ struct iommu_fwspec * fwspec = dev_iommu_fwspec_get (vdev -> dev );
797
+
798
+ if (!vdomain )
799
+ return ;
800
+
801
+ req = (struct virtio_iommu_req_detach ) {
802
+ .head .type = VIRTIO_IOMMU_T_DETACH ,
803
+ .domain = cpu_to_le32 (vdomain -> id ),
804
+ };
805
+
806
+ for (i = 0 ; i < fwspec -> num_ids ; i ++ ) {
807
+ req .endpoint = cpu_to_le32 (fwspec -> ids [i ]);
808
+ WARN_ON (viommu_send_req_sync (vdev -> viommu , & req , sizeof (req )));
809
+ }
810
+ vdomain -> nr_endpoints -- ;
811
+ vdev -> vdomain = NULL ;
812
+ }
813
+
791
814
static int viommu_map_pages (struct iommu_domain * domain , unsigned long iova ,
792
815
phys_addr_t paddr , size_t pgsize , size_t pgcount ,
793
816
int prot , gfp_t gfp , size_t * mapped )
/*
 * viommu_release_device - free per-endpoint driver state for a device
 * @dev: device being released by the IOMMU core
 *
 * Teardown order matters: the endpoint is detached from its domain first
 * (viommu_detach_dev is a no-op if it was never attached), the reserved
 * regions list is released while @vdev is still valid, and only then is
 * the endpoint structure freed.
 */
static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	viommu_detach_dev(vdev);
	/* vdev->resv_regions is read here, so this must precede kfree(). */
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}
0 commit comments