@@ -63,7 +63,6 @@ struct viommu_mapping {
 struct viommu_domain {
 	struct iommu_domain		domain;
 	struct viommu_dev		*viommu;
-	struct mutex			mutex; /* protects viommu pointer */
 	unsigned int			id;
 	u32				map_flags;
 
@@ -97,6 +96,8 @@ struct viommu_event {
 	};
 };
 
+static struct viommu_domain viommu_identity_domain;
+
 #define to_viommu_domain(domain)	\
 	container_of(domain, struct viommu_domain, domain)
 
@@ -653,65 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)
 
 /* IOMMU API */
 
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
 {
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct viommu_dev *viommu = vdev->viommu;
+	unsigned long viommu_page_size;
 	struct viommu_domain *vdomain;
-
-	if (type != IOMMU_DOMAIN_UNMANAGED &&
-	    type != IOMMU_DOMAIN_DMA &&
-	    type != IOMMU_DOMAIN_IDENTITY)
-		return NULL;
-
-	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
-	if (!vdomain)
-		return NULL;
-
-	mutex_init(&vdomain->mutex);
-	spin_lock_init(&vdomain->mappings_lock);
-	vdomain->mappings = RB_ROOT_CACHED;
-
-	return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
-				  struct iommu_domain *domain)
-{
 	int ret;
-	unsigned long viommu_page_size;
-	struct viommu_dev *viommu = vdev->viommu;
-	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
 	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
 	if (viommu_page_size > PAGE_SIZE) {
 		dev_err(vdev->dev,
 			"granule 0x%lx larger than system page size 0x%lx\n",
 			viommu_page_size, PAGE_SIZE);
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 	}
 
+	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+	if (!vdomain)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&vdomain->mappings_lock);
+	vdomain->mappings = RB_ROOT_CACHED;
+
 	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
 			      viommu->last_domain, GFP_KERNEL);
-	if (ret < 0)
-		return ret;
+	if (ret < 0) {
+		kfree(vdomain);
+		return ERR_PTR(ret);
+	}
 
-	vdomain->id		= (unsigned int)ret;
+	vdomain->id = (unsigned int)ret;
 
-	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
-	domain->geometry	= viommu->geometry;
+	vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+	vdomain->domain.geometry = viommu->geometry;
 
-	vdomain->map_flags	= viommu->map_flags;
-	vdomain->viommu		= viommu;
+	vdomain->map_flags = viommu->map_flags;
+	vdomain->viommu = viommu;
 
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		ret = viommu_domain_map_identity(vdev, vdomain);
-		if (ret) {
-			ida_free(&viommu->domain_ids, vdomain->id);
-			vdomain->viommu = NULL;
-			return ret;
-		}
-	}
-
-	return 0;
+	return &vdomain->domain;
 }
 
 static void viommu_domain_free(struct iommu_domain *domain)
@@ -727,27 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
 	kfree(vdomain);
 }
 
+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct iommu_domain *domain;
+	int ret;
+
+	if (virtio_has_feature(vdev->viommu->vdev,
+			       VIRTIO_IOMMU_F_BYPASS_CONFIG))
+		return &viommu_identity_domain.domain;
+
+	domain = viommu_domain_alloc_paging(dev);
+	if (IS_ERR(domain))
+		return domain;
+
+	ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+	if (ret) {
+		viommu_domain_free(domain);
+		return ERR_PTR(ret);
+	}
+	return domain;
+}
+
 static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret = 0;
 	struct virtio_iommu_req_attach req;
 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-	mutex_lock(&vdomain->mutex);
-	if (!vdomain->viommu) {
-		/*
-		 * Properly initialize the domain now that we know which viommu
-		 * owns it.
-		 */
-		ret = viommu_domain_finalise(vdev, domain);
-	} else if (vdomain->viommu != vdev->viommu) {
-		ret = -EINVAL;
-	}
-	mutex_unlock(&vdomain->mutex);
-
-	if (ret)
-		return ret;
+	if (vdomain->viommu != vdev->viommu)
+		return -EINVAL;
 
 	/*
 	 * In the virtio-iommu device, when attaching the endpoint to a new
@@ -1096,9 +1087,9 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 }
 
 static struct iommu_ops viommu_ops = {
-	.identity_domain	= &viommu_identity_domain.domain,
 	.capable		= viommu_capable,
-	.domain_alloc		= viommu_domain_alloc,
+	.domain_alloc_identity	= viommu_domain_alloc_identity,
+	.domain_alloc_paging	= viommu_domain_alloc_paging,
 	.probe_device		= viommu_probe_device,
 	.release_device		= viommu_release_device,
 	.device_group		= viommu_device_group,
@@ -1224,12 +1215,6 @@ static int viommu_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
 		viommu->identity_domain_id = viommu->first_domain;
 		viommu->first_domain++;
-	} else {
-		/*
-		 * Assume the VMM is sensible and it either supports bypass on
-		 * all instances or no instances.
-		 */
-		viommu_ops.identity_domain = NULL;
 	}
 
 	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
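The hunks above replace the deferred viommu_domain_finalise() step with per-type constructors, domain_alloc_paging() and domain_alloc_identity(), which report failure through ERR_PTR() instead of returning NULL. The stand-alone C sketch below illustrates only that error-reporting pattern in userspace; the demo_* names, the simplified ERR_PTR()/IS_ERR()/PTR_ERR() macros, and the hw_bypass flag are hypothetical stand-ins for illustration, not the kernel implementation.

/*
 * Minimal userspace sketch of the ERR_PTR-style allocation pattern the
 * patch adopts. The demo_* names and the simplified macros below are
 * stand-ins for this example only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct demo_domain {
	unsigned int id;
	int identity;		/* 1 when every access passes through unmapped */
};

/* Shared static object, analogous to viommu_identity_domain above. */
static struct demo_domain demo_static_identity = { .identity = 1 };

/* Paging domain: always needs a fresh allocation and an ID. */
static struct demo_domain *demo_alloc_paging(unsigned int id)
{
	struct demo_domain *d = calloc(1, sizeof(*d));

	if (!d)
		return ERR_PTR(-ENOMEM);
	d->id = id;
	return d;
}

/* Identity domain: reuse the static object when hardware bypass exists. */
static struct demo_domain *demo_alloc_identity(unsigned int id, int hw_bypass)
{
	struct demo_domain *d;

	if (hw_bypass)
		return &demo_static_identity;

	d = demo_alloc_paging(id);
	if (IS_ERR(d))
		return d;	/* propagate the precise error, not NULL */
	d->identity = 1;
	return d;
}

int main(void)
{
	struct demo_domain *d = demo_alloc_identity(1, 0);

	if (IS_ERR(d))
		return (int)-PTR_ERR(d);
	printf("domain %u, identity=%d\n", d->id, d->identity);
	if (d != &demo_static_identity)
		free(d);
	return 0;
}

Built with a plain cc demo.c, the program either prints the allocated domain or exits with the propagated errno, mirroring how viommu_domain_alloc_identity() hands back the exact error code from viommu_domain_alloc_paging() rather than collapsing every failure to NULL.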