@@ -45,6 +45,9 @@ static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
 static u32 iommu_cmd_line __read_mostly;
 
+/* Tags used with xa_tag_pointer() in group->pasid_array */
+enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };
+
 struct iommu_group {
         struct kobject kobj;
         struct kobject *devices_kobj;
@@ -2147,6 +2150,17 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
         return dev->iommu_group->default_domain;
 }
 
+static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
+                                          struct iommu_attach_handle *handle)
+{
+        if (handle) {
+                handle->domain = domain;
+                return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
+        }
+
+        return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
+}
+
 static int __iommu_attach_group(struct iommu_domain *domain,
                                 struct iommu_group *group)
 {
@@ -2187,32 +2201,6 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_attach_group);
 
-/**
- * iommu_group_replace_domain - replace the domain that a group is attached to
- * @group: IOMMU group that will be attached to the new domain
- * @new_domain: new IOMMU domain to replace with
- *
- * This API allows the group to switch domains without being forced to go to
- * the blocking domain in-between.
- *
- * If the currently attached domain is a core domain (e.g. a default_domain),
- * it will act just like the iommu_attach_group().
- */
-int iommu_group_replace_domain(struct iommu_group *group,
-                               struct iommu_domain *new_domain)
-{
-        int ret;
-
-        if (!new_domain)
-                return -EINVAL;
-
-        mutex_lock(&group->mutex);
-        ret = __iommu_group_set_domain(group, new_domain);
-        mutex_unlock(&group->mutex);
-        return ret;
-}
-EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL");
-
 static int __iommu_device_set_domain(struct iommu_group *group,
                                      struct device *dev,
                                      struct iommu_domain *new_domain,
@@ -3374,6 +3362,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
         struct iommu_group *group = dev->iommu_group;
         struct group_device *device;
         const struct iommu_ops *ops;
+        void *entry;
         int ret;
 
         if (!group)
@@ -3397,16 +3386,31 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
                 }
         }
 
-        if (handle)
-                handle->domain = domain;
+        entry = iommu_make_pasid_array_entry(domain, handle);
 
-        ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
+        /*
+         * Entry present is a failure case. Use xa_insert() instead of
+         * xa_reserve().
+         */
+        ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
         if (ret)
                 goto out_unlock;
 
         ret = __iommu_set_group_pasid(domain, group, pasid);
-        if (ret)
-                xa_erase(&group->pasid_array, pasid);
+        if (ret) {
+                xa_release(&group->pasid_array, pasid);
+                goto out_unlock;
+        }
+
+        /*
+         * The xa_insert() above reserved the memory, and the group->mutex is
+         * held, this cannot fail. The new domain cannot be visible until the
+         * operation succeeds as we cannot tolerate PRIs becoming concurrently
+         * queued and then failing attach.
+         */
+        WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+                                   pasid, entry, GFP_KERNEL)));
+
 out_unlock:
         mutex_unlock(&group->mutex);
         return ret;
@@ -3480,13 +3484,17 @@ struct iommu_attach_handle *
 iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
 {
         struct iommu_attach_handle *handle;
+        void *entry;
 
         xa_lock(&group->pasid_array);
-        handle = xa_load(&group->pasid_array, pasid);
-        if (!handle)
+        entry = xa_load(&group->pasid_array, pasid);
+        if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
                 handle = ERR_PTR(-ENOENT);
-        else if (type && handle->domain->type != type)
-                handle = ERR_PTR(-EBUSY);
+        } else {
+                handle = xa_untag_pointer(entry);
+                if (type && handle->domain->type != type)
+                        handle = ERR_PTR(-EBUSY);
+        }
         xa_unlock(&group->pasid_array);
 
         return handle;
@@ -3509,25 +3517,35 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
                               struct iommu_group *group,
                               struct iommu_attach_handle *handle)
 {
+        void *entry;
         int ret;
 
-        if (handle)
-                handle->domain = domain;
+        if (!handle)
+                return -EINVAL;
 
         mutex_lock(&group->mutex);
-        ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+        entry = iommu_make_pasid_array_entry(domain, handle);
+        ret = xa_insert(&group->pasid_array,
+                        IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
         if (ret)
-                goto err_unlock;
+                goto out_unlock;
 
         ret = __iommu_attach_group(domain, group);
-        if (ret)
-                goto err_erase;
-        mutex_unlock(&group->mutex);
+        if (ret) {
+                xa_release(&group->pasid_array, IOMMU_NO_PASID);
+                goto out_unlock;
+        }
 
-        return 0;
-err_erase:
-        xa_erase(&group->pasid_array, IOMMU_NO_PASID);
-err_unlock:
+        /*
+         * The xa_insert() above reserved the memory, and the group->mutex is
+         * held, this cannot fail. The new domain cannot be visible until the
+         * operation succeeds as we cannot tolerate PRIs becoming concurrently
+         * queued and then failing attach.
+         */
+        WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+                                   IOMMU_NO_PASID, entry, GFP_KERNEL)));
+
+out_unlock:
         mutex_unlock(&group->mutex);
         return ret;
 }
@@ -3557,33 +3575,34 @@ EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
  * @new_domain: new IOMMU domain to replace with
  * @handle: attach handle
  *
- * This is a variant of iommu_group_replace_domain(). It allows the caller to
- * provide an attach handle for the new domain and use it when the domain is
- * attached.
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between. It allows the caller to provide an attach
+ * handle for the new domain and use it when the domain is attached.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * it will act just like the iommu_attach_group_handle().
  */
 int iommu_replace_group_handle(struct iommu_group *group,
                                struct iommu_domain *new_domain,
                                struct iommu_attach_handle *handle)
 {
-        void *curr;
+        void *curr, *entry;
         int ret;
 
-        if (!new_domain)
+        if (!new_domain || !handle)
                 return -EINVAL;
 
         mutex_lock(&group->mutex);
-        if (handle) {
-                ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
-                if (ret)
-                        goto err_unlock;
-                handle->domain = new_domain;
-        }
+        entry = iommu_make_pasid_array_entry(new_domain, handle);
+        ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
+        if (ret)
+                goto err_unlock;
 
         ret = __iommu_group_set_domain(group, new_domain);
         if (ret)
                 goto err_release;
 
-        curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+        curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
         WARN_ON(xa_is_err(curr));
 
         mutex_unlock(&group->mutex);
@@ -3596,3 +3615,32 @@ int iommu_replace_group_handle(struct iommu_group *group,
         return ret;
 }
 EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
+
+#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * The implementation of sw_msi() should take msi_addr and map it to
+ * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
+ * mapping information.
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+        struct device *dev = msi_desc_to_dev(desc);
+        struct iommu_group *group = dev->iommu_group;
+        int ret = 0;
+
+        if (!group)
+                return 0;
+
+        mutex_lock(&group->mutex);
+        if (group->domain && group->domain->sw_msi)
+                ret = group->domain->sw_msi(group->domain, desc, msi_addr);
+        mutex_unlock(&group->mutex);
+        return ret;
+}
+#endif /* CONFIG_IRQ_MSI_IOMMU */
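
Note on the tagged pasid_array entries introduced above: xa_tag_pointer() encodes the IOMMU_PASID_ARRAY_* tag in the low bits of the stored pointer, so readers must check xa_pointer_tag() before untagging, as iommu_attach_handle_get() now does. A minimal sketch of how a reader could recover the domain from either kind of entry follows; this helper is illustrative only and is not part of the patch:

        static struct iommu_domain *pasid_entry_to_domain(void *entry)
        {
                /* Handle entries carry the domain inside the attach handle */
                if (xa_pointer_tag(entry) == IOMMU_PASID_ARRAY_HANDLE) {
                        struct iommu_attach_handle *handle = xa_untag_pointer(entry);

                        return handle->domain;
                }
                /* IOMMU_PASID_ARRAY_DOMAIN: the entry is the domain itself */
                return xa_untag_pointer(entry);
        }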
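Note on iommu_dma_prepare_msi(): the new kernel-doc only states the contract for sw_msi(), so below is a rough sketch of what a domain-owner implementation might look like. The choice of IOVA, the mapping flags, and the msi_desc_set_iommu_msi_iova() argument order are assumptions for illustration, not taken from this patch:

        /* Hypothetical sw_msi callback: map the MSI doorbell, record the IOVA */
        static int example_sw_msi(struct iommu_domain *domain,
                                  struct msi_desc *desc, phys_addr_t msi_addr)
        {
                unsigned long iova = 0x8000000;        /* assumed reserved MSI window */
                int ret;

                ret = iommu_map(domain, iova, msi_addr & PAGE_MASK, PAGE_SIZE,
                                IOMMU_WRITE | IOMMU_MMIO, GFP_KERNEL);
                if (ret)
                        return ret;

                /* Tell the MSI layer which IOVA now backs this descriptor */
                msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
                return 0;
        }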