@@ -45,6 +45,9 @@ static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
 static u32 iommu_cmd_line __read_mostly;
 
+/* Tags used with xa_tag_pointer() in group->pasid_array */
+enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };
+
 struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
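
Note: the new enum relies on the XArray tagged-pointer convention, where the low two bits of a naturally aligned pointer encode a small integer tag, so one pasid_array slot can distinguish a bare domain entry from an attach handle. A minimal user-space model of the idea; tag_pointer(), untag_pointer(), and pointer_tag() here are illustrative stand-ins for the kernel's xa_tag_pointer(), xa_untag_pointer(), and xa_pointer_tag(), not code from this patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { PASID_ARRAY_DOMAIN = 0, PASID_ARRAY_HANDLE = 1 };

static void *tag_pointer(void *p, unsigned long tag)
{
	return (void *)((uintptr_t)p | tag);	/* tag must fit in bits 0-1 */
}

static void *untag_pointer(void *entry)
{
	return (void *)((uintptr_t)entry & ~3UL);
}

static unsigned int pointer_tag(void *entry)
{
	return (uintptr_t)entry & 3;
}

int main(void)
{
	int object;	/* stands in for an attach handle; 4-byte aligned */
	void *entry = tag_pointer(&object, PASID_ARRAY_HANDLE);

	assert(pointer_tag(entry) == PASID_ARRAY_HANDLE);
	assert(untag_pointer(entry) == &object);
	printf("tag=%u\n", pointer_tag(entry));
	return 0;
}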
@@ -2147,6 +2150,17 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
	return dev->iommu_group->default_domain;
 }
 
+static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
+					  struct iommu_attach_handle *handle)
+{
+	if (handle) {
+		handle->domain = domain;
+		return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
+	}
+
+	return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
+}
+
 static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
 {
@@ -2187,32 +2201,6 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_attach_group);
 
-/**
- * iommu_group_replace_domain - replace the domain that a group is attached to
- * @group: IOMMU group that will be attached to the new domain
- * @new_domain: new IOMMU domain to replace with
- *
- * This API allows the group to switch domains without being forced to go to
- * the blocking domain in-between.
- *
- * If the currently attached domain is a core domain (e.g. a default_domain),
- * it will act just like the iommu_attach_group().
- */
-int iommu_group_replace_domain(struct iommu_group *group,
-			       struct iommu_domain *new_domain)
-{
-	int ret;
-
-	if (!new_domain)
-		return -EINVAL;
-
-	mutex_lock(&group->mutex);
-	ret = __iommu_group_set_domain(group, new_domain);
-	mutex_unlock(&group->mutex);
-	return ret;
-}
-EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL");
-
 static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
@@ -3374,6 +3362,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;
+	void *entry;
	int ret;
 
	if (!group)
@@ -3397,16 +3386,31 @@
		}
	}
 
-	if (handle)
-		handle->domain = domain;
+	entry = iommu_make_pasid_array_entry(domain, handle);
 
-	ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
+	/*
+	 * Entry present is a failure case. Use xa_insert() instead of
+	 * xa_reserve().
+	 */
+	ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
	if (ret)
		goto out_unlock;
 
	ret = __iommu_set_group_pasid(domain, group, pasid);
-	if (ret)
-		xa_erase(&group->pasid_array, pasid);
+	if (ret) {
+		xa_release(&group->pasid_array, pasid);
+		goto out_unlock;
+	}
+
+	/*
+	 * The xa_insert() above reserved the memory, and the group->mutex is
+	 * held, this cannot fail. The new domain cannot be visible until the
+	 * operation succeeds as we cannot tolerate PRIs becoming concurrently
+	 * queued and then failing attach.
+	 */
+	WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+				   pasid, entry, GFP_KERNEL)));
+
 out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
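
For context, the error handling above follows an XArray reserve/commit idiom: xa_insert() with XA_ZERO_ENTRY both rejects an occupied index and preallocates the slot, readers see the reserved slot as empty, and the final xa_store() cannot fail because the memory already exists. A minimal sketch of the same pattern in isolation; do_fallible_work() is an assumed placeholder, not code from this patch:

#include <linux/xarray.h>

extern int do_fallible_work(void);	/* assumed placeholder */

static int reserve_then_commit(struct xarray *xa, unsigned long index,
			       void *entry)
{
	int ret;

	/* Fails with -EBUSY if the index is populated, -ENOMEM on OOM */
	ret = xa_insert(xa, index, XA_ZERO_ENTRY, GFP_KERNEL);
	if (ret)
		return ret;

	ret = do_fallible_work();
	if (ret) {
		xa_release(xa, index);	/* drop the reservation */
		return ret;
	}

	/* Slot memory was preallocated above, so this store cannot fail;
	 * readers only ever observe the fully initialized entry. */
	WARN_ON(xa_is_err(xa_store(xa, index, entry, GFP_KERNEL)));
	return 0;
}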
@@ -3480,13 +3484,17 @@ struct iommu_attach_handle *
 iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
 {
	struct iommu_attach_handle *handle;
+	void *entry;
 
	xa_lock(&group->pasid_array);
-	handle = xa_load(&group->pasid_array, pasid);
-	if (!handle)
+	entry = xa_load(&group->pasid_array, pasid);
+	if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
		handle = ERR_PTR(-ENOENT);
-	else if (type && handle->domain->type != type)
-		handle = ERR_PTR(-EBUSY);
+	} else {
+		handle = xa_untag_pointer(entry);
+		if (type && handle->domain->type != type)
+			handle = ERR_PTR(-EBUSY);
+	}
	xa_unlock(&group->pasid_array);
 
	return handle;
@@ -3509,25 +3517,35 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
			      struct iommu_group *group,
			      struct iommu_attach_handle *handle)
 {
+	void *entry;
	int ret;
 
-	if (handle)
-		handle->domain = domain;
+	if (!handle)
+		return -EINVAL;
 
	mutex_lock(&group->mutex);
-	ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+	entry = iommu_make_pasid_array_entry(domain, handle);
+	ret = xa_insert(&group->pasid_array,
+			IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
	if (ret)
-		goto err_unlock;
+		goto out_unlock;
 
	ret = __iommu_attach_group(domain, group);
-	if (ret)
-		goto err_erase;
-	mutex_unlock(&group->mutex);
+	if (ret) {
+		xa_release(&group->pasid_array, IOMMU_NO_PASID);
+		goto out_unlock;
+	}
 
-	return 0;
-err_erase:
-	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
-err_unlock:
+	/*
+	 * The xa_insert() above reserved the memory, and the group->mutex is
+	 * held, this cannot fail. The new domain cannot be visible until the
+	 * operation succeeds as we cannot tolerate PRIs becoming concurrently
+	 * queued and then failing attach.
+	 */
+	WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+				   IOMMU_NO_PASID, entry, GFP_KERNEL)));
+
+out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
 }
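
A hypothetical caller-side sketch of the tightened contract: the handle is now mandatory, its domain field is filled in by the attach path via iommu_make_pasid_array_entry(), and it is typically embedded in caller state so iommu_attach_handle_get() can recover it later. struct my_attach_state and my_attach() are assumed names, not code from this patch:

#include <linux/iommu.h>

struct my_attach_state {
	struct iommu_attach_handle handle;	/* set up by the attach path */
	/* ... caller-private fields ... */
};

static int my_attach(struct iommu_domain *domain, struct iommu_group *group,
		     struct my_attach_state *state)
{
	/* handle->domain is assigned inside iommu_attach_group_handle() */
	return iommu_attach_group_handle(domain, group, &state->handle);
}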
@@ -3557,33 +3575,34 @@ EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
 * @new_domain: new IOMMU domain to replace with
 * @handle: attach handle
 *
- * This is a variant of iommu_group_replace_domain(). It allows the caller to
- * provide an attach handle for the new domain and use it when the domain is
- * attached.
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between. It allows the caller to provide an attach
+ * handle for the new domain and use it when the domain is attached.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * it will act just like the iommu_attach_group_handle().
 */
 int iommu_replace_group_handle(struct iommu_group *group,
			       struct iommu_domain *new_domain,
			       struct iommu_attach_handle *handle)
 {
-	void *curr;
+	void *curr, *entry;
	int ret;
 
-	if (!new_domain)
+	if (!new_domain || !handle)
		return -EINVAL;
 
	mutex_lock(&group->mutex);
-	if (handle) {
-		ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
-		if (ret)
-			goto err_unlock;
-		handle->domain = new_domain;
-	}
+	entry = iommu_make_pasid_array_entry(new_domain, handle);
+	ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
+	if (ret)
+		goto err_unlock;
 
	ret = __iommu_group_set_domain(group, new_domain);
	if (ret)
		goto err_release;
 
-	curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+	curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
	WARN_ON(xa_is_err(curr));
 
	mutex_unlock(&group->mutex);
@@ -3596,3 +3615,32 @@ int iommu_replace_group_handle(struct iommu_group *group,
	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
+
+#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * The implementation of sw_msi() should take msi_addr and map it to
+ * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
+ * mapping information.
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+	struct device *dev = msi_desc_to_dev(desc);
+	struct iommu_group *group = dev->iommu_group;
+	int ret = 0;
+
+	if (!group)
+		return 0;
+
+	mutex_lock(&group->mutex);
+	if (group->domain && group->domain->sw_msi)
+		ret = group->domain->sw_msi(group->domain, desc, msi_addr);
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+#endif /* CONFIG_IRQ_MSI_IOMMU */
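
To illustrate the new hook, here is a hypothetical sw_msi() implementation sketch. The callback signature is inferred from the call site above; MSI_IOVA_BASE is an assumed fixed doorbell window, and msi_desc_set_iommu_msi_iova() is used in a (desc, iova, shift) form per the kernel-doc, so treat both as assumptions rather than this patch's code:

#include <linux/iommu.h>
#include <linux/msi.h>

#define MSI_IOVA_BASE	0x8000000UL	/* assumed fixed MSI IOVA window */

static int example_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
			  phys_addr_t msi_addr)
{
	int ret;

	msi_addr &= PAGE_MASK;	/* map the whole doorbell page */
	ret = iommu_map(domain, MSI_IOVA_BASE, msi_addr, PAGE_SIZE,
			IOMMU_WRITE | IOMMU_MMIO, GFP_KERNEL);
	if (ret)
		return ret;

	/* Record the IOVA so the MSI core programs it into the device */
	msi_desc_set_iommu_msi_iova(desc, MSI_IOVA_BASE, PAGE_SHIFT);
	return 0;
}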