Skip to content

Commit 5e9f822

Browse files
yiliu1765 and jgunthorpe
authored and committed
iommu: Swap the order of setting group->pasid_array and calling attach op of iommu drivers
The current implementation stores entry to the group->pasid_array before the underlying iommu driver has successfully set the new domain. This can lead to issues where PRIs are received on the new domain before the attach operation is completed. This patch swaps the order of operations to ensure that the domain is set in the underlying iommu driver before updating the group->pasid_array. Link: https://patch.msgid.link/r/[email protected] Suggested-by: Jason Gunthorpe <[email protected]> Reviewed-by: Jason Gunthorpe <[email protected]> Reviewed-by: Kevin Tian <[email protected]> Reviewed-by: Nicolin Chen <[email protected]> Reviewed-by: Lu Baolu <[email protected]> Signed-off-by: Yi Liu <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent e1ea9d3 commit 5e9f822

File tree

1 file changed

+36
-12
lines changed

1 file changed

+36
-12
lines changed

drivers/iommu/iommu.c

Lines changed: 36 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -3388,13 +3388,29 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
33883388

33893389
entry = iommu_make_pasid_array_entry(domain, handle);
33903390

3391-
ret = xa_insert(&group->pasid_array, pasid, entry, GFP_KERNEL);
3391+
/*
3392+
* Entry present is a failure case. Use xa_insert() instead of
3393+
* xa_reserve().
3394+
*/
3395+
ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
33923396
if (ret)
33933397
goto out_unlock;
33943398

33953399
ret = __iommu_set_group_pasid(domain, group, pasid);
3396-
if (ret)
3397-
xa_erase(&group->pasid_array, pasid);
3400+
if (ret) {
3401+
xa_release(&group->pasid_array, pasid);
3402+
goto out_unlock;
3403+
}
3404+
3405+
/*
3406+
* The xa_insert() above reserved the memory, and the group->mutex is
3407+
* held, this cannot fail. The new domain cannot be visible until the
3408+
* operation succeeds as we cannot tolerate PRIs becoming concurrently
3409+
* queued and then failing attach.
3410+
*/
3411+
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
3412+
pasid, entry, GFP_KERNEL)));
3413+
33983414
out_unlock:
33993415
mutex_unlock(&group->mutex);
34003416
return ret;
@@ -3509,19 +3525,27 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
35093525

35103526
mutex_lock(&group->mutex);
35113527
entry = iommu_make_pasid_array_entry(domain, handle);
3512-
ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
3528+
ret = xa_insert(&group->pasid_array,
3529+
IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
35133530
if (ret)
3514-
goto err_unlock;
3531+
goto out_unlock;
35153532

35163533
ret = __iommu_attach_group(domain, group);
3517-
if (ret)
3518-
goto err_erase;
3519-
mutex_unlock(&group->mutex);
3534+
if (ret) {
3535+
xa_release(&group->pasid_array, IOMMU_NO_PASID);
3536+
goto out_unlock;
3537+
}
35203538

3521-
return 0;
3522-
err_erase:
3523-
xa_erase(&group->pasid_array, IOMMU_NO_PASID);
3524-
err_unlock:
3539+
/*
3540+
* The xa_insert() above reserved the memory, and the group->mutex is
3541+
* held, this cannot fail. The new domain cannot be visible until the
3542+
* operation succeeds as we cannot tolerate PRIs becoming concurrently
3543+
* queued and then failing attach.
3544+
*/
3545+
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
3546+
IOMMU_NO_PASID, entry, GFP_KERNEL)));
3547+
3548+
out_unlock:
35253549
mutex_unlock(&group->mutex);
35263550
return ret;
35273551
}

0 commit comments

Comments (0)