@@ -1377,12 +1377,6 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 
 		if (!iommu->v2 && iova > dma->iova)
 			break;
-		/*
-		 * Task with same address space who mapped this iova range is
-		 * allowed to unmap the iova range.
-		 */
-		if (dma->task->mm != current->mm)
-			break;
 
 		if (invalidate_vaddr) {
 			if (dma->vaddr_invalid) {
@@ -1679,18 +1673,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	return ret;
 }
 
-static int vfio_bus_type(struct device *dev, void *data)
-{
-	struct bus_type **bus = data;
-
-	if (*bus && *bus != dev->bus)
-		return -EINVAL;
-
-	*bus = dev->bus;
-
-	return 0;
-}
-
 static int vfio_iommu_replay(struct vfio_iommu *iommu,
 			     struct vfio_domain *domain)
 {
@@ -2153,13 +2135,26 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
 	list_splice_tail(iova_copy, iova);
 }
 
+/* Redundantly walks non-present capabilities to simplify caller */
+static int vfio_iommu_device_capable(struct device *dev, void *data)
+{
+	return device_iommu_capable(dev, (enum iommu_cap)data);
+}
+
+static int vfio_iommu_domain_alloc(struct device *dev, void *data)
+{
+	struct iommu_domain **domain = data;
+
+	*domain = iommu_domain_alloc(dev->bus);
+	return 1; /* Don't iterate */
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 		struct iommu_group *iommu_group, enum vfio_group_type type)
 {
 	struct vfio_iommu *iommu = iommu_data;
 	struct vfio_iommu_group *group;
 	struct vfio_domain *domain, *d;
-	struct bus_type *bus = NULL;
 	bool resv_msi, msi_remap;
 	phys_addr_t resv_msi_base = 0;
 	struct iommu_domain_geometry *geo;
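
The two helpers added above rely on the iommu_group_for_each_dev() contract:
the walk stops as soon as the callback returns non-zero, and that value is
propagated back to the caller. As a sketch of the contract being assumed
(modelled on __iommu_group_for_each_dev() in drivers/iommu/iommu.c at the
time of this change; locking omitted, and struct group_device is private to
that file):

	static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
					      int (*fn)(struct device *, void *))
	{
		struct group_device *device;
		int ret = 0;

		/* Visit each device until a callback returns non-zero */
		list_for_each_entry(device, &group->devices, list) {
			ret = fn(device->dev, data);
			if (ret)
				break;	/* non-zero return ends the walk early */
		}
		return ret;
	}

Hence vfio_iommu_domain_alloc() returns 1 so that only the first device is
visited, while vfio_iommu_device_capable() returns the capability result
itself: the walk ends at the first capable device and only traverses the
whole group when no device is capable, which is what the "Redundantly walks"
comment alludes to.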
@@ -2192,18 +2187,19 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		goto out_unlock;
 	}
 
-	/* Determine bus_type in order to allocate a domain */
-	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
-	if (ret)
-		goto out_free_group;
-
 	ret = -ENOMEM;
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		goto out_free_group;
 
+	/*
+	 * Going via the iommu_group iterator avoids races, and trivially gives
+	 * us a representative device for the IOMMU API call. We don't actually
+	 * want to iterate beyond the first device (if any).
+	 */
 	ret = -EIO;
-	domain->domain = iommu_domain_alloc(bus);
+	iommu_group_for_each_dev(iommu_group, &domain->domain,
+				 vfio_iommu_domain_alloc);
 	if (!domain->domain)
 		goto out_free_domain;
 
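
Note that kzalloc() has already zeroed domain->domain, and the callback only
runs if the group contains at least one device, so the NULL check below the
iterator covers both an empty group and a failed allocation. For a non-empty
group the new code effectively reduces to the old single call (illustrative
only; "dev" stands for the group's first device, which the patch never names):

	/* Hypothetical expansion for a group with at least one device */
	domain->domain = iommu_domain_alloc(dev->bus);
	if (!domain->domain)	/* also reached when the group is empty */
		goto out_free_domain;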
@@ -2258,7 +2254,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	list_add(&group->next, &domain->group_list);
 
 	msi_remap = irq_domain_check_msi_remap() ||
-		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+		    iommu_group_for_each_dev(iommu_group, (void *)IOMMU_CAP_INTR_REMAP,
+					     vfio_iommu_device_capable);
 
 	if (!allow_unsafe_interrupts && !msi_remap) {
 		pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
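
The capability check reuses the same early-stop behaviour, passing the enum
through the iterator's void *data cookie: the walk returns 1 at the first
device for which device_iommu_capable() is true and 0 otherwise, which is
exactly the "any device in the group" semantic msi_remap wants. An open-coded
equivalent, purely for illustration (gdev and the direct list walk are
hypothetical, since group internals are private to drivers/iommu/iommu.c):

	/* Hypothetical open-coded form of the new msi_remap expression */
	bool group_intr_remap = false;
	struct group_device *gdev;

	list_for_each_entry(gdev, &iommu_group->devices, list) {
		if (device_iommu_capable(gdev->dev, IOMMU_CAP_INTR_REMAP)) {
			group_intr_remap = true;	/* first capable device wins */
			break;
		}
	}
	msi_remap = irq_domain_check_msi_remap() || group_intr_remap;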