Skip to content

Commit b331252

Browse files
jgunthorpe authored and willdeacon committed
iommu/vt-d: Create unique domain ops for each stage
Use the domain ops pointer to tell what kind of domain it is instead of the internal use_first_level indication. This also protects against wrongly using a SVA/nested/IDENTITY/BLOCKED domain type in places they should not be. The only remaining uses of use_first_level outside the paging domain are in paging_domain_compatible() and intel_iommu_enforce_cache_coherency(). Thus, remove the useless sets of use_first_level in intel_svm_domain_alloc() and intel_iommu_domain_alloc_nested(). None of the unique ops for these domain types ever reference it on their call chains. Add a WARN_ON() check in domain_context_mapping_one() as it only works with second stage. This is preparation for iommupt which will have different ops for each of the stages. Reviewed-by: Kevin Tian <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Lu Baolu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent b9434ba commit b331252

File tree

5 files changed

+58
-24
lines changed

5 files changed

+58
-24
lines changed

drivers/iommu/intel/cache.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
371371
struct intel_iommu *iommu = tag->iommu;
372372
u64 type = DMA_TLB_PSI_FLUSH;
373373

374-
if (domain->use_first_level) {
374+
if (intel_domain_is_fs_paging(domain)) {
375375
qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
376376
pages, ih, domain->qi_batch);
377377
return;
@@ -546,7 +546,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
546546
qi_batch_flush_descs(iommu, domain->qi_batch);
547547
iommu = tag->iommu;
548548

549-
if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
549+
if (!cap_caching_mode(iommu->cap) ||
550+
intel_domain_is_fs_paging(domain)) {
550551
iommu_flush_write_buffer(iommu);
551552
continue;
552553
}

drivers/iommu/intel/iommu.c

Lines changed: 42 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1462,6 +1462,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
14621462
struct context_entry *context;
14631463
int ret;
14641464

1465+
if (WARN_ON(!intel_domain_is_ss_paging(domain)))
1466+
return -EINVAL;
1467+
14651468
pr_debug("Set context mapping for %02x:%02x.%d\n",
14661469
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
14671470

@@ -1780,7 +1783,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
17801783
static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
17811784
struct intel_iommu *iommu)
17821785
{
1783-
if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
1786+
if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
17841787
return true;
17851788

17861789
if (rwbf_quirk || cap_rwbf(iommu->cap))
@@ -1812,12 +1815,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
18121815

18131816
if (!sm_supported(iommu))
18141817
ret = domain_context_mapping(domain, dev);
1815-
else if (domain->use_first_level)
1818+
else if (intel_domain_is_fs_paging(domain))
18161819
ret = domain_setup_first_level(iommu, domain, dev,
18171820
IOMMU_NO_PASID, NULL);
1818-
else
1821+
else if (intel_domain_is_ss_paging(domain))
18191822
ret = domain_setup_second_level(iommu, domain, dev,
18201823
IOMMU_NO_PASID, NULL);
1824+
else if (WARN_ON(true))
1825+
ret = -EINVAL;
18211826

18221827
if (ret)
18231828
goto out_block_translation;
@@ -3288,7 +3293,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
32883293
domain->use_first_level = first_stage;
32893294

32903295
domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
3291-
domain->domain.ops = intel_iommu_ops.default_domain_ops;
32923296

32933297
/* calculate the address width */
32943298
addr_width = agaw_to_width(iommu->agaw);
@@ -3346,6 +3350,8 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
33463350
dmar_domain = paging_domain_alloc(dev, true);
33473351
if (IS_ERR(dmar_domain))
33483352
return ERR_CAST(dmar_domain);
3353+
3354+
dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
33493355
return &dmar_domain->domain;
33503356
}
33513357

@@ -3374,6 +3380,7 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
33743380
if (IS_ERR(dmar_domain))
33753381
return ERR_CAST(dmar_domain);
33763382

3383+
dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
33773384
dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
33783385

33793386
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
@@ -4098,12 +4105,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
40984105
if (ret)
40994106
goto out_remove_dev_pasid;
41004107

4101-
if (dmar_domain->use_first_level)
4108+
if (intel_domain_is_fs_paging(dmar_domain))
41024109
ret = domain_setup_first_level(iommu, dmar_domain,
41034110
dev, pasid, old);
4104-
else
4111+
else if (intel_domain_is_ss_paging(dmar_domain))
41054112
ret = domain_setup_second_level(iommu, dmar_domain,
41064113
dev, pasid, old);
4114+
else if (WARN_ON(true))
4115+
ret = -EINVAL;
4116+
41074117
if (ret)
41084118
goto out_unwind_iopf;
41094119

@@ -4378,6 +4388,32 @@ static struct iommu_domain identity_domain = {
43784388
},
43794389
};
43804390

4391+
const struct iommu_domain_ops intel_fs_paging_domain_ops = {
4392+
.attach_dev = intel_iommu_attach_device,
4393+
.set_dev_pasid = intel_iommu_set_dev_pasid,
4394+
.map_pages = intel_iommu_map_pages,
4395+
.unmap_pages = intel_iommu_unmap_pages,
4396+
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
4397+
.flush_iotlb_all = intel_flush_iotlb_all,
4398+
.iotlb_sync = intel_iommu_tlb_sync,
4399+
.iova_to_phys = intel_iommu_iova_to_phys,
4400+
.free = intel_iommu_domain_free,
4401+
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
4402+
};
4403+
4404+
const struct iommu_domain_ops intel_ss_paging_domain_ops = {
4405+
.attach_dev = intel_iommu_attach_device,
4406+
.set_dev_pasid = intel_iommu_set_dev_pasid,
4407+
.map_pages = intel_iommu_map_pages,
4408+
.unmap_pages = intel_iommu_unmap_pages,
4409+
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
4410+
.flush_iotlb_all = intel_flush_iotlb_all,
4411+
.iotlb_sync = intel_iommu_tlb_sync,
4412+
.iova_to_phys = intel_iommu_iova_to_phys,
4413+
.free = intel_iommu_domain_free,
4414+
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
4415+
};
4416+
43814417
const struct iommu_ops intel_iommu_ops = {
43824418
.blocked_domain = &blocking_domain,
43834419
.release_domain = &blocking_domain,
@@ -4396,18 +4432,6 @@ const struct iommu_ops intel_iommu_ops = {
43964432
.def_domain_type = device_def_domain_type,
43974433
.pgsize_bitmap = SZ_4K,
43984434
.page_response = intel_iommu_page_response,
4399-
.default_domain_ops = &(const struct iommu_domain_ops) {
4400-
.attach_dev = intel_iommu_attach_device,
4401-
.set_dev_pasid = intel_iommu_set_dev_pasid,
4402-
.map_pages = intel_iommu_map_pages,
4403-
.unmap_pages = intel_iommu_unmap_pages,
4404-
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
4405-
.flush_iotlb_all = intel_flush_iotlb_all,
4406-
.iotlb_sync = intel_iommu_tlb_sync,
4407-
.iova_to_phys = intel_iommu_iova_to_phys,
4408-
.free = intel_iommu_domain_free,
4409-
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
4410-
}
44114435
};
44124436

44134437
static void quirk_iommu_igfx(struct pci_dev *dev)

drivers/iommu/intel/iommu.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1378,6 +1378,18 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
13781378
u8 devfn, int alloc);
13791379

13801380
extern const struct iommu_ops intel_iommu_ops;
1381+
extern const struct iommu_domain_ops intel_fs_paging_domain_ops;
1382+
extern const struct iommu_domain_ops intel_ss_paging_domain_ops;
1383+
1384+
static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
1385+
{
1386+
return domain->domain.ops == &intel_fs_paging_domain_ops;
1387+
}
1388+
1389+
static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
1390+
{
1391+
return domain->domain.ops == &intel_ss_paging_domain_ops;
1392+
}
13811393

13821394
#ifdef CONFIG_INTEL_IOMMU
13831395
extern int intel_iommu_sm;

drivers/iommu/intel/nested.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -216,8 +216,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
216216
/* Must be nested domain */
217217
if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
218218
return ERR_PTR(-EOPNOTSUPP);
219-
if (parent->ops != intel_iommu_ops.default_domain_ops ||
220-
!s2_domain->nested_parent)
219+
if (!intel_domain_is_ss_paging(s2_domain) || !s2_domain->nested_parent)
221220
return ERR_PTR(-EINVAL);
222221

223222
ret = iommu_copy_struct_from_user(&vtd, user_data,
@@ -229,7 +228,6 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
229228
if (!domain)
230229
return ERR_PTR(-ENOMEM);
231230

232-
domain->use_first_level = true;
233231
domain->s2_domain = s2_domain;
234232
domain->s1_cfg = vtd;
235233
domain->domain.ops = &intel_nested_domain_ops;

drivers/iommu/intel/svm.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,6 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
214214
return ERR_PTR(-ENOMEM);
215215

216216
domain->domain.ops = &intel_svm_domain_ops;
217-
domain->use_first_level = true;
218217
INIT_LIST_HEAD(&domain->dev_pasids);
219218
INIT_LIST_HEAD(&domain->cache_tags);
220219
spin_lock_init(&domain->cache_lock);

0 commit comments

Comments (0)