Skip to content

Commit a98db51

Browse files
LuBaolu authored and joergroedel committed
iommu/vt-d: Enhance compatibility check for paging domain attach
The driver now supports domain_alloc_paging, ensuring that a valid device pointer is provided whenever a paging domain is allocated. Additionally, the dmar_domain attributes are set up at the time of allocation. Consistent with the established semantics in the IOMMU core, if a domain is attached to a device and found to be incompatible with the IOMMU hardware capabilities, the operation will return an -EINVAL error. This implicitly advises the caller to allocate a new domain for the device and attempt the domain attachment again. Rename prepare_domain_attach_device() to a more meaningful name. Signed-off-by: Lu Baolu <[email protected]> Reviewed-by: Jason Gunthorpe <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 9ecfcac commit a98db51

File tree

4 files changed

+26
-77
lines changed

4 files changed

+26
-77
lines changed

drivers/iommu/intel/iommu.c

Lines changed: 23 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -1606,7 +1606,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
16061606
int translation = CONTEXT_TT_MULTI_LEVEL;
16071607
struct dma_pte *pgd = domain->pgd;
16081608
struct context_entry *context;
1609-
int agaw, ret;
1609+
int ret;
16101610

16111611
pr_debug("Set context mapping for %02x:%02x.%d\n",
16121612
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1623,27 +1623,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
16231623

16241624
copied_context_tear_down(iommu, context, bus, devfn);
16251625
context_clear_entry(context);
1626-
16271626
context_set_domain_id(context, did);
16281627

1629-
/*
1630-
* Skip top levels of page tables for iommu which has
1631-
* less agaw than default. Unnecessary for PT mode.
1632-
*/
1633-
for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1634-
ret = -ENOMEM;
1635-
pgd = phys_to_virt(dma_pte_addr(pgd));
1636-
if (!dma_pte_present(pgd))
1637-
goto out_unlock;
1638-
}
1639-
16401628
if (info && info->ats_supported)
16411629
translation = CONTEXT_TT_DEV_IOTLB;
16421630
else
16431631
translation = CONTEXT_TT_MULTI_LEVEL;
16441632

16451633
context_set_address_root(context, virt_to_phys(pgd));
1646-
context_set_address_width(context, agaw);
1634+
context_set_address_width(context, domain->agaw);
16471635
context_set_translation_type(context, translation);
16481636
context_set_fault_enable(context);
16491637
context_set_present(context);
@@ -1876,20 +1864,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
18761864
u32 pasid)
18771865
{
18781866
struct dma_pte *pgd = domain->pgd;
1879-
int agaw, level;
1880-
int flags = 0;
1867+
int level, flags = 0;
18811868

1882-
/*
1883-
* Skip top levels of page tables for iommu which has
1884-
* less agaw than default. Unnecessary for PT mode.
1885-
*/
1886-
for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1887-
pgd = phys_to_virt(dma_pte_addr(pgd));
1888-
if (!dma_pte_present(pgd))
1889-
return -ENOMEM;
1890-
}
1891-
1892-
level = agaw_to_level(agaw);
1869+
level = agaw_to_level(domain->agaw);
18931870
if (level != 4 && level != 5)
18941871
return -EINVAL;
18951872

@@ -3492,42 +3469,41 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
34923469
domain_exit(dmar_domain);
34933470
}
34943471

3495-
int prepare_domain_attach_device(struct iommu_domain *domain,
3496-
struct device *dev)
3472+
int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
34973473
{
34983474
struct device_domain_info *info = dev_iommu_priv_get(dev);
34993475
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
35003476
struct intel_iommu *iommu = info->iommu;
35013477
int addr_width;
35023478

3479+
if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING)))
3480+
return -EPERM;
3481+
35033482
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
35043483
return -EINVAL;
35053484

35063485
if (domain->dirty_ops && !ssads_supported(iommu))
35073486
return -EINVAL;
35083487

3488+
if (dmar_domain->iommu_coherency !=
3489+
iommu_paging_structure_coherency(iommu))
3490+
return -EINVAL;
3491+
3492+
if (dmar_domain->iommu_superpage !=
3493+
iommu_superpage_capability(iommu, dmar_domain->use_first_level))
3494+
return -EINVAL;
3495+
3496+
if (dmar_domain->use_first_level &&
3497+
(!sm_supported(iommu) || !ecap_flts(iommu->ecap)))
3498+
return -EINVAL;
3499+
35093500
/* check if this iommu agaw is sufficient for max mapped address */
35103501
addr_width = agaw_to_width(iommu->agaw);
35113502
if (addr_width > cap_mgaw(iommu->cap))
35123503
addr_width = cap_mgaw(iommu->cap);
35133504

3514-
if (dmar_domain->max_addr > (1LL << addr_width))
3505+
if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw)
35153506
return -EINVAL;
3516-
dmar_domain->gaw = addr_width;
3517-
3518-
/*
3519-
* Knock out extra levels of page tables if necessary
3520-
*/
3521-
while (iommu->agaw < dmar_domain->agaw) {
3522-
struct dma_pte *pte;
3523-
3524-
pte = dmar_domain->pgd;
3525-
if (dma_pte_present(pte)) {
3526-
dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
3527-
iommu_free_page(pte);
3528-
}
3529-
dmar_domain->agaw--;
3530-
}
35313507

35323508
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
35333509
context_copied(iommu, info->bus, info->devfn))
@@ -3543,7 +3519,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
35433519

35443520
device_block_translation(dev);
35453521

3546-
ret = prepare_domain_attach_device(domain, dev);
3522+
ret = paging_domain_compatible(domain, dev);
35473523
if (ret)
35483524
return ret;
35493525

@@ -4214,7 +4190,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
42144190
if (context_copied(iommu, info->bus, info->devfn))
42154191
return -EBUSY;
42164192

4217-
ret = prepare_domain_attach_device(domain, dev);
4193+
ret = paging_domain_compatible(domain, dev);
42184194
if (ret)
42194195
return ret;
42204196

drivers/iommu/intel/iommu.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1230,8 +1230,7 @@ void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
12301230
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
12311231
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
12321232
void device_block_translation(struct device *dev);
1233-
int prepare_domain_attach_device(struct iommu_domain *domain,
1234-
struct device *dev);
1233+
int paging_domain_compatible(struct iommu_domain *domain, struct device *dev);
12351234
void domain_update_iommu_cap(struct dmar_domain *domain);
12361235

12371236
int dmar_ir_support(void);

drivers/iommu/intel/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
4040
* The s2_domain will be used in nested translation, hence needs
4141
* to ensure the s2_domain is compatible with this IOMMU.
4242
*/
43-
ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain, dev);
43+
ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev);
4444
if (ret) {
4545
dev_err_ratelimited(dev, "s2 domain is not compatible\n");
4646
return ret;

drivers/iommu/intel/pasid.c

Lines changed: 1 addition & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -345,25 +345,6 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
345345
return 0;
346346
}
347347

348-
/*
349-
* Skip top levels of page tables for iommu which has less agaw
350-
* than default. Unnecessary for PT mode.
351-
*/
352-
static int iommu_skip_agaw(struct dmar_domain *domain,
353-
struct intel_iommu *iommu,
354-
struct dma_pte **pgd)
355-
{
356-
int agaw;
357-
358-
for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
359-
*pgd = phys_to_virt(dma_pte_addr(*pgd));
360-
if (!dma_pte_present(*pgd))
361-
return -EINVAL;
362-
}
363-
364-
return agaw;
365-
}
366-
367348
/*
368349
* Set up the scalable mode pasid entry for second only translation type.
369350
*/
@@ -374,7 +355,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
374355
struct pasid_entry *pte;
375356
struct dma_pte *pgd;
376357
u64 pgd_val;
377-
int agaw;
378358
u16 did;
379359

380360
/*
@@ -388,12 +368,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
388368
}
389369

390370
pgd = domain->pgd;
391-
agaw = iommu_skip_agaw(domain, iommu, &pgd);
392-
if (agaw < 0) {
393-
dev_err(dev, "Invalid domain page table\n");
394-
return -EINVAL;
395-
}
396-
397371
pgd_val = virt_to_phys(pgd);
398372
did = domain_id_iommu(domain, iommu);
399373

@@ -412,7 +386,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
412386
pasid_clear_entry(pte);
413387
pasid_set_domain_id(pte, did);
414388
pasid_set_slptr(pte, pgd_val);
415-
pasid_set_address_width(pte, agaw);
389+
pasid_set_address_width(pte, domain->agaw);
416390
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
417391
pasid_set_fault_enable(pte);
418392
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

0 commit comments

Comments (0)