Skip to content

Commit 2b989ab

Browse files
Lu Baolu authored and Will Deacon committed
iommu/vt-d: Add helper to allocate paging domain
The domain_alloc_user operation is currently implemented by allocating a paging domain using iommu_domain_alloc(). This is because it needs to fully initialize the domain before return. Add a helper to do this to avoid using iommu_domain_alloc(). Signed-off-by: Lu Baolu <[email protected]> Reviewed-by: Jason Gunthorpe <[email protected]> Link: https://lore.kernel.org/r/[email protected] Reviewed-by: Yi Liu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent 804f98e commit 2b989ab

File tree

1 file changed

+81
-9
lines changed

1 file changed

+81
-9
lines changed

drivers/iommu/intel/iommu.c

Lines changed: 81 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3622,6 +3622,79 @@ static struct iommu_domain blocking_domain = {
36223622
}
36233623
};
36243624

3625+
static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
3626+
{
3627+
if (!intel_iommu_superpage)
3628+
return 0;
3629+
3630+
if (first_stage)
3631+
return cap_fl1gp_support(iommu->cap) ? 2 : 1;
3632+
3633+
return fls(cap_super_page_val(iommu->cap));
3634+
}
3635+
3636+
static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
3637+
{
3638+
struct device_domain_info *info = dev_iommu_priv_get(dev);
3639+
struct intel_iommu *iommu = info->iommu;
3640+
struct dmar_domain *domain;
3641+
int addr_width;
3642+
3643+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
3644+
if (!domain)
3645+
return ERR_PTR(-ENOMEM);
3646+
3647+
INIT_LIST_HEAD(&domain->devices);
3648+
INIT_LIST_HEAD(&domain->dev_pasids);
3649+
INIT_LIST_HEAD(&domain->cache_tags);
3650+
spin_lock_init(&domain->lock);
3651+
spin_lock_init(&domain->cache_lock);
3652+
xa_init(&domain->iommu_array);
3653+
3654+
domain->nid = dev_to_node(dev);
3655+
domain->has_iotlb_device = info->ats_enabled;
3656+
domain->use_first_level = first_stage;
3657+
3658+
/* calculate the address width */
3659+
addr_width = agaw_to_width(iommu->agaw);
3660+
if (addr_width > cap_mgaw(iommu->cap))
3661+
addr_width = cap_mgaw(iommu->cap);
3662+
domain->gaw = addr_width;
3663+
domain->agaw = iommu->agaw;
3664+
domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
3665+
3666+
/* iommu memory access coherency */
3667+
domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
3668+
3669+
/* pagesize bitmap */
3670+
domain->domain.pgsize_bitmap = SZ_4K;
3671+
domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
3672+
domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
3673+
3674+
/*
3675+
* IOVA aperture: First-level translation restricts the input-address
3676+
* to a canonical address (i.e., address bits 63:N have the same value
3677+
* as address bit [N-1], where N is 48-bits with 4-level paging and
3678+
* 57-bits with 5-level paging). Hence, skip bit [N-1].
3679+
*/
3680+
domain->domain.geometry.force_aperture = true;
3681+
domain->domain.geometry.aperture_start = 0;
3682+
if (first_stage)
3683+
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
3684+
else
3685+
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
3686+
3687+
/* always allocate the top pgd */
3688+
domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
3689+
if (!domain->pgd) {
3690+
kfree(domain);
3691+
return ERR_PTR(-ENOMEM);
3692+
}
3693+
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3694+
3695+
return domain;
3696+
}
3697+
36253698
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
36263699
{
36273700
struct dmar_domain *dmar_domain;
@@ -3684,15 +3757,14 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
36843757
if (user_data || (dirty_tracking && !ssads_supported(iommu)))
36853758
return ERR_PTR(-EOPNOTSUPP);
36863759

3687-
/*
3688-
* domain_alloc_user op needs to fully initialize a domain before
3689-
* return, so uses iommu_domain_alloc() here for simple.
3690-
*/
3691-
domain = iommu_domain_alloc(dev->bus);
3692-
if (!domain)
3693-
return ERR_PTR(-ENOMEM);
3694-
3695-
dmar_domain = to_dmar_domain(domain);
3760+
/* Do not use first stage for user domain translation. */
3761+
dmar_domain = paging_domain_alloc(dev, false);
3762+
if (IS_ERR(dmar_domain))
3763+
return ERR_CAST(dmar_domain);
3764+
domain = &dmar_domain->domain;
3765+
domain->type = IOMMU_DOMAIN_UNMANAGED;
3766+
domain->owner = &intel_iommu_ops;
3767+
domain->ops = intel_iommu_ops.default_domain_ops;
36963768

36973769
if (nested_parent) {
36983770
dmar_domain->nested_parent = true;

0 commit comments

Comments
 (0)