Skip to content

Commit a016e53

Browse files
Lu Baolu authored and joergroedel committed
iommu/vt-d: Remove scalable mode context entry setup from attach_dev
The scalable mode context entry is now set up in the probe_device path, eliminating the need to configure it in the attach_dev path. Remove the redundant code from the attach_dev path to avoid dead code. Signed-off-by: Lu Baolu <[email protected]> Reviewed-by: Kevin Tian <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 301f1a8 commit a016e53

File tree

1 file changed

+44
-112
lines changed

1 file changed

+44
-112
lines changed

drivers/iommu/intel/iommu.c

Lines changed: 44 additions & 112 deletions
Original file line number | Diff line number | Diff line change
@@ -1802,34 +1802,17 @@ static void domain_exit(struct dmar_domain *domain)
18021802
kfree(domain);
18031803
}
18041804

1805-
/*
1806-
* Get the PASID directory size for scalable mode context entry.
1807-
* Value of X in the PDTS field of a scalable mode context entry
1808-
* indicates PASID directory with 2^(X + 7) entries.
1809-
*/
1810-
static unsigned long context_get_sm_pds(struct pasid_table *table)
1811-
{
1812-
unsigned long pds, max_pde;
1813-
1814-
max_pde = table->max_pasid >> PASID_PDE_SHIFT;
1815-
pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
1816-
if (pds < 7)
1817-
return 0;
1818-
1819-
return pds - 7;
1820-
}
1821-
18221805
static int domain_context_mapping_one(struct dmar_domain *domain,
18231806
struct intel_iommu *iommu,
1824-
struct pasid_table *table,
18251807
u8 bus, u8 devfn)
18261808
{
18271809
struct device_domain_info *info =
18281810
domain_lookup_dev_info(domain, iommu, bus, devfn);
18291811
u16 did = domain_id_iommu(domain, iommu);
18301812
int translation = CONTEXT_TT_MULTI_LEVEL;
1813+
struct dma_pte *pgd = domain->pgd;
18311814
struct context_entry *context;
1832-
int ret;
1815+
int agaw, ret;
18331816

18341817
if (hw_pass_through && domain_type_is_si(domain))
18351818
translation = CONTEXT_TT_PASS_THROUGH;
@@ -1872,65 +1855,37 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
18721855
}
18731856

18741857
context_clear_entry(context);
1858+
context_set_domain_id(context, did);
18751859

1876-
if (sm_supported(iommu)) {
1877-
unsigned long pds;
1878-
1879-
/* Setup the PASID DIR pointer: */
1880-
pds = context_get_sm_pds(table);
1881-
context->lo = (u64)virt_to_phys(table->table) |
1882-
context_pdts(pds);
1883-
1884-
/* Setup the RID_PASID field: */
1885-
context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
1886-
1860+
if (translation != CONTEXT_TT_PASS_THROUGH) {
18871861
/*
1888-
* Setup the Device-TLB enable bit and Page request
1889-
* Enable bit:
1862+
* Skip top levels of page tables for iommu which has
1863+
* less agaw than default. Unnecessary for PT mode.
18901864
*/
1891-
if (info && info->ats_supported)
1892-
context_set_sm_dte(context);
1893-
if (info && info->pri_supported)
1894-
context_set_sm_pre(context);
1895-
if (info && info->pasid_supported)
1896-
context_set_pasid(context);
1897-
} else {
1898-
struct dma_pte *pgd = domain->pgd;
1899-
int agaw;
1900-
1901-
context_set_domain_id(context, did);
1902-
1903-
if (translation != CONTEXT_TT_PASS_THROUGH) {
1904-
/*
1905-
* Skip top levels of page tables for iommu which has
1906-
* less agaw than default. Unnecessary for PT mode.
1907-
*/
1908-
for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1909-
ret = -ENOMEM;
1910-
pgd = phys_to_virt(dma_pte_addr(pgd));
1911-
if (!dma_pte_present(pgd))
1912-
goto out_unlock;
1913-
}
1914-
1915-
if (info && info->ats_supported)
1916-
translation = CONTEXT_TT_DEV_IOTLB;
1917-
else
1918-
translation = CONTEXT_TT_MULTI_LEVEL;
1919-
1920-
context_set_address_root(context, virt_to_phys(pgd));
1921-
context_set_address_width(context, agaw);
1922-
} else {
1923-
/*
1924-
* In pass through mode, AW must be programmed to
1925-
* indicate the largest AGAW value supported by
1926-
* hardware. And ASR is ignored by hardware.
1927-
*/
1928-
context_set_address_width(context, iommu->msagaw);
1865+
for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
1866+
ret = -ENOMEM;
1867+
pgd = phys_to_virt(dma_pte_addr(pgd));
1868+
if (!dma_pte_present(pgd))
1869+
goto out_unlock;
19291870
}
19301871

1931-
context_set_translation_type(context, translation);
1872+
if (info && info->ats_supported)
1873+
translation = CONTEXT_TT_DEV_IOTLB;
1874+
else
1875+
translation = CONTEXT_TT_MULTI_LEVEL;
1876+
1877+
context_set_address_root(context, virt_to_phys(pgd));
1878+
context_set_address_width(context, agaw);
1879+
} else {
1880+
/*
1881+
* In pass through mode, AW must be programmed to
1882+
* indicate the largest AGAW value supported by
1883+
* hardware. And ASR is ignored by hardware.
1884+
*/
1885+
context_set_address_width(context, iommu->msagaw);
19321886
}
19331887

1888+
context_set_translation_type(context, translation);
19341889
context_set_fault_enable(context);
19351890
context_set_present(context);
19361891
if (!ecap_coherent(iommu->ecap))
@@ -1960,43 +1915,29 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
19601915
return ret;
19611916
}
19621917

1963-
struct domain_context_mapping_data {
1964-
struct dmar_domain *domain;
1965-
struct intel_iommu *iommu;
1966-
struct pasid_table *table;
1967-
};
1968-
19691918
static int domain_context_mapping_cb(struct pci_dev *pdev,
19701919
u16 alias, void *opaque)
19711920
{
1972-
struct domain_context_mapping_data *data = opaque;
1921+
struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev);
1922+
struct intel_iommu *iommu = info->iommu;
1923+
struct dmar_domain *domain = opaque;
19731924

1974-
return domain_context_mapping_one(data->domain, data->iommu,
1975-
data->table, PCI_BUS_NUM(alias),
1976-
alias & 0xff);
1925+
return domain_context_mapping_one(domain, iommu,
1926+
PCI_BUS_NUM(alias), alias & 0xff);
19771927
}
19781928

19791929
static int
19801930
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
19811931
{
19821932
struct device_domain_info *info = dev_iommu_priv_get(dev);
1983-
struct domain_context_mapping_data data;
19841933
struct intel_iommu *iommu = info->iommu;
19851934
u8 bus = info->bus, devfn = info->devfn;
1986-
struct pasid_table *table;
1987-
1988-
table = intel_pasid_get_table(dev);
19891935

19901936
if (!dev_is_pci(dev))
1991-
return domain_context_mapping_one(domain, iommu, table,
1992-
bus, devfn);
1993-
1994-
data.domain = domain;
1995-
data.iommu = iommu;
1996-
data.table = table;
1937+
return domain_context_mapping_one(domain, iommu, bus, devfn);
19971938

19981939
return pci_for_each_dma_alias(to_pci_dev(dev),
1999-
&domain_context_mapping_cb, &data);
1940+
domain_context_mapping_cb, domain);
20001941
}
20011942

20021943
/* Returns a number of VTD pages, but aligned to MM page size */
@@ -2353,28 +2294,19 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
23532294
list_add(&info->link, &domain->devices);
23542295
spin_unlock_irqrestore(&domain->lock, flags);
23552296

2356-
/* PASID table is mandatory for a PCI device in scalable mode. */
2357-
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
2358-
/* Setup the PASID entry for requests without PASID: */
2359-
if (hw_pass_through && domain_type_is_si(domain))
2360-
ret = intel_pasid_setup_pass_through(iommu,
2361-
dev, IOMMU_NO_PASID);
2362-
else if (domain->use_first_level)
2363-
ret = domain_setup_first_level(iommu, domain, dev,
2364-
IOMMU_NO_PASID);
2365-
else
2366-
ret = intel_pasid_setup_second_level(iommu, domain,
2367-
dev, IOMMU_NO_PASID);
2368-
if (ret) {
2369-
dev_err(dev, "Setup RID2PASID failed\n");
2370-
device_block_translation(dev);
2371-
return ret;
2372-
}
2373-
}
2297+
if (dev_is_real_dma_subdevice(dev))
2298+
return 0;
2299+
2300+
if (!sm_supported(iommu))
2301+
ret = domain_context_mapping(domain, dev);
2302+
else if (hw_pass_through && domain_type_is_si(domain))
2303+
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
2304+
else if (domain->use_first_level)
2305+
ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
2306+
else
2307+
ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
23742308

2375-
ret = domain_context_mapping(domain, dev);
23762309
if (ret) {
2377-
dev_err(dev, "Domain context map failed\n");
23782310
device_block_translation(dev);
23792311
return ret;
23802312
}

0 commit comments

Comments
 (0)