Skip to content

Commit dcde1c4

Browse files
rmurphy-arm authored and joergroedel committed
iommu/rockchip: Retire global dma_dev workaround
The global dma_dev trick was mostly because the old domain_alloc op provided no context, so no way to know which IOMMU was to own the pagetable, or if a suitable one even existed at all. In the new multi-instance world with domain_alloc_paging this is no longer a concern - now we know that the given device must be associated with a valid IOMMU instance which provided the op to call in the first place, and therefore that instance can and should be the pagetable owner. To avoid worrying about the lifetime and stability of the rk_domain->iommus list, and keep the lookups simple and efficient, we'll still stash a dma_dev pointer, but now it's accurately per-domain. Signed-off-by: Robin Murphy <[email protected]> Tested-by: Quentin Schulz <[email protected]> Tested-by: Dang Huynh <[email protected]> Reviewed-by: Nicolas Frattaroli <[email protected]> Tested-by: Nicolas Frattaroli <[email protected]> Link: https://lore.kernel.org/r/25dc948a7d35c8142c5719ac22bc523f8524d006.1741886382.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel <[email protected]>
1 parent f90aa59 commit dcde1c4

File tree

1 file changed

+13
-21
lines changed

1 file changed

+13
-21
lines changed

drivers/iommu/rockchip-iommu.c

Lines changed: 13 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@ struct rk_iommu_domain {
8888
dma_addr_t dt_dma;
8989
spinlock_t iommus_lock; /* lock for iommus list */
9090
spinlock_t dt_lock; /* lock for modifying page directory table */
91+
struct device *dma_dev;
9192

9293
struct iommu_domain domain;
9394
};
@@ -123,7 +124,6 @@ struct rk_iommudata {
123124
struct rk_iommu *iommu;
124125
};
125126

126-
static struct device *dma_dev;
127127
static const struct rk_iommu_ops *rk_ops;
128128
static struct iommu_domain rk_identity_domain;
129129

@@ -132,7 +132,7 @@ static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
132132
{
133133
size_t size = count * sizeof(u32); /* count of u32 entry */
134134

135-
dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
135+
dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE);
136136
}
137137

138138
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -734,9 +734,9 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
734734
if (!page_table)
735735
return ERR_PTR(-ENOMEM);
736736

737-
pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
738-
if (dma_mapping_error(dma_dev, pt_dma)) {
739-
dev_err(dma_dev, "DMA mapping error while allocating page table\n");
737+
pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
738+
if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
739+
dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
740740
iommu_free_page(page_table);
741741
return ERR_PTR(-ENOMEM);
742742
}
@@ -1051,9 +1051,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
10511051
static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
10521052
{
10531053
struct rk_iommu_domain *rk_domain;
1054-
1055-
if (!dma_dev)
1056-
return NULL;
1054+
struct rk_iommu *iommu;
10571055

10581056
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
10591057
if (!rk_domain)
@@ -1068,10 +1066,12 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
10681066
if (!rk_domain->dt)
10691067
goto err_free_domain;
10701068

1071-
rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
1069+
iommu = rk_iommu_from_dev(dev);
1070+
rk_domain->dma_dev = iommu->dev;
1071+
rk_domain->dt_dma = dma_map_single(rk_domain->dma_dev, rk_domain->dt,
10721072
SPAGE_SIZE, DMA_TO_DEVICE);
1073-
if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
1074-
dev_err(dma_dev, "DMA map error for DT\n");
1073+
if (dma_mapping_error(rk_domain->dma_dev, rk_domain->dt_dma)) {
1074+
dev_err(rk_domain->dma_dev, "DMA map error for DT\n");
10751075
goto err_free_dt;
10761076
}
10771077

@@ -1105,13 +1105,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
11051105
if (rk_dte_is_pt_valid(dte)) {
11061106
phys_addr_t pt_phys = rk_ops->pt_address(dte);
11071107
u32 *page_table = phys_to_virt(pt_phys);
1108-
dma_unmap_single(dma_dev, pt_phys,
1108+
dma_unmap_single(rk_domain->dma_dev, pt_phys,
11091109
SPAGE_SIZE, DMA_TO_DEVICE);
11101110
iommu_free_page(page_table);
11111111
}
11121112
}
11131113

1114-
dma_unmap_single(dma_dev, rk_domain->dt_dma,
1114+
dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
11151115
SPAGE_SIZE, DMA_TO_DEVICE);
11161116
iommu_free_page(rk_domain->dt);
11171117

@@ -1256,14 +1256,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
12561256
if (err)
12571257
return err;
12581258

1259-
/*
1260-
* Use the first registered IOMMU device for domain to use with DMA
1261-
* API, since a domain might not physically correspond to a single
1262-
* IOMMU device..
1263-
*/
1264-
if (!dma_dev)
1265-
dma_dev = &pdev->dev;
1266-
12671259
pm_runtime_enable(dev);
12681260

12691261
for (i = 0; i < iommu->num_irq; i++) {

0 commit comments

Comments (0)