
Commit 6fc7020

Lu Baolu authored and joergroedel committed
iommu/vt-d: Apply per-device dma_ops
The current Intel IOMMU driver sets the system-level dma_ops. This causes every DMA API call to go through the IOMMU driver even when the device is using an identity-mapped domain. Set per-device dma_ops only if a device is using a DMA domain; otherwise, leave the default system-level dma_ops in place for direct DMA.

Signed-off-by: Lu Baolu <[email protected]>
Tested-by: Daniel Drake <[email protected]>
Reviewed-by: Jon Derrick <[email protected]>
Reviewed-by: Jerry Snitselaar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
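As a condensed sketch of the idea (the helper name below is illustrative only, not the exact driver code; the real hunks follow in the diff), dma_ops selection now happens per device once its default domain is known:

/*
 * Sketch: choose per-device dma_ops at probe_finalize time based on the
 * device's default domain type, instead of installing a global dma_ops
 * pointer during intel_iommu_init().
 */
static void example_probe_finalize(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (domain && domain->type == IOMMU_DOMAIN_DMA)
		set_dma_ops(dev, &intel_dma_ops);	/* IOVA-translated DMA */
	else
		set_dma_ops(dev, NULL);			/* identity-mapped: fall back to dma-direct */
}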
1 parent 14b3526 commit 6fc7020

File tree

1 file changed: +26 -56 lines changed


drivers/iommu/intel-iommu.c

Lines changed: 26 additions & 56 deletions
@@ -2720,17 +2720,6 @@ static int __init si_domain_init(int hw)
 	return 0;
 }
 
-static int identity_mapping(struct device *dev)
-{
-	struct device_domain_info *info;
-
-	info = dev->archdata.iommu;
-	if (info)
-		return (info->domain == si_domain);
-
-	return 0;
-}
-
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct dmar_domain *ndomain;
@@ -3315,18 +3304,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
 	return iova_pfn;
 }
 
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
-	if (iommu_dummy(dev))
-		return false;
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	return !identity_mapping(dev);
-}
-
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
@@ -3340,6 +3317,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
+
 	domain = find_domain(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3391,20 +3371,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 				 enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		return __intel_map_single(dev, page_to_phys(page) + offset,
-				size, dir, *dev->dma_mask);
-	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	return __intel_map_single(dev, page_to_phys(page) + offset,
+				  size, dir, *dev->dma_mask);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
 				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		return __intel_map_single(dev, phys_addr, size, dir,
-				*dev->dma_mask);
-	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3455,17 +3430,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 			     size_t size, enum dma_data_direction dir,
 			     unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		intel_unmap(dev, dev_addr, size);
-	else
-		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+	intel_unmap(dev, dev_addr, size);
 }
 
 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		intel_unmap(dev, dev_addr, size);
+	intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3475,8 +3446,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	int order;
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -3511,9 +3482,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 	int order;
 	struct page *page = virt_to_page(vaddr);
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
@@ -3531,9 +3499,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
 	for_each_sg(sglist, sg, nelems, i) {
 		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
 	}
@@ -3557,8 +3522,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (!iommu_need_mapping(dev))
-		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
 
 	domain = find_domain(dev);
 	if (!domain)
@@ -3605,8 +3571,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 static u64 intel_get_required_mask(struct device *dev)
 {
-	if (!iommu_need_mapping(dev))
-		return dma_direct_get_required_mask(dev);
 	return DMA_BIT_MASK(32);
 }
 
@@ -4888,8 +4852,6 @@ int __init intel_iommu_init(void)
 	}
 	up_write(&dmar_global_lock);
 
-	dma_ops = &intel_dma_ops;
-
 	init_iommu_pm_ops();
 
 	down_read(&dmar_global_lock);
@@ -5479,11 +5441,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 	if (translation_pre_enabled(iommu))
 		dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
 
-	if (device_needs_bounce(dev)) {
-		dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
-		set_dma_ops(dev, &bounce_dma_ops);
-	}
-
 	return &iommu->iommu;
 }
 
@@ -5498,7 +5455,19 @@ static void intel_iommu_release_device(struct device *dev)
 
 	dmar_remove_one_dev_info(dev);
 
+	set_dma_ops(dev, NULL);
+}
+
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(dev);
 	if (device_needs_bounce(dev))
+		set_dma_ops(dev, &bounce_dma_ops);
+	else if (domain && domain->type == IOMMU_DOMAIN_DMA)
+		set_dma_ops(dev, &intel_dma_ops);
+	else
 		set_dma_ops(dev, NULL);
 }
 
@@ -5830,6 +5799,7 @@ const struct iommu_ops intel_iommu_ops = {
 	.unmap			= intel_iommu_unmap,
 	.iova_to_phys		= intel_iommu_iova_to_phys,
 	.probe_device		= intel_iommu_probe_device,
+	.probe_finalize		= intel_iommu_probe_finalize,
 	.release_device		= intel_iommu_release_device,
 	.get_resv_regions	= intel_iommu_get_resv_regions,
 	.put_resv_regions	= generic_iommu_put_resv_regions,

0 commit comments