@@ -2720,17 +2720,6 @@ static int __init si_domain_init(int hw)
 	return 0;
 }
 
-static int identity_mapping(struct device *dev)
-{
-	struct device_domain_info *info;
-
-	info = dev->archdata.iommu;
-	if (info)
-		return (info->domain == si_domain);
-
-	return 0;
-}
-
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct dmar_domain *ndomain;
@@ -3315,18 +3304,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
 	return iova_pfn;
 }
 
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
-	if (iommu_dummy(dev))
-		return false;
-
-	if (unlikely(attach_deferred(dev)))
-		do_deferred_attach(dev);
-
-	return !identity_mapping(dev);
-}
-
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
 {
@@ -3340,6 +3317,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
+
 	domain = find_domain(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3391,20 +3371,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 enum dma_data_direction dir,
				 unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		return __intel_map_single(dev, page_to_phys(page) + offset,
-					  size, dir, *dev->dma_mask);
-	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	return __intel_map_single(dev, page_to_phys(page) + offset,
+				  size, dir, *dev->dma_mask);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		return __intel_map_single(dev, phys_addr, size, dir,
-					  *dev->dma_mask);
-	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3455,17 +3430,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		intel_unmap(dev, dev_addr, size);
-	else
-		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+	intel_unmap(dev, dev_addr, size);
 }
 
 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (iommu_need_mapping(dev))
-		intel_unmap(dev, dev_addr, size);
+	intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3475,8 +3446,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	int order;
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -3511,9 +3482,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 	int order;
 	struct page *page = virt_to_page(vaddr);
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
@@ -3531,9 +3499,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
-	if (!iommu_need_mapping(dev))
-		return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
 	for_each_sg(sglist, sg, nelems, i) {
 		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
 	}
@@ -3557,8 +3522,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (!iommu_need_mapping(dev))
-		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+
+	if (unlikely(attach_deferred(dev)))
+		do_deferred_attach(dev);
 
 	domain = find_domain(dev);
 	if (!domain)
@@ -3605,8 +3571,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 static u64 intel_get_required_mask(struct device *dev)
 {
-	if (!iommu_need_mapping(dev))
-		return dma_direct_get_required_mask(dev);
 	return DMA_BIT_MASK(32);
 }
 
@@ -4888,8 +4852,6 @@ int __init intel_iommu_init(void)
 	}
 	up_write(&dmar_global_lock);
 
-	dma_ops = &intel_dma_ops;
-
 	init_iommu_pm_ops();
 
 	down_read(&dmar_global_lock);
@@ -5479,11 +5441,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
 	if (translation_pre_enabled(iommu))
 		dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
 
-	if (device_needs_bounce(dev)) {
-		dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
-		set_dma_ops(dev, &bounce_dma_ops);
-	}
-
 	return &iommu->iommu;
 }
 
@@ -5498,7 +5455,19 @@ static void intel_iommu_release_device(struct device *dev)
 
 	dmar_remove_one_dev_info(dev);
 
+	set_dma_ops(dev, NULL);
+}
+
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(dev);
 	if (device_needs_bounce(dev))
+		set_dma_ops(dev, &bounce_dma_ops);
+	else if (domain && domain->type == IOMMU_DOMAIN_DMA)
+		set_dma_ops(dev, &intel_dma_ops);
+	else
 		set_dma_ops(dev, NULL);
 }
 
@@ -5830,6 +5799,7 @@ const struct iommu_ops intel_iommu_ops = {
	.unmap			= intel_iommu_unmap,
	.iova_to_phys		= intel_iommu_iova_to_phys,
	.probe_device		= intel_iommu_probe_device,
+	.probe_finalize		= intel_iommu_probe_finalize,
	.release_device		= intel_iommu_release_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.put_resv_regions	= generic_iommu_put_resv_regions,
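The net effect of this patch is that dma_ops selection moves from a single global assignment (and per-call iommu_need_mapping()/identity_mapping() checks) to a one-time, per-device decision in intel_iommu_probe_finalize(): bounce ops for devices that need bounce buffering, intel_dma_ops for devices attached to a DMA domain, and NULL (i.e. dma-direct) for identity-mapped devices. The standalone sketch below models only that decision logic outside the kernel; the type and function names in it (dev_model, pick_dma_ops, DOMAIN_IDENTITY) are illustrative and are not part of the patched code.

/*
 * Userspace model of the per-device dma_ops choice made in
 * intel_iommu_probe_finalize() after this patch. Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum domain_type { DOMAIN_IDENTITY, DOMAIN_DMA };

struct dev_model {
	const char *name;
	bool needs_bounce;		/* device_needs_bounce() analogue */
	enum domain_type domain;	/* result of iommu_get_domain_for_dev() */
};

/* Mirror the if/else-if/else cascade added to intel_iommu_probe_finalize(). */
static const char *pick_dma_ops(const struct dev_model *dev)
{
	if (dev->needs_bounce)
		return "bounce_dma_ops";
	if (dev->domain == DOMAIN_DMA)
		return "intel_dma_ops";
	return "NULL (dma-direct)";
}

int main(void)
{
	const struct dev_model devs[] = {
		{ "untrusted dev", true,  DOMAIN_DMA },
		{ "translated dev", false, DOMAIN_DMA },
		{ "identity dev",  false, DOMAIN_IDENTITY },
	};

	for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		printf("%-14s -> %s\n", devs[i].name, pick_dma_ops(&devs[i]));

	return 0;
}

Because the fallback case leaves dma_ops as NULL, identity-mapped devices go through dma-direct without any per-mapping branch, which is why the dma_direct_* fallbacks in the map/unmap paths above could be deleted.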