@@ -2523,22 +2523,18 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 						     u32 flags, int pgtable)
 {
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 	struct protection_domain *domain;
-	struct amd_iommu *iommu = NULL;
 	int ret;
 
-	if (dev)
-		iommu = get_amd_iommu_from_dev(dev);
-
 	/*
 	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
 	 * default to use IOMMU_DOMAIN_DMA[_FQ].
 	 */
 	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
 		return ERR_PTR(-EINVAL);
 
-	domain = protection_domain_alloc(type,
-					 dev ? dev_to_node(dev) : NUMA_NO_NODE);
+	domain = protection_domain_alloc(type, dev_to_node(dev));
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
@@ -2554,13 +2550,11 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	domain->domain.geometry.force_aperture = true;
 	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
-	if (iommu) {
-		domain->domain.type = type;
-		domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+	domain->domain.type = type;
+	domain->domain.ops = iommu->iommu.ops->default_domain_ops;
 
-		if (dirty_tracking)
-			domain->domain.dirty_ops = &amd_dirty_ops;
-	}
+	if (dirty_tracking)
+		domain->domain.dirty_ops = &amd_dirty_ops;
 
 	return &domain->domain;
 }
@@ -2571,13 +2565,10 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 				    const struct iommu_user_data *user_data)
 {
 	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
-	struct amd_iommu *iommu = NULL;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
 				    IOMMU_HWPT_ALLOC_PASID;
 
-	if (dev)
-		iommu = get_amd_iommu_from_dev(dev);
-
 	if ((flags & ~supported_flags) || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 
@@ -2591,10 +2582,9 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 
 	/* Allocate domain with v1 page table for dirty tracking */
 	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
-		if (iommu && amd_iommu_hd_support(iommu)) {
-			return do_iommu_domain_alloc(type, dev,
-						     flags, AMD_IOMMU_V1);
-		}
+		if (amd_iommu_hd_support(iommu))
+			return do_iommu_domain_alloc(type, dev, flags,
+						     AMD_IOMMU_V1);
 
 		return ERR_PTR(-EOPNOTSUPP);
 	}
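
Net effect: both allocation paths now assume dev is never NULL, so the deferred, NULL-guarded get_amd_iommu_from_dev() lookup moves into the variable's initializer, the "dev ? dev_to_node(dev) : NUMA_NO_NODE" fallback reduces to a plain dev_to_node(dev), and the "if (iommu)" wrapper around the ops/dirty-tracking setup flattens out. Below is a minimal userspace sketch of the same hoisting pattern; the device/iommu types and the iommu_from_dev() helper are hypothetical stand-ins for illustration, not the kernel API:

#include <stdio.h>

/* Hypothetical stand-ins for struct device and struct amd_iommu;
 * this only illustrates the refactoring pattern, not the kernel code. */
struct device { int node; };
struct iommu { int id; };

static struct iommu only_iommu = { .id = 1 };

/* Stand-in lookup helper: only meaningful for a non-NULL dev. */
static struct iommu *iommu_from_dev(struct device *dev)
{
	(void)dev;
	return &only_iommu;
}

/* Before: dev may be NULL, so the lookup is deferred behind a guard. */
static struct iommu *lookup_guarded(struct device *dev)
{
	struct iommu *iommu = NULL;

	if (dev)
		iommu = iommu_from_dev(dev);
	return iommu;
}

/* After: the caller guarantees dev != NULL, so the guard and the NULL
 * initializer fold into a direct initialization at the declaration. */
static struct iommu *lookup_direct(struct device *dev)
{
	struct iommu *iommu = iommu_from_dev(dev);

	return iommu;
}

int main(void)
{
	struct device dev = { .node = 0 };

	printf("guarded: %d, direct: %d\n",
	       lookup_guarded(&dev)->id, lookup_direct(&dev)->id);
	return 0;
}

Both paths return the same object; the point is that once the non-NULL precondition is established at the call boundary, the guard is dead code and the lookup can happen at the declaration, which is exactly what the hunks above do.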