@@ -1892,11 +1892,6 @@ static int dmar_init_reserved_ranges(void)
 	return 0;
 }
 
-static void domain_reserve_special_ranges(struct dmar_domain *domain)
-{
-	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
-}
-
 static inline int guestwidth_to_adjustwidth(int gaw)
 {
 	int agaw;
@@ -1918,7 +1913,8 @@ static void domain_exit(struct dmar_domain *domain)
 	domain_remove_dev_info(domain);
 
 	/* destroy iovas */
-	put_iova_domain(&domain->iovad);
+	if (domain->domain.type == IOMMU_DOMAIN_DMA)
+		put_iova_domain(&domain->iovad);
 
 	if (domain->pgd) {
 		struct page *freelist;
@@ -2627,19 +2623,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 }
 
 static int iommu_domain_identity_map(struct dmar_domain *domain,
-				     unsigned long long start,
-				     unsigned long long end)
+				     unsigned long first_vpfn,
+				     unsigned long last_vpfn)
 {
-	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
-	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
-	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
-			  dma_to_mm_pfn(last_vpfn))) {
-		pr_err("Reserving iova failed\n");
-		return -ENOMEM;
-	}
-
-	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
 	/*
 	 * RMRR range might have overlap with physical memory range,
 	 * clear it first
@@ -2677,7 +2663,8 @@ static int __init si_domain_init(int hw)
 
 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 			ret = iommu_domain_identity_map(si_domain,
-					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+					mm_to_dma_pfn(start_pfn),
+					mm_to_dma_pfn(end_pfn));
 			if (ret)
 				return ret;
 		}
@@ -4547,58 +4534,37 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 				       unsigned long val, void *v)
 {
 	struct memory_notify *mhp = v;
-	unsigned long long start, end;
-	unsigned long start_vpfn, last_vpfn;
+	unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+	unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+			mhp->nr_pages - 1);
 
 	switch (val) {
 	case MEM_GOING_ONLINE:
-		start = mhp->start_pfn << PAGE_SHIFT;
-		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
-		if (iommu_domain_identity_map(si_domain, start, end)) {
-			pr_warn("Failed to build identity map for [%llx-%llx]\n",
-				start, end);
+		if (iommu_domain_identity_map(si_domain,
+					      start_vpfn, last_vpfn)) {
+			pr_warn("Failed to build identity map for [%lx-%lx]\n",
+				start_vpfn, last_vpfn);
 			return NOTIFY_BAD;
 		}
 		break;
 
 	case MEM_OFFLINE:
 	case MEM_CANCEL_ONLINE:
-		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
-		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
-		while (start_vpfn <= last_vpfn) {
-			struct iova *iova;
+		{
 			struct dmar_drhd_unit *drhd;
 			struct intel_iommu *iommu;
 			struct page *freelist;
 
-			iova = find_iova(&si_domain->iovad, start_vpfn);
-			if (iova == NULL) {
-				pr_debug("Failed get IOVA for PFN %lx\n",
-					 start_vpfn);
-				break;
-			}
-
-			iova = split_and_remove_iova(&si_domain->iovad, iova,
-						     start_vpfn, last_vpfn);
-			if (iova == NULL) {
-				pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
-					start_vpfn, last_vpfn);
-				return NOTIFY_BAD;
-			}
-
-			freelist = domain_unmap(si_domain, iova->pfn_lo,
-						iova->pfn_hi);
+			freelist = domain_unmap(si_domain,
+						start_vpfn, last_vpfn);
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
 				iommu_flush_iotlb_psi(iommu, si_domain,
-					iova->pfn_lo, iova_size(iova),
+					start_vpfn, mhp->nr_pages,
 					!freelist, 0);
 			rcu_read_unlock();
 			dma_free_pagelist(freelist);
-
-			start_vpfn = iova->pfn_hi + 1;
-			free_iova_mem(iova);
 		}
 		break;
 	}
@@ -4626,8 +4592,9 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
 	for (did = 0; did < cap_ndoms(iommu->cap); did++) {
 		domain = get_iommu_domain(iommu, (u16)did);
 
-		if (!domain)
+		if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
 			continue;
+
 		free_cpu_cached_iovas(cpu, &domain->iovad);
 	}
 }
@@ -5037,9 +5004,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-	domain_reserve_special_ranges(domain);
-
 	/* calculate AGAW */
 	domain->gaw = guest_width;
 	adjust_width = guestwidth_to_adjustwidth(guest_width);
@@ -5058,11 +5022,21 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	return 0;
 }
 
+static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
+{
+	init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+	copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
+
+	if (!intel_iommu_strict &&
+	    init_iova_flush_queue(&dmar_domain->iovad,
+				  iommu_flush_iova, iova_entry_free))
+		pr_info("iova flush queue initialization failed\n");
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
 	struct iommu_domain *domain;
-	int ret;
 
 	switch (type) {
 	case IOMMU_DOMAIN_DMA:
@@ -5079,13 +5053,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			return NULL;
 		}
 
-		if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
-			ret = init_iova_flush_queue(&dmar_domain->iovad,
-						    iommu_flush_iova,
-						    iova_entry_free);
-			if (ret)
-				pr_info("iova flush queue initialization failed\n");
-		}
+		if (type == IOMMU_DOMAIN_DMA)
+			intel_init_iova_domain(dmar_domain);
 
 		domain_update_iommu_cap(dmar_domain);
 
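
Note (illustrative sketch, not part of the patch): the reworked iommu_domain_identity_map() and the memory notifier now take VT-d DMA page frame numbers from mm_to_dma_pfn() instead of byte addresses. The self-contained C example below shows that conversion in isolation, assuming the driver's usual values of PAGE_SHIFT and VTD_PAGE_SHIFT (both 12 with 4 KiB pages); the example PFN is made up.

/* Stand-alone illustration of the mm PFN <-> DMA PFN conversion used above. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4 KiB kernel pages */
#define VTD_PAGE_SHIFT	12	/* assumed: VT-d 4 KiB page granularity */

/* Same shift the driver's mm_to_dma_pfn()/dma_to_mm_pfn() macros perform. */
static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	unsigned long start_pfn = 0x100000;	/* hypothetical mm PFN */
	unsigned long first_vpfn = mm_to_dma_pfn(start_pfn);

	/* With equal shifts the two PFN spaces coincide, so the value is unchanged. */
	printf("mm pfn %lx -> dma pfn %lx -> mm pfn %lx\n",
	       start_pfn, first_vpfn, dma_to_mm_pfn(first_vpfn));
	return 0;
}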