Commit fb5f50a

Yanfei Xu authored and Joerg Roedel committed
iommu/vt-d: Fix to convert mm pfn to dma pfn
When the VT-d page is smaller than the mm page, converting an mm pfn to a dma pfn must be handled in two cases: one for the start pfn and one for the end pfn. Currently the calculation of the end dma pfn is incorrect; the result is smaller than the real page frame number, so the iova mapping always misses some page frames.

Rename mm_to_dma_pfn() to mm_to_dma_pfn_start() and add a new helper for converting the end dma pfn, named mm_to_dma_pfn_end().

Signed-off-by: Yanfei Xu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]>
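A minimal sketch of what the two helpers compute, assuming a hypothetical configuration with PAGE_SHIFT = 14 (16K MM pages) and VTD_PAGE_SHIFT = 12 (4K VT-d pages); the shift values and the standalone main() are illustrative only and not part of the kernel change:

#include <stdio.h>

/* Hypothetical shifts for illustration: each 16K MM page spans four 4K VT-d pages. */
#define PAGE_SHIFT	14
#define VTD_PAGE_SHIFT	12

/* First VT-d pfn backing a given MM pfn (the renamed helper). */
static unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

/* Last (inclusive) VT-d pfn backing a given MM pfn (the new helper). */
static unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
{
	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
}

int main(void)
{
	unsigned long start_pfn = 0, last_pfn = 2;	/* MM pfns 0..2 inclusive */

	/* Old behaviour: the start conversion was used for both bounds,
	 * giving dma pfns 0..8 and leaving 9..11 of MM page 2 unmapped. */
	printf("old range: %lu..%lu\n",
	       mm_to_dma_pfn_start(start_pfn), mm_to_dma_pfn_start(last_pfn));

	/* Fixed behaviour: dma pfns 0..11, covering all of MM page 2. */
	printf("new range: %lu..%lu\n",
	       mm_to_dma_pfn_start(start_pfn), mm_to_dma_pfn_end(last_pfn));

	return 0;
}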
1 parent 8a3b8e6 commit fb5f50a

1 file changed: 13 additions, 9 deletions


drivers/iommu/intel/iommu.c

Lines changed: 13 additions & 9 deletions
@@ -113,13 +113,17 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
 {
 	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
 }
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
 static inline unsigned long page_to_dma_pfn(struct page *pg)
 {
-	return mm_to_dma_pfn(page_to_pfn(pg));
+	return mm_to_dma_pfn_start(page_to_pfn(pg));
 }
 static inline unsigned long virt_to_dma_pfn(void *p)
 {
@@ -2403,8 +2407,8 @@ static int __init si_domain_init(int hw)
 
 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 			ret = iommu_domain_identity_map(si_domain,
-					mm_to_dma_pfn(start_pfn),
-					mm_to_dma_pfn(end_pfn));
+					mm_to_dma_pfn_start(start_pfn),
+					mm_to_dma_pfn_end(end_pfn));
 			if (ret)
 				return ret;
 		}
@@ -2425,8 +2429,8 @@ static int __init si_domain_init(int hw)
 			continue;
 
 		ret = iommu_domain_identity_map(si_domain,
-				mm_to_dma_pfn(start >> PAGE_SHIFT),
-				mm_to_dma_pfn(end >> PAGE_SHIFT));
+				mm_to_dma_pfn_start(start >> PAGE_SHIFT),
+				mm_to_dma_pfn_end(end >> PAGE_SHIFT));
 		if (ret)
 			return ret;
 	}
@@ -3549,8 +3553,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 				       unsigned long val, void *v)
 {
 	struct memory_notify *mhp = v;
-	unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
-	unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+	unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
+	unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
 					mhp->nr_pages - 1);
 
 	switch (val) {
@@ -4254,7 +4258,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
 	unsigned long i;
 
 	nrpages = aligned_nrpages(gather->start, size);
-	start_pfn = mm_to_dma_pfn(iova_pfn);
+	start_pfn = mm_to_dma_pfn_start(iova_pfn);
 
 	xa_for_each(&dmar_domain->iommu_array, i, info)
 		iommu_flush_iotlb_psi(info->iommu, dmar_domain,
