Skip to content

Commit 2780025

Browse files
jpemartins authored and jgunthorpe committed
iommufd/iova_bitmap: Handle recording beyond the mapped pages
IOVA bitmap is a zero-copy scheme of recording dirty bits that iterate the different bitmap user pages at chunks of a maximum of PAGE_SIZE/sizeof(struct page*) pages. When the iterations are split up into 64G, the end of the range may be broken up in a way that's aligned with a non base page PTE size. This leads to only part of the huge page being recorded in the bitmap. Note that in pratice this is only a problem for IOMMU dirty tracking i.e. when the backing PTEs are in IOMMU hugepages and the bitmap is in base page granularity. So far this not something that affects VF dirty trackers (which reports and records at the same granularity). To fix that, if there is a remainder of bits left to set in which the current IOVA bitmap doesn't cover, make a copy of the bitmap structure and iterate-and-set the rest of the bits remaining. Finally, when advancing the iterator, skip all the bits that were set ahead. Link: https://lore.kernel.org/r/[email protected] Reported-by: Avihai Horon <[email protected]> Fixes: f35f22c ("iommu/vt-d: Access/Dirty bit support for SS domains") Fixes: 421a511 ("iommu/amd: Access/Dirty bit support in IOPTEs") Signed-off-by: Joao Martins <[email protected]> Tested-by: Avihai Horon <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 42af951 commit 2780025

File tree

1 file changed

+43
-0
lines changed

1 file changed

+43
-0
lines changed

drivers/iommu/iommufd/iova_bitmap.c

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,9 @@ struct iova_bitmap {
113113

114114
/* length of the IOVA range for the whole bitmap */
115115
size_t length;
116+
117+
/* length of the IOVA range set ahead the pinned pages */
118+
unsigned long set_ahead_length;
116119
};
117120

118121
/*
@@ -341,6 +344,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap)
341344
return bitmap->mapped_base_index >= bitmap->mapped_total_index;
342345
}
343346

347+
static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap,
348+
size_t set_ahead_length)
349+
{
350+
int ret = 0;
351+
352+
while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) {
353+
unsigned long length = iova_bitmap_mapped_length(bitmap);
354+
unsigned long iova = iova_bitmap_mapped_iova(bitmap);
355+
356+
ret = iova_bitmap_get(bitmap);
357+
if (ret)
358+
break;
359+
360+
length = min(length, set_ahead_length);
361+
iova_bitmap_set(bitmap, iova, length);
362+
363+
set_ahead_length -= length;
364+
bitmap->mapped_base_index +=
365+
iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
366+
iova_bitmap_put(bitmap);
367+
}
368+
369+
bitmap->set_ahead_length = 0;
370+
return ret;
371+
}
372+
344373
/*
345374
* Advances to the next range, releases the current pinned
346375
* pages and pins the next set of bitmap pages.
@@ -357,6 +386,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap)
357386
if (iova_bitmap_done(bitmap))
358387
return 0;
359388

389+
/* Iterate, set and skip any bits requested for next iteration */
390+
if (bitmap->set_ahead_length) {
391+
int ret;
392+
393+
ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length);
394+
if (ret)
395+
return ret;
396+
}
397+
360398
/* When advancing the index we pin the next set of bitmap pages */
361399
return iova_bitmap_get(bitmap);
362400
}
@@ -426,5 +464,10 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
426464
kunmap_local(kaddr);
427465
cur_bit += nbits;
428466
} while (cur_bit <= last_bit);
467+
468+
if (unlikely(cur_bit <= last_bit)) {
469+
bitmap->set_ahead_length =
470+
((last_bit - cur_bit + 1) << bitmap->mapped.pgshift);
471+
}
429472
}
430473
EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);

0 commit comments

Comments
 (0)