Skip to content

Commit 18f06e9

Browse files
committed
KVM: Add helpers to consolidate gfn_to_pfn_cache's page split check
Add a helper to check that the incoming length for a gfn_to_pfn_cache is valid with respect to the cache's GPA and/or HVA. To avoid activating a cache with a bogus GPA, a future fix will fork the page split check in the inner refresh path into activate() and the public refresh() APIs, at which point KVM will check the length in three separate places. Deliberately keep the "page offset" logic open coded, as the only other path that consumes the offset, __kvm_gpc_refresh(), already needs to differentiate between GPA-based and HVA-based caches, and it's not obvious that using a helper is a net positive in overall code readability. Note, for GPA-based caches, this has a subtle side effect of using the GPA instead of the resolved HVA in the check() path, but that should be a nop as the HVA offset is derived from the GPA, i.e. the two offsets are identical, barring a KVM bug. Reviewed-by: Paul Durrant <[email protected]> Reviewed-by: David Woodhouse <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Sean Christopherson <[email protected]>
1 parent fec50db commit 18f06e9

File tree

1 file changed

+19
-8
lines changed

1 file changed

+19
-8
lines changed

virt/kvm/pfncache.c

Lines changed: 19 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,19 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
5757
spin_unlock(&kvm->gpc_lock);
5858
}
5959

60+
static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
61+
unsigned long len)
62+
{
63+
unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
64+
offset_in_page(gpa);
65+
66+
/*
67+
* The cached access must fit within a single page. The 'len' argument
68+
* to activate() and refresh() exists only to enforce that.
69+
*/
70+
return offset + len <= PAGE_SIZE;
71+
}
72+
6073
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
6174
{
6275
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
@@ -74,7 +87,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
7487
if (kvm_is_error_hva(gpc->uhva))
7588
return false;
7689

77-
if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
90+
if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
7891
return false;
7992

8093
if (!gpc->valid)
@@ -247,13 +260,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
247260
if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
248261
return -EINVAL;
249262

250-
/*
251-
* The cached acces must fit within a single page. The 'len' argument
252-
* exists only to enforce that.
253-
*/
254-
page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
255-
offset_in_page(gpa);
256-
if (page_offset + len > PAGE_SIZE)
263+
if (!kvm_gpc_is_valid_len(gpa, uhva, len))
257264
return -EINVAL;
258265

259266
lockdep_assert_held(&gpc->refresh_lock);
@@ -270,6 +277,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
270277
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
271278

272279
if (kvm_is_error_gpa(gpa)) {
280+
page_offset = offset_in_page(uhva);
281+
273282
gpc->gpa = INVALID_GPA;
274283
gpc->memslot = NULL;
275284
gpc->uhva = PAGE_ALIGN_DOWN(uhva);
@@ -279,6 +288,8 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
279288
} else {
280289
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
281290

291+
page_offset = offset_in_page(gpa);
292+
282293
if (gpc->gpa != gpa || gpc->generation != slots->generation ||
283294
kvm_is_error_hva(gpc->uhva)) {
284295
gfn_t gfn = gpa_to_gfn(gpa);

0 commit comments

Comments
 (0)