
Commit 58f5ee5

sean-jcdwmw2 authored and committed
KVM: Drop @gpa from exported gfn=>pfn cache check() and refresh() helpers
Drop the @gpa param from the exported check()+refresh() helpers and limit
changing the cache's GPA to the activate path. All external users just feed
in gpc->gpa, i.e. this is a fancy nop.

Allowing users to change the GPA at check()+refresh() is dangerous as those
helpers explicitly allow concurrent calls, e.g. KVM could get into a livelock
scenario. It's also unclear as to what the expected behavior should be if
multiple tasks attempt to refresh with different GPAs.

Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
1 parent 5762cb1 commit 58f5ee5
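
For orientation: after this change the exported helpers operate purely on the cache's own gpc->gpa. Below is a minimal sketch of the resulting caller pattern, distilled from the kvm_setup_guest_pvclock() hunk in this commit; `len` stands in for the caller's single-page length check and the error path is simplified, so treat it as illustrative rather than a verbatim call site.

	/* Sketch: gpc was activated earlier via kvm_gpc_activate(). */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, len)) {	/* no @gpa: validates gpc->gpa */
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(gpc, len))	/* refreshes at gpc->gpa */
			return;			/* mapping unusable; bail */

		read_lock_irqsave(&gpc->lock, flags);
	}
	/* ... access the mapping (e.g. via gpc->khva) under gpc->lock ... */
	read_unlock_irqrestore(&gpc->lock, flags);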

File tree

4 files changed: +27 −26 lines changed

arch/x86/kvm/x86.c

Lines changed: 2 additions & 4 deletions

@@ -3035,12 +3035,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	unsigned long flags;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(gpc, gpc->gpa,
-			      offset + sizeof(*guest_hv_clock))) {
+	while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(gpc, gpc->gpa,
-				    offset + sizeof(*guest_hv_clock)))
+		if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);

arch/x86/kvm/xen.c

Lines changed: 11 additions & 11 deletions

@@ -272,14 +272,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 * gfn_to_pfn caches that cover the region.
 	 */
 	read_lock_irqsave(&gpc1->lock, flags);
-	while (!kvm_gpc_check(gpc1, gpc1->gpa, user_len1)) {
+	while (!kvm_gpc_check(gpc1, user_len1)) {
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
 		/* When invoked from kvm_sched_out() we cannot sleep */
 		if (atomic)
 			return;
 
-		if (kvm_gpc_refresh(gpc1, gpc1->gpa, user_len1))
+		if (kvm_gpc_refresh(gpc1, user_len1))
 			return;
 
 		read_lock_irqsave(&gpc1->lock, flags);
@@ -308,7 +308,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 */
 	read_lock(&gpc2->lock);
 
-	if (!kvm_gpc_check(gpc2, gpc2->gpa, user_len2)) {
+	if (!kvm_gpc_check(gpc2, user_len2)) {
 		read_unlock(&gpc2->lock);
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -488,10 +488,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	 * little more honest about it.
 	 */
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gpc_refresh(gpc, gpc->gpa, sizeof(struct vcpu_info)))
+		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
 			return;
 
 		read_lock_irqsave(&gpc->lock, flags);
@@ -551,7 +551,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
 		/*
@@ -565,7 +565,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		if (in_atomic() || !task_is_running(current))
 			return 1;
 
-		if (kvm_gpc_refresh(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
 			/*
 			 * If this failed, userspace has screwed up the
 			 * vcpu_info mapping. No interrupts for you.
@@ -1154,7 +1154,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 
 	read_lock_irqsave(&gpc->lock, flags);
 	idx = srcu_read_lock(&kvm->srcu);
-	if (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
 	ret = false;
@@ -1576,7 +1576,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	idx = srcu_read_lock(&kvm->srcu);
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
 	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1610,7 +1610,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	gpc = &vcpu->arch.xen.vcpu_info_cache;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+	if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
 		/*
 		 * Could not access the vcpu_info. Set the bit in-kernel
 		 * and prod the vCPU to deliver it for itself.
@@ -1708,7 +1708,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 			break;
 
 		idx = srcu_read_lock(&kvm->srcu);
-		rc = kvm_gpc_refresh(gpc, gpc->gpa, PAGE_SIZE);
+		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
 		srcu_read_unlock(&kvm->srcu, idx);
 	} while(!rc);
 

include/linux/kvm_host.h

Lines changed: 3 additions & 5 deletions

@@ -1299,7 +1299,6 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
  * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *
  * @gpc:	   struct gfn_to_pfn_cache object.
- * @gpa:	   current guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  *
  * @return:	   %true if the cache is still valid and the address matches.
@@ -1312,15 +1311,14 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
  * Callers in IN_GUEST_MODE may do so without locking, although they should
  * still hold a read lock on kvm->scru for the memslot checks.
  */
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
 
 /**
  * kvm_gpc_refresh - update a previously initialized cache.
  *
  * @gpc:	   struct gfn_to_pfn_cache object.
- * @gpa:	   updated guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
-
+ *
  * @return:	   0 for success.
  *		   -EINVAL for a mapping which would cross a page boundary.
  *		   -EFAULT for an untranslatable guest physical address.
@@ -1331,7 +1329,7 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
  * still lock and check the cache status, as this function does not return
  * with the lock still held to permit access.
  */
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
 
 /**
  * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.

virt/kvm/pfncache.c

Lines changed: 11 additions & 6 deletions

@@ -76,18 +76,17 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 	}
 }
 
-bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 
 	if (!gpc->active)
 		return false;
 
-	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+	if ((gpc->gpa & ~PAGE_MASK) + len > PAGE_SIZE)
 		return false;
 
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva))
+	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
 		return false;
 
 	if (!gpc->valid)
@@ -237,7 +236,8 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 	return -EFAULT;
 }
 
-int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+			     unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -331,6 +331,11 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 
 	return ret;
 }
+
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
+{
+	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+}
 EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
@@ -371,7 +376,7 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return kvm_gpc_refresh(gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
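
Design note on the pfncache.c change above: the @gpa parameter survives only on the new internal __kvm_gpc_refresh(), which the activate path uses to set the cache's target; the exported kvm_gpc_refresh() always passes gpc->gpa. Since the commit message confines GPA changes to the activate path, retargeting a cache at a new guest physical address presumably goes through kvm_gpc_activate(). A hedged sketch of such a caller (hypothetical, not part of this commit; `new_gpa`, `len`, and `ret` are placeholder names):

	/* Point the cache at a new GPA; only the activate path may change it. */
	ret = kvm_gpc_activate(gpc, new_gpa, len);
	if (ret)
		return ret;

	/* check()/refresh() now implicitly operate on the new gpc->gpa. */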
