Skip to content

Commit aba3cae

Browse files
mmhal authored and bonzini committed
KVM: Shorten gfn_to_pfn_cache function names
Formalize "gpc" as the acronym and use it in function names. No functional change intended. Suggested-by: Sean Christopherson <[email protected]> Signed-off-by: Michal Luczaj <[email protected]> Signed-off-by: Sean Christopherson <[email protected]> Signed-off-by: David Woodhouse <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 8acc351 commit aba3cae

File tree

4 files changed

+39
-40
lines changed

4 files changed

+39
-40
lines changed

arch/x86/kvm/x86.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3037,12 +3037,12 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
30373037
unsigned long flags;
30383038

30393039
read_lock_irqsave(&gpc->lock, flags);
3040-
while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
3041-
offset + sizeof(*guest_hv_clock))) {
3040+
while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
3041+
offset + sizeof(*guest_hv_clock))) {
30423042
read_unlock_irqrestore(&gpc->lock, flags);
30433043

3044-
if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
3045-
offset + sizeof(*guest_hv_clock)))
3044+
if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
3045+
offset + sizeof(*guest_hv_clock)))
30463046
return;
30473047

30483048
read_lock_irqsave(&gpc->lock, flags);

arch/x86/kvm/xen.c

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -273,14 +273,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
273273
* gfn_to_pfn caches that cover the region.
274274
*/
275275
read_lock_irqsave(&gpc1->lock, flags);
276-
while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
276+
while (!kvm_gpc_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
277277
read_unlock_irqrestore(&gpc1->lock, flags);
278278

279279
/* When invoked from kvm_sched_out() we cannot sleep */
280280
if (atomic)
281281
return;
282282

283-
if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
283+
if (kvm_gpc_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
284284
return;
285285

286286
read_lock_irqsave(&gpc1->lock, flags);
@@ -309,7 +309,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
309309
*/
310310
read_lock(&gpc2->lock);
311311

312-
if (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
312+
if (!kvm_gpc_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
313313
read_unlock(&gpc2->lock);
314314
read_unlock_irqrestore(&gpc1->lock, flags);
315315

@@ -489,12 +489,12 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
489489
* little more honest about it.
490490
*/
491491
read_lock_irqsave(&gpc->lock, flags);
492-
while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
493-
sizeof(struct vcpu_info))) {
492+
while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
493+
sizeof(struct vcpu_info))) {
494494
read_unlock_irqrestore(&gpc->lock, flags);
495495

496-
if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
497-
sizeof(struct vcpu_info)))
496+
if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
497+
sizeof(struct vcpu_info)))
498498
return;
499499

500500
read_lock_irqsave(&gpc->lock, flags);
@@ -554,8 +554,8 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
554554
sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
555555

556556
read_lock_irqsave(&gpc->lock, flags);
557-
while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
558-
sizeof(struct vcpu_info))) {
557+
while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
558+
sizeof(struct vcpu_info))) {
559559
read_unlock_irqrestore(&gpc->lock, flags);
560560

561561
/*
@@ -569,8 +569,8 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
569569
if (in_atomic() || !task_is_running(current))
570570
return 1;
571571

572-
if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
573-
sizeof(struct vcpu_info))) {
572+
if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
573+
sizeof(struct vcpu_info))) {
574574
/*
575575
* If this failed, userspace has screwed up the
576576
* vcpu_info mapping. No interrupts for you.
@@ -1167,7 +1167,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
11671167

11681168
read_lock_irqsave(&gpc->lock, flags);
11691169
idx = srcu_read_lock(&kvm->srcu);
1170-
if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
1170+
if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
11711171
goto out_rcu;
11721172

11731173
ret = false;
@@ -1564,7 +1564,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
15641564
idx = srcu_read_lock(&kvm->srcu);
15651565

15661566
read_lock_irqsave(&gpc->lock, flags);
1567-
if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
1567+
if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
15681568
goto out_rcu;
15691569

15701570
if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1598,7 +1598,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
15981598
gpc = &vcpu->arch.xen.vcpu_info_cache;
15991599

16001600
read_lock_irqsave(&gpc->lock, flags);
1601-
if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
1601+
if (!kvm_gpc_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
16021602
/*
16031603
* Could not access the vcpu_info. Set the bit in-kernel
16041604
* and prod the vCPU to deliver it for itself.
@@ -1696,7 +1696,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
16961696
break;
16971697

16981698
idx = srcu_read_lock(&kvm->srcu);
1699-
rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
1699+
rc = kvm_gpc_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
17001700
srcu_read_unlock(&kvm->srcu, idx);
17011701
} while(!rc);
17021702

include/linux/kvm_host.h

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1288,16 +1288,15 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
12881288
* -EFAULT for an untranslatable guest physical address.
12891289
*
12901290
* This primes a gfn_to_pfn_cache and links it into the @kvm's list for
1291-
* invalidations to be processed. Callers are required to use
1292-
* kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
1293-
* accessing the target page.
1291+
* invalidations to be processed. Callers are required to use kvm_gpc_check()
1292+
* to ensure that the cache is valid before accessing the target page.
12941293
*/
12951294
int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
12961295
struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
12971296
gpa_t gpa, unsigned long len);
12981297

12991298
/**
1300-
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
1299+
* kvm_gpc_check - check validity of a gfn_to_pfn_cache.
13011300
*
13021301
* @kvm: pointer to kvm instance.
13031302
* @gpc: struct gfn_to_pfn_cache object.
@@ -1314,11 +1313,11 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
13141313
* Callers in IN_GUEST_MODE may do so without locking, although they should
13151314
* still hold a read lock on kvm->scru for the memslot checks.
13161315
*/
1317-
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1318-
gpa_t gpa, unsigned long len);
1316+
bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
1317+
unsigned long len);
13191318

13201319
/**
1321-
* kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
1320+
* kvm_gpc_refresh - update a previously initialized cache.
13221321
*
13231322
* @kvm: pointer to kvm instance.
13241323
* @gpc: struct gfn_to_pfn_cache object.
@@ -1335,11 +1334,11 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
13351334
* still lock and check the cache status, as this function does not return
13361335
* with the lock still held to permit access.
13371336
*/
1338-
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1339-
gpa_t gpa, unsigned long len);
1337+
int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
1338+
unsigned long len);
13401339

13411340
/**
1342-
* kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
1341+
* kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
13431342
*
13441343
* @kvm: pointer to kvm instance.
13451344
* @gpc: struct gfn_to_pfn_cache object.
@@ -1348,7 +1347,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
13481347
* but at least the mapping from GPA to userspace HVA will remain cached
13491348
* and can be reused on a subsequent refresh.
13501349
*/
1351-
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
1350+
void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
13521351

13531352
/**
13541353
* kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.

virt/kvm/pfncache.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -76,8 +76,8 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
7676
}
7777
}
7878

79-
bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
80-
gpa_t gpa, unsigned long len)
79+
bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
80+
unsigned long len)
8181
{
8282
struct kvm_memslots *slots = kvm_memslots(kvm);
8383

@@ -96,7 +96,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
9696

9797
return true;
9898
}
99-
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
99+
EXPORT_SYMBOL_GPL(kvm_gpc_check);
100100

101101
static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
102102
{
@@ -238,8 +238,8 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
238238
return -EFAULT;
239239
}
240240

241-
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
242-
gpa_t gpa, unsigned long len)
241+
int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
242+
unsigned long len)
243243
{
244244
struct kvm_memslots *slots = kvm_memslots(kvm);
245245
unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -333,9 +333,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
333333

334334
return ret;
335335
}
336-
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);
336+
EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
337337

338-
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
338+
void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
339339
{
340340
void *old_khva;
341341
kvm_pfn_t old_pfn;
@@ -360,7 +360,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
360360

361361
gpc_unmap_khva(kvm, old_pfn, old_khva);
362362
}
363-
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
363+
EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
364364

365365
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
366366
{
@@ -396,7 +396,7 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
396396
gpc->active = true;
397397
write_unlock_irq(&gpc->lock);
398398
}
399-
return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
399+
return kvm_gpc_refresh(kvm, gpc, gpa, len);
400400
}
401401
EXPORT_SYMBOL_GPL(kvm_gpc_activate);
402402

@@ -416,7 +416,7 @@ void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
416416
list_del(&gpc->list);
417417
spin_unlock(&kvm->gpc_lock);
418418

419-
kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
419+
kvm_gpc_unmap(kvm, gpc);
420420
}
421421
}
422422
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);

0 commit comments

Comments (0)