
Commit 20ec3eb

Chao Peng authored and Paolo Bonzini committed
KVM: Rename mmu_notifier_* to mmu_invalidate_*
The motivation for this renaming is to make these variables and the related helper functions less bound to mmu_notifier, so that they can also be used for non-mmu_notifier-based page invalidation. mmu_invalidate_* was chosen to better describe the purpose these variables serve: 'invalidating' a page.

- mmu_notifier_seq/range_start/range_end are renamed to mmu_invalidate_seq/range_start/range_end.
- The mmu_notifier_retry{_hva} helper functions are renamed to mmu_invalidate_retry{_hva}.
- mmu_notifier_count is renamed to mmu_invalidate_in_progress to avoid confusion with mn_active_invalidate_count.
- While here, kvm_inc/dec_notifier_count() are also updated to kvm_mmu_invalidate_begin/end() to match the change for mmu_notifier_count.

No functional change intended.

Signed-off-by: Chao Peng <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent bdd1c37 commit 20ec3eb

File tree

15 files changed: +103 −98 lines
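Editor's note: every arch hunk below follows the same lock-free fault pattern that the renamed fields support: snapshot the sequence count, resolve the pfn outside mmu_lock, then recheck under the lock before installing the mapping. The following is a minimal sketch of that pattern using the renamed identifiers; kvm_map_gfn_sketch() and its error handling are illustrative only, not code from this commit.

/*
 * Illustrative sketch only -- not taken from this commit. Shows how the
 * renamed mmu_invalidate_seq / mmu_invalidate_retry() pair is typically
 * used on the page-fault side.
 */
static int kvm_map_gfn_sketch(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	/* Snapshot the invalidation sequence count before looking up the pfn. */
	mmu_seq = kvm->mmu_invalidate_seq;
	/* Order the snapshot before the (possibly sleeping) pfn lookup below. */
	smp_rmb();

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* An invalidation raced with us; drop the pfn, caller retries. */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}

	/* ... install the stage-2/shadow mapping while holding mmu_lock ... */

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}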

arch/arm64/kvm/mmu.c

Lines changed: 4 additions & 4 deletions
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	 * THP doesn't start to split while we are adjusting the
 	 * refcounts.
 	 *
-	 * We are sure this doesn't happen, because mmu_notifier_retry
+	 * We are sure this doesn't happen, because mmu_invalidate_retry
 	 * was successful and we are holding the mmu_lock, so if this
 	 * THP is trying to split, it will be blocked in the mmu
 	 * notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return ret;
 	}
 
-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 	/*
-	 * Ensure the read of mmu_notifier_seq happens before we call
+	 * Ensure the read of mmu_invalidate_seq happens before we call
 	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
 	 * the page we just got a reference to gets unmapped before we have a
 	 * chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		write_lock(&kvm->mmu_lock);
 	pgt = vcpu->arch.hw_mmu->pgt;
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
 		goto out_unlock;
 
 	/*
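Editor's note: the mmu_invalidate_retry() helper used above is not part of this file's hunks; it lives in include/linux/kvm_host.h and amounts to checking whether an invalidation is in flight or the sequence count has moved since the snapshot. Roughly, as a simplified sketch:

/* Simplified sketch of the renamed helper; not a verbatim copy. */
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_invalidate_in_progress))
		return 1;	/* an invalidation is currently running */
	/*
	 * Order the read of mmu_invalidate_seq after the in-progress check;
	 * pairs with the sequence-count increment performed when the
	 * invalidation ends.
	 */
	smp_rmb();
	if (kvm->mmu_invalidate_seq != mmu_seq)
		return 1;	/* an invalidation completed since the snapshot */
	return 0;
}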

arch/mips/kvm/mmu.c

Lines changed: 6 additions & 6 deletions
@@ -615,17 +615,17 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	 * Used to check for invalidations in progress, of the pfn that is
 	 * returned by pfn_to_pfn_prot below.
 	 */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	/*
-	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
-	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+	 * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
 	 * risk the page we get a reference to getting unmapped before we have a
-	 * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
 	 *
 	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
 	 * of the pte_unmap_unlock() after the PTE is zapped, and the
 	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
-	 * mmu_notifier_seq is incremented.
+	 * mmu_invalidate_seq is incremented.
 	 */
 	smp_rmb();
 
@@ -638,7 +638,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 
 	spin_lock(&kvm->mmu_lock);
 	/* Check if an invalidation has taken place since we got pfn */
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 		/*
 		 * This can happen when mappings are changed asynchronously, but
 		 * also synchronously if a COW is triggered by
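Editor's note: the write side that the smp_rmb() above pairs with lives in virt/kvm/kvm_main.c and is not shown in this excerpt. Conceptually, the renamed kvm_mmu_invalidate_begin()/kvm_mmu_invalidate_end() bracket each invalidation, raising mmu_invalidate_in_progress while it runs and bumping mmu_invalidate_seq when it finishes. A simplified sketch, with range-overlap bookkeeping omitted and assuming both are called with mmu_lock held:

/* Simplified sketch of the renamed write-side helpers; not verbatim. */
void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	/*
	 * Faults that run now either observe the raised count or serialize
	 * behind the invalidation on mmu_lock; both cause
	 * mmu_invalidate_retry() to report a conflict.
	 */
	kvm->mmu_invalidate_in_progress++;
	kvm->mmu_invalidate_range_start = start;
	kvm->mmu_invalidate_range_end = end;
}

void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * Advance the sequence count before dropping in_progress, so a fault
	 * that sampled the old sequence while the invalidation was running
	 * still retries after it completes.
	 */
	kvm->mmu_invalidate_seq++;
	kvm->mmu_invalidate_in_progress--;
}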

arch/powerpc/include/asm/kvm_book3s_64.h

Lines changed: 1 addition & 1 deletion
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
 	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
 		"%s called with kvm mmu_lock not held \n", __func__);
 
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
 		return NULL;
 
 	pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

arch/powerpc/kvm/book3s_64_mmu_host.c

Lines changed: 2 additions & 2 deletions
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	unsigned long pfn;
 
 	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	/* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	cpte = kvmppc_mmu_hpte_cache_next(vcpu);
 
 	spin_lock(&kvm->mmu_lock);
-	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+	if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
 		r = -EAGAIN;
 		goto out_unlock;
 	}

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 2 additions & 2 deletions
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 		return -EFAULT;
 
 	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 
 	/* Check if we might have been invalidated; let the guest retry if so */
 	ret = RESUME_GUEST;
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
 		unlock_rmap(rmap);
 		goto out_unlock;
 	}

arch/powerpc/kvm/book3s_64_mmu_radix.c

Lines changed: 3 additions & 3 deletions
@@ -640,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 	/* Check if we might have been invalidated; let the guest retry if so */
 	spin_lock(&kvm->mmu_lock);
 	ret = -EAGAIN;
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
 		goto out_unlock;
 
 	/* Now traverse again under the lock and change the tree */
@@ -830,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 	bool large_enable;
 
 	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	/*
@@ -1191,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	 * Increase the mmu notifier sequence number to prevent any page
 	 * fault that read the memslot earlier from writing a PTE.
 	 */
-	kvm->mmu_notifier_seq++;
+	kvm->mmu_invalidate_seq++;
 	spin_unlock(&kvm->mmu_lock);
 }
 

arch/powerpc/kvm/book3s_hv_nested.c

Lines changed: 1 addition & 1 deletion
@@ -1580,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	/* 2. Find the host pte for this L1 guest real address */
 
 	/* Used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	/* See if can find translation in our partition scoped tables for L1 */

arch/powerpc/kvm/book3s_hv_rm_mmu.c

Lines changed: 4 additions & 4 deletions
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	g_ptel = ptel;
 
 	/* used later to detect if we might have been invalidated */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	/* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	rmap = real_vmalloc_addr(rmap);
 	lock_rmap(rmap);
 	/* Check for pending invalidations under the rmap chain lock */
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 		/* inval in progress, write a non-present HPTE */
 		pteh |= HPTE_V_ABSENT;
 		pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 	int i;
 
 	/* Used later to detect if we might have been invalidated */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 	long ret = H_SUCCESS;
 
 	/* Used later to detect if we might have been invalidated */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);

arch/powerpc/kvm/e500_mmu_host.c

Lines changed: 2 additions & 2 deletions
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long flags;
 
 	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	/*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	spin_lock(&kvm->mmu_lock);
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 		ret = -EAGAIN;
 		goto out;
 	}

arch/riscv/kvm/mmu.c

Lines changed: 2 additions & 2 deletions
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 		return ret;
 	}
 
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 
 	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
 	if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
 	spin_lock(&kvm->mmu_lock);
 
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
 		goto out_unlock;
 
 	if (writable) {
