
Commit 4b99412

kvaneesh authored and mpe committed
powerpc/kvm/book3s: Add helper to walk partition scoped linux page table.
The locking rules for walking a partition scoped table are different from those for a process scoped table. Hence add a helper for the secondary Linux page table walk, and also add a check that we are holding the right locks.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 87013f9 commit 4b99412
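For context only, a minimal sketch (not part of this commit) of the calling pattern the new helper expects: take kvm->mmu_lock before walking the partition scoped table, which is the pattern the radix MMU callers updated below already follow. The helper and field names come from the diff; the wrapping function example_gpa_is_mapped() is hypothetical.

#include <linux/kvm_host.h>
#include <linux/spinlock.h>
#include <asm/kvm_book3s_64.h>

/*
 * Hypothetical caller: walk the partition scoped (secondary) table for a
 * guest physical address while holding kvm->mmu_lock, satisfying the
 * VM_WARN() check inside find_kvm_secondary_pte().
 */
static bool example_gpa_is_mapped(struct kvm *kvm, unsigned long gpa)
{
	pte_t *ptep;
	unsigned int shift;
	bool mapped = false;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep))
		mapped = true;
	spin_unlock(&kvm->mmu_lock);

	return mapped;
}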

File tree

3 files changed: +20 / -7 lines

arch/powerpc/include/asm/kvm_book3s_64.h

Lines changed: 13 additions & 0 deletions
@@ -14,6 +14,7 @@
 #include <asm/book3s/64/mmu-hash.h>
 #include <asm/cpu_has_feature.h>
 #include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>

 #ifdef CONFIG_PPC_PSERIES
 static inline bool kvmhv_on_pseries(void)
@@ -634,6 +635,18 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
 				 unsigned long gpa, unsigned long hpa,
 				 unsigned long nbytes);

+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+					     unsigned *hshift)
+{
+	pte_t *pte;
+
+	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+		"%s called with kvm mmu_lock not held \n", __func__);
+	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+	return pte;
+}
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

 #endif /* __ASM_KVM_BOOK3S_64_H__ */

arch/powerpc/kvm/book3s_64_mmu_radix.c

Lines changed: 6 additions & 6 deletions
@@ -981,11 +981,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		return 0;
 	}

-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
 				 kvm->arch.lpid);
-	return 0;				
+	return 0;
 }

 /* Called with kvm->mmu_lock held */
@@ -1001,7 +1001,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ref;

-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
 					      gpa, shift);
@@ -1028,7 +1028,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ref;

-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
 		ref = 1;
 	return ref;
@@ -1048,7 +1048,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ret;

-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
 		ret = 1;
 		if (shift)
@@ -1109,7 +1109,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	gpa = memslot->base_gfn << PAGE_SHIFT;
 	spin_lock(&kvm->mmu_lock);
 	for (n = memslot->npages; n; --n) {
-		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 		if (ptep && pte_present(*ptep))
 			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
 					 kvm->arch.lpid);

arch/powerpc/kvm/book3s_hv_nested.c

Lines changed: 1 addition & 1 deletion
@@ -1362,7 +1362,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
 	/* See if can find translation in our partition scoped tables for L1 */
 	pte = __pte(0);
 	spin_lock(&kvm->mmu_lock);
-	pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (!shift)
 		shift = PAGE_SHIFT;
 	if (pte_p)
